Mirror of https://github.com/coolsnowwolf/lede.git (synced 2025-04-16 04:13:31 +00:00)

Commit 68253688d8 — quectel_MHI: add support Linux Kernel 6.1+
Parent: 9d50fcccf0
@@ -8,7 +8,7 @@
 include $(TOPDIR)/rules.mk

 PKG_NAME:=pcie_mhi
-PKG_VERSION:=3.2
+PKG_VERSION:=1.3.6
 PKG_RELEASE:=1

 include $(INCLUDE_DIR)/kernel.mk
@@ -15,10 +15,36 @@
 #include <linux/uaccess.h>
 #include <linux/msi.h>
 #include <linux/interrupt.h>
 #define MAX_MHI 8
+#ifdef CONFIG_PCI_MSM
+#define QCOM_AP_AND_EFUSE_PCIE_SLEEP
+#ifdef QCOM_AP_AND_EFUSE_PCIE_SLEEP
+#include <linux/platform_device.h>
+#include <linux/msm_pcie.h>
+#endif
+#endif
+//#define QCOM_AP_SDM845_IOMMU_MAP
+#ifdef QCOM_AP_SDM845_IOMMU_MAP
+#include <linux/dma-mapping.h>
+#include <asm/dma-iommu.h>
+#include <linux/iommu.h>
+#endif
 #include "../core/mhi.h"
 #include "../core/mhi_internal.h"
 #include "mhi_qti.h"

+#ifdef QCOM_AP_AND_EFUSE_PCIE_SLEEP
+extern int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
+struct arch_info {
+    struct mhi_dev *mhi_dev;
+    struct msm_bus_scale_pdata *msm_bus_pdata;
+    u32 bus_client;
+    struct pci_saved_state *pcie_state;
+    struct pci_saved_state *ref_pcie_state;
+    struct dma_iommu_mapping *mapping;
+};
+#endif
+
 #if 1
 #if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,10,65 ))
 static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
@@ -177,11 +203,19 @@ static int mhi_init_pci_dev(struct mhi_controller *mhi_cntrl)

 #if 1 //some SOC like rpi_4b need next codes
     ret = -EIO;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0))
+    if (!dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64))) {
+        ret = dma_set_coherent_mask(&pci_dev->dev, DMA_BIT_MASK(64));
+    } else if (!dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) {
+        ret = dma_set_coherent_mask(&pci_dev->dev, DMA_BIT_MASK(32));
+    }
+#else
     if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) {
         ret = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(64));
     } else if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
         ret = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(32));
    }
+#endif
     if (ret) {
         MHI_ERR("Error dma mask\n");
     }
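Note: the PCI-specific DMA-mask wrappers pci_set_dma_mask()/pci_set_consistent_dma_mask() were removed from mainline in the 5.18 cycle, which is what the version guard above works around. The generic DMA API offers a single call that sets both the streaming and coherent masks; a minimal sketch of equivalent fallback logic (set_dma_masks is a hypothetical helper name):

    #include <linux/dma-mapping.h>

    static int set_dma_masks(struct pci_dev *pci_dev)
    {
        /* Try 64-bit streaming + coherent masks first, fall back to 32-bit. */
        int ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));

        if (ret)
            ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
        return ret; /* non-zero means no usable DMA mask was accepted */
    }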
@@ -584,6 +618,28 @@ static void mhi_runtime_mark_last_busy(struct mhi_controller *mhi_cntrl, void *p
     pm_runtime_mark_last_busy(dev);
 }

+#ifdef QCOM_AP_AND_EFUSE_PCIE_SLEEP
+static void mhi_pci_event_cb(struct msm_pcie_notify *notify)
+{
+    struct pci_dev *pci_dev = notify->user;
+    struct device *dev = &pci_dev->dev;
+
+    dev_info(&pci_dev->dev, "Received PCIe event %d", notify->event);
+    switch (notify->event) {
+    case MSM_PCIE_EVENT_WAKEUP:
+        if (dev && pm_runtime_status_suspended(dev)) {
+            pm_request_resume(dev);
+            pm_runtime_mark_last_busy(dev);
+        }
+        break;
+    default:
+        break;
+    }
+}
+
+static struct msm_pcie_register_event mhi_pcie_events[MAX_MHI];
+#endif
+
 static void mhi_status_cb(struct mhi_controller *mhi_cntrl,
                           void *priv,
                           enum MHI_CB reason)
@@ -854,6 +910,11 @@ int mhi_pci_probe(struct pci_dev *pci_dev,
     pr_info("%s pci_dev->name = %s, domain=%d, bus=%d, slot=%d, vendor=%04X, device=%04X\n",
             __func__, dev_name(&pci_dev->dev), domain, bus, slot, pci_dev->vendor, pci_dev->device);

+#if !defined(CONFIG_PCI_MSI)
+/* MT7621 RTL8198D EcoNet-EN7565 */
+#error "pcie msi is not support by this soc! and i donot support INTx (SW1SDX55-2688)"
+#endif
+
     if (!mhi_pci_is_alive(pci_dev)) {
         /*
         root@OpenWrt:~# hexdump /sys/bus/pci/devices/0000:01:00.0/config
@@ -877,9 +938,11 @@ int mhi_pci_probe(struct pci_dev *pci_dev,
     mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
     mhi_dev->powered_on = true;

+    mhi_arch_iommu_init(mhi_cntrl);
+
     ret = mhi_arch_pcie_init(mhi_cntrl);
     if (ret)
-        return ret;
+        goto error_init_pci_arch;

     mhi_cntrl->dev = &pci_dev->dev;
     ret = mhi_init_pci_dev(mhi_cntrl);
@@ -897,6 +960,28 @@ int mhi_pci_probe(struct pci_dev *pci_dev,

     mhi_pci_show_link(mhi_cntrl, pci_dev);

+#ifdef QCOM_AP_AND_EFUSE_PCIE_SLEEP
+    {
+        struct msm_pcie_register_event *pcie_event = &mhi_pcie_events[mhi_cntrl->cntrl_idx];
+
+        pcie_event->events = MSM_PCIE_EVENT_WAKEUP;
+#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,14,117 ))
+        pcie_event->pcie_event.user = pci_dev;
+        pcie_event->pcie_event.mode = MSM_PCIE_TRIGGER_CALLBACK;
+        pcie_event->pcie_event.callback = mhi_pci_event_cb;
+#else
+        pcie_event->user = pci_dev;
+        pcie_event->mode = MSM_PCIE_TRIGGER_CALLBACK;
+        pcie_event->callback = mhi_pci_event_cb;
+#endif
+
+        ret = msm_pcie_register_event(pcie_event);
+        if (ret) {
+            MHI_LOG("Failed to register for PCIe event");
+        }
+    }
+#endif
+
     MHI_LOG("Return successful\n");

     return 0;
@@ -907,6 +992,8 @@ error_power_up:

 error_init_pci:
     mhi_arch_pcie_deinit(mhi_cntrl);
+error_init_pci_arch:
+    mhi_arch_iommu_deinit(mhi_cntrl);

     return ret;
 }
@@ -925,6 +1012,14 @@ void mhi_pci_device_removed(struct pci_dev *pci_dev)

     struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);

+#ifdef QCOM_AP_AND_EFUSE_PCIE_SLEEP
+    {
+        struct msm_pcie_register_event *pcie_event = &mhi_pcie_events[mhi_cntrl->cntrl_idx];
+
+        msm_pcie_deregister_event(pcie_event);
+    }
+#endif
+
     pm_stay_awake(&mhi_cntrl->mhi_dev->dev);

     /* if link is in drv suspend, wake it up */
@@ -950,6 +1045,7 @@ void mhi_pci_device_removed(struct pci_dev *pci_dev)
     mhi_arch_link_off(mhi_cntrl, false);

     mhi_arch_pcie_deinit(mhi_cntrl);
+    mhi_arch_iommu_deinit(mhi_cntrl);

     pm_relax(&mhi_cntrl->mhi_dev->dev);

@@ -1004,26 +1100,108 @@ void mhi_controller_qcom_exit(void)
     pr_info("%s exit\n", __func__);
 }

+#ifdef QCOM_AP_SDM845_IOMMU_MAP
+struct dma_iommu_mapping *mhi_smmu_mapping[MAX_MHI];
+
+#define SMMU_BASE 0x10000000
+#define SMMU_SIZE 0x40000000
+static struct dma_iommu_mapping * sdm845_smmu_init(struct pci_dev *pdev) {
+    int ret = 0;
+    int atomic_ctx = 1;
+    int s1_bypass = 1;
+    struct dma_iommu_mapping *mapping;
+
+    mapping = arm_iommu_create_mapping(&platform_bus_type, SMMU_BASE, SMMU_SIZE);
+    if (IS_ERR(mapping)) {
+        ret = PTR_ERR(mapping);
+        dev_err(&pdev->dev, "Create mapping failed, err = %d\n", ret);
+        return NULL;
+    }
+
+    ret = iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_ATOMIC, &atomic_ctx);
+    if (ret < 0) {
+        dev_err(&pdev->dev, "Set atomic_ctx attribute failed, err = %d\n", ret);
+        goto set_attr_fail;
+    }
+
+    ret = iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
+    if (ret < 0) {
+        dev_err(&pdev->dev, "Set s1_bypass attribute failed, err = %d\n", ret);
+        arm_iommu_release_mapping(mapping);
+        goto set_attr_fail;
+    }
+
+    ret = arm_iommu_attach_device(&pdev->dev, mapping);
+    if (ret < 0) {
+        dev_err(&pdev->dev, "Attach device failed, err = %d\n", ret);
+        goto attach_fail;
+    }
+
+    return mapping;
+
+attach_fail:
+set_attr_fail:
+    arm_iommu_release_mapping(mapping);
+    return NULL;
+}
+#endif
+
+int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl)
+{
+#ifdef QCOM_AP_SDM845_IOMMU_MAP
+    struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+
+    mhi_cntrl->dev = &mhi_dev->pci_dev->dev;
+    mhi_smmu_mapping[mhi_cntrl->cntrl_idx] = sdm845_smmu_init(mhi_dev->pci_dev);
+#endif
+
+    return dma_set_mask_and_coherent(mhi_cntrl->dev, DMA_BIT_MASK(64));
+    return 0;
+}
+
+void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl)
+{
+#ifdef QCOM_AP_SDM845_IOMMU_MAP
+    if (mhi_smmu_mapping[mhi_cntrl->cntrl_idx]) {
+        struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+
+        arm_iommu_detach_device(&mhi_dev->pci_dev->dev);
+        arm_iommu_release_mapping(mhi_smmu_mapping[mhi_cntrl->cntrl_idx]);
+        mhi_smmu_mapping[mhi_cntrl->cntrl_idx] = NULL;
+    }
+#endif
+}
+
 static int mhi_arch_set_bus_request(struct mhi_controller *mhi_cntrl, int index)
 {
     MHI_LOG("Setting bus request to index %d\n", index);
     return 0;
 }

 int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl)
 {
+    return 0;
+#ifdef QCOM_AP_AND_EFUSE_PCIE_SLEEP
     struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
     struct arch_info *arch_info = mhi_dev->arch_info;

     if (!arch_info) {
         arch_info = devm_kzalloc(&mhi_dev->pci_dev->dev,
                                  sizeof(*arch_info), GFP_KERNEL);
         if (!arch_info)
             return -ENOMEM;

         mhi_dev->arch_info = arch_info;

         /* save reference state for pcie config space */
         arch_info->ref_pcie_state = pci_store_saved_state(
                                         mhi_dev->pci_dev);
     }
+#endif

     return mhi_arch_set_bus_request(mhi_cntrl, 1);
 }

 void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl)
 {
     mhi_arch_set_bus_request(mhi_cntrl, 0);
 }

 int mhi_arch_platform_init(struct mhi_dev *mhi_dev)
@@ -1038,11 +1216,91 @@ void mhi_arch_platform_deinit(struct mhi_dev *mhi_dev)
 int mhi_arch_link_off(struct mhi_controller *mhi_cntrl,
                       bool graceful)
 {
+#ifdef QCOM_AP_AND_EFUSE_PCIE_SLEEP
+    struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+    struct arch_info *arch_info = mhi_dev->arch_info;
+    struct pci_dev *pci_dev = mhi_dev->pci_dev;
+    int ret;
+
+    MHI_LOG("Entered\n");
+
+    if (graceful) {
+        pci_clear_master(pci_dev);
+        ret = pci_save_state(mhi_dev->pci_dev);
+        if (ret) {
+            MHI_ERR("Failed with pci_save_state, ret:%d\n", ret);
+            return ret;
+        }
+
+        arch_info->pcie_state = pci_store_saved_state(pci_dev);
+        pci_disable_device(pci_dev);
+    }
+
+    /*
+     * We will always attempt to put link into D3hot, however
+     * link down may have happened due to error fatal, so
+     * ignoring the return code
+     */
+    pci_set_power_state(pci_dev, PCI_D3hot);
+
+    ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, mhi_cntrl->bus, pci_dev,
+                              NULL, 0);
+    MHI_ERR("msm_pcie_pm_control(MSM_PCIE_SUSPEND), ret:%d\n", ret);
+
+    /* release the resources */
+    mhi_arch_set_bus_request(mhi_cntrl, 0);
+
+    MHI_LOG("Exited\n");
+#endif

     return 0;
 }

 int mhi_arch_link_on(struct mhi_controller *mhi_cntrl)
 {
+#ifdef QCOM_AP_AND_EFUSE_PCIE_SLEEP
+    struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+    struct arch_info *arch_info = mhi_dev->arch_info;
+    struct pci_dev *pci_dev = mhi_dev->pci_dev;
+    int ret;
+
+    MHI_LOG("Entered\n");
+
+    /* request resources and establish link trainning */
+    ret = mhi_arch_set_bus_request(mhi_cntrl, 1);
+    if (ret)
+        MHI_LOG("Could not set bus frequency, ret:%d\n", ret);
+
+    ret = msm_pcie_pm_control(MSM_PCIE_RESUME, mhi_cntrl->bus, pci_dev,
+                              NULL, 0);
+    MHI_LOG("msm_pcie_pm_control(MSM_PCIE_RESUME), ret:%d\n", ret);
+    if (ret) {
+        MHI_ERR("Link training failed, ret:%d\n", ret);
+        return ret;
+    }
+
+    ret = pci_set_power_state(pci_dev, PCI_D0);
+    if (ret) {
+        MHI_ERR("Failed to set PCI_D0 state, ret:%d\n", ret);
+        return ret;
+    }
+
+    ret = pci_enable_device(pci_dev);
+    if (ret) {
+        MHI_ERR("Failed to enable device, ret:%d\n", ret);
+        return ret;
+    }
+
+    ret = pci_load_and_free_saved_state(pci_dev, &arch_info->pcie_state);
+    if (ret)
+        MHI_LOG("Failed to load saved cfg state\n");
+
+    pci_restore_state(pci_dev);
+    pci_set_master(pci_dev);
+
+    MHI_LOG("Exited\n");
+#endif

     return 0;
 }
 #endif
@@ -4,10 +4,14 @@
 #ifndef _MHI_H_
 #define _MHI_H_

-#define PCIE_MHI_DRIVER_VERSION "V1.3.4"
+#define PCIE_MHI_DRIVER_VERSION "V1.3.6"
 #define ENABLE_MHI_MON
 //#define ENABLE_IP_SW0

+// #define ENABLE_ADPL
+
+// #define ENABLE_QDSS
+
 #include <linux/miscdevice.h>
 typedef enum
 {
@@ -82,7 +86,8 @@ typedef enum
     MHI_CLIENT_IP_HW_0_OUT = 100,
     MHI_CLIENT_IP_HW_0_IN = 101,
     MHI_CLIENT_ADPL = 102,
-    MHI_CLIENT_RESERVED_5_LOWER = 103,
+    MHI_CLIENT_IP_HW_QDSS = 103,
+    // MHI_CLIENT_RESERVED_5_LOWER = 103,
     MHI_CLIENT_RESERVED_5_UPPER = 127,
     MHI_MAX_CHANNELS = 128
 }MHI_CLIENT_CHANNEL_TYPE;
@@ -98,7 +103,12 @@ typedef enum
 #endif
     IPA_OUT_EVENT_RING,
     IPA_IN_EVENT_RING,
+#ifdef ENABLE_ADPL
     ADPL_EVT_RING,
+#endif
+#ifdef ENABLE_QDSS
+    QDSS_EVT_RING,
+#endif

     MAX_EVT_RING_IDX
 }MHI_EVT_RING_IDX;
@@ -109,7 +119,7 @@ typedef enum
 #define MAX_NUM_MHI_DEVICES 1
 #define NUM_MHI_XFER_RINGS 128
 #define NUM_MHI_EVT_RINGS MAX_EVT_RING_IDX
-#define NUM_MHI_HW_EVT_RINGS 3
+#define NUM_MHI_HW_EVT_RINGS 4
 #define NUM_MHI_XFER_RING_ELEMENTS 16
 #define NUM_MHI_EVT_RING_ELEMENTS (NUM_MHI_IPA_IN_RING_ELEMENTS*2) //must *2, event ring full will make x55 dump
 #define NUM_MHI_IPA_IN_RING_ELEMENTS 512
@@ -117,6 +127,14 @@ typedef enum
 #define NUM_MHI_DIAG_IN_RING_ELEMENTS 128
 #define NUM_MHI_SW_IP_RING_ELEMENTS 512

+#ifdef ENABLE_ADPL
+#define NUM_MHI_ADPL_RING_ELEMENTS 256
+#endif
+
+#ifdef ENABLE_QDSS
+#define NUM_MHI_QDSS_RING_ELEMENTS 256
+#endif
+
 /*
  * for if set Interrupt moderation time as 1ms,
 and transfer more than NUM_MHI_CHAN_RING_ELEMENTS data are sent to the modem in 1ms.
@@ -200,8 +200,11 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
               lower_32_bits(mhi_buf->dma_addr));

     mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0))
+    sequence_id = get_random_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
+#else
     sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
-
+#endif
     if (unlikely(!sequence_id))
         sequence_id = 1;

@@ -313,7 +316,11 @@ int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)

     mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);

+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0))
+    sequence_id = get_random_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
+#else
     sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
+#endif
     mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
                         BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
                         sequence_id);
@@ -364,8 +371,11 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
               lower_32_bits(mhi_buf->dma_addr));

     mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);

+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0))
+    mhi_cntrl->sequence_id = get_random_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK;
+#else
     mhi_cntrl->sequence_id = prandom_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK;
+#endif
     mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
                         BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT,
                         mhi_cntrl->sequence_id);
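Note: prandom_u32() was removed in Linux 6.1 (its remaining callers were converted to get_random_u32()), which is why the three BHIE sequence-id hunks above gain a KERNEL_VERSION(6, 1, 0) guard. A version-agnostic helper, as a sketch (mhi_gen_seq_id is a hypothetical name; the mask follows the driver's own *_SEQNUM_BMSK defines):

    #include <linux/random.h>
    #include <linux/version.h>

    static u32 mhi_gen_seq_id(u32 mask)
    {
    #if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0))
        u32 id = get_random_u32() & mask;
    #else
        u32 id = prandom_u32() & mask;
    #endif
        return id ? id : 1; /* the device treats a 0 sequence id as invalid */
    }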
@@ -641,12 +651,11 @@ void mhi_fw_load_worker(struct work_struct *work)
     ret = mhi_fw_load_sbl(mhi_cntrl, dma_addr, size);
     mhi_free_coherent(mhi_cntrl, size, buf, dma_addr);

-    if (!mhi_cntrl->fbc_download || ret || mhi_cntrl->ee == MHI_EE_EDL)
-        release_firmware(firmware);
-
     /* error or in edl, we're done */
-    if (ret || mhi_cntrl->ee == MHI_EE_EDL)
+    if (ret || mhi_cntrl->ee == MHI_EE_EDL) {
+        release_firmware(firmware);
         return;
+    }

     write_lock_irq(&mhi_cntrl->pm_lock);
     mhi_cntrl->dev_state = MHI_STATE_RESET;
@@ -678,8 +687,10 @@ void mhi_fw_load_worker(struct work_struct *work)
              TO_MHI_STATE_STR(mhi_cntrl->dev_state),
              TO_MHI_EXEC_STR(mhi_cntrl->ee), ret);

-    if (!mhi_cntrl->fbc_download)
-        return;
+    if (!mhi_cntrl->fbc_download) {
+        release_firmware(firmware);
+        return;
+    }

     if (ret) {
         MHI_ERR("Did not transition to READY state\n");
@@ -836,7 +847,10 @@ long bhi_write_image(struct mhi_controller *mhi_cntrl, void __user *ubuf)
         MHI_ERR("IOCTL_BHI_WRITEIMAGE copy size error, ret = %ld\n", ret);
         return ret;
     }
-
+    if (size <= 0) {
+        MHI_ERR("IOCTL_BHI_WRITEIMAGE copy size error, size\n");
+        return -EINVAL;
+    }
     ret = BhiWrite(mhi_cntrl, ubuf+sizeof(size), size);
     if (ret) {
         MHI_ERR("IOCTL_BHI_WRITEIMAGE BhiWrite error, ret = %ld\n", ret);
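Note: the two mhi_fw_load_worker hunks above close a reference leak — previously some early-return paths could exit without dropping the request_firmware() reference. The usual balanced pattern, sketched with a hypothetical consumer function:

    #include <linux/firmware.h>

    const struct firmware *fw;

    if (request_firmware(&fw, fw_name, dev))
        return;
    ret = load_image(dev, fw->data, fw->size); /* hypothetical consumer */
    release_firmware(fw);                      /* always balance the request */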
@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/wait.h>
+#include <linux/uaccess.h>
 #include <asm/uaccess.h>
 #include <linux/version.h>
 #if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,11,0 ))
@@ -27,6 +28,7 @@ struct mhi_controller_map {
     u32 domain;
     u32 bus;
     u32 slot;
+    struct mhi_controller *mhi_cntrl;
 };

 #define MAX_MHI_CONTROLLER 16
@@ -388,7 +390,7 @@ static void mon_text_complete(void *data, u32 chan, dma_addr_t wp, struct mhi_tr
     mon_text_event(rp, chan, wp, mhi_tre, NULL, 0, 'E');
 }

-void mon_reader_add(struct mhi_controller *mbus, struct mon_reader *r)
+static void mon_reader_add(struct mhi_controller *mbus, struct mon_reader *r)
 {
     unsigned long flags;

|
||||
ptr.limit = rp->printf_size;
|
||||
|
||||
ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
|
||||
"%u %c %03d WP:%llx TRE: %llx %08x %08x",
|
||||
ep->tstamp, ep->type, ep->chan, ep->wp,
|
||||
ep->mhi_tre.ptr, ep->mhi_tre.dword[0], ep->mhi_tre.dword[1]);
|
||||
"%u %c %03d WP:%llx TRE: %llx %08x %08x",
|
||||
ep->tstamp, ep->type, ep->chan, (long long unsigned int)ep->wp,
|
||||
ep->mhi_tre.ptr, ep->mhi_tre.dword[0], ep->mhi_tre.dword[1]);
|
||||
|
||||
if (ep->len) {
|
||||
struct mon_text_ptr *p = &ptr;
|
||||
@@ -651,7 +653,7 @@ static const struct file_operations mon_fops_text_u = {
 void mhi_init_debugfs(struct mhi_controller *mhi_cntrl)
 {
     struct dentry *dentry;
-    char node[32];
+    char node[64];

 #ifdef ENABLE_MHI_MON
     struct mhi_controller *mbus = mhi_cntrl;
@@ -663,11 +665,11 @@ void mhi_init_debugfs(struct mhi_controller *mhi_cntrl)
 #endif

     if (!mhi_cntrl->parent)
-        snprintf(node, sizeof(node), "mhi_%04x_%02u:%02u.%02u",
+        snprintf(node, sizeof(node), "mhi_%04x_%02x:%02x.%02x",
                  mhi_cntrl->dev_id, mhi_cntrl->domain, mhi_cntrl->bus,
                  mhi_cntrl->slot);
     else
-        snprintf(node, sizeof(node), "%04x_%02u:%02u.%02u",
+        snprintf(node, sizeof(node), "%04x_%02x:%02x.%02x",
                  mhi_cntrl->dev_id, mhi_cntrl->domain, mhi_cntrl->bus,
                  mhi_cntrl->slot);

|
||||
|
||||
int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
|
||||
{
|
||||
u32 val;
|
||||
u32 val = 0;
|
||||
int i, ret;
|
||||
struct mhi_chan *mhi_chan;
|
||||
struct mhi_event *mhi_event;
|
||||
@ -1086,7 +1088,7 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
|
||||
MHIDATALIMIT_LOWER, U32_MAX, 0,
|
||||
lower_32_bits(mhi_cntrl->iova_stop),
|
||||
},
|
||||
{ 0, 0, 0 }
|
||||
{ 0, 0, 0, 0 }
|
||||
};
|
||||
|
||||
MHI_LOG("Initializing MMIO\n");
|
||||
@ -1194,6 +1196,21 @@ int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
|
||||
tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, sw_out_chan_ring[mhi_chan->ring]);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef ENABLE_ADPL
|
||||
else if (MHI_CLIENT_ADPL == mhi_chan->chan) {
|
||||
tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->adpl_in_chan_ring[mhi_chan->ring];
|
||||
tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, adpl_in_chan_ring[mhi_chan->ring]);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef ENABLE_QDSS
|
||||
else if (MHI_CLIENT_IP_HW_QDSS == mhi_chan->chan) {
|
||||
tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->qdss_in_chan_ring[mhi_chan->ring];
|
||||
tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, qdss_in_chan_ring[mhi_chan->ring]);
|
||||
}
|
||||
#endif
|
||||
|
||||
else if (MHI_CLIENT_DIAG_IN == mhi_chan->chan) {
|
||||
tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->diag_in_chan_ring[mhi_chan->ring];
|
||||
tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, diag_in_chan_ring[mhi_chan->ring]);
|
||||
@ -1606,11 +1623,21 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
|
||||
mhi_event->er_index = i;
|
||||
|
||||
mhi_event->ring.elements = NUM_MHI_EVT_RING_ELEMENTS; //Event ring length in elements
|
||||
if (i == PRIMARY_EVENT_RING || i == ADPL_EVT_RING)
|
||||
if (i == PRIMARY_EVENT_RING)
|
||||
mhi_event->ring.elements = 256; //256 is enough, and 1024 some times make driver fail to open channel (reason is x6x fail to malloc)
|
||||
|
||||
mhi_event->intmod = 1; //Interrupt moderation time in ms
|
||||
|
||||
#ifdef ENABLE_ADPL
|
||||
if (i == ADPL_EVT_RING)
|
||||
mhi_event->ring.elements = 256;
|
||||
#endif
|
||||
|
||||
#ifdef ENABLE_QDSS
|
||||
if (i == QDSS_EVT_RING)
|
||||
mhi_event->ring.elements = 512;
|
||||
#endif
|
||||
|
||||
/* see mhi_netdev_status_cb(), when interrupt come, the napi_poll maybe scheduled, so can reduce interrupts
|
||||
root@OpenWrt:/# cat /proc/interrupts | grep mhi
|
||||
root@OpenWrt:/# cat /sys/kernel/debug/mhi_q/mhi_netdev/pcie_mhi_0306_00.01.00_0/rx_int
|
||||
@ -1623,6 +1650,16 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
|
||||
mhi_event->intmod = 5;
|
||||
#endif
|
||||
|
||||
#ifdef ENABLE_ADPL
|
||||
if (i == ADPL_EVT_RING)
|
||||
mhi_event->intmod = 0;
|
||||
#endif
|
||||
|
||||
#ifdef ENABLE_QDSS
|
||||
if (i == QDSS_EVT_RING)
|
||||
mhi_event->intmod = 0;
|
||||
#endif
|
||||
|
||||
mhi_event->msi = 1 + i + mhi_cntrl->msi_irq_base; //MSI associated with this event ring
|
||||
|
||||
if (i == IPA_OUT_EVENT_RING)
|
||||
@ -1635,6 +1672,16 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
|
||||
else if (i == SW_0_IN_EVT_RING)
|
||||
mhi_event->chan = MHI_CLIENT_IP_SW_0_IN;
|
||||
#endif
|
||||
|
||||
#ifdef ENABLE_ADPL
|
||||
else if (i == ADPL_EVT_RING)
|
||||
mhi_event->chan = MHI_CLIENT_ADPL;
|
||||
#endif
|
||||
|
||||
#ifdef ENABLE_QDSS
|
||||
else if (i == QDSS_EVT_RING)
|
||||
mhi_event->chan = MHI_CLIENT_IP_HW_QDSS;
|
||||
#endif
|
||||
else
|
||||
mhi_event->chan = 0;
|
||||
|
||||
@@ -1659,6 +1706,16 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
         else if (i == SW_0_OUT_EVT_RING || i == SW_0_IN_EVT_RING)
             mhi_event->data_type = MHI_ER_DATA_ELEMENT_TYPE;
 #endif
+
+#ifdef ENABLE_ADPL
+        else if (i == ADPL_EVT_RING)
+            mhi_event->data_type = MHI_ER_DATA_ELEMENT_TYPE;
+#endif
+
+#ifdef ENABLE_QDSS
+        else if (i == QDSS_EVT_RING)
+            mhi_event->data_type = MHI_ER_DATA_ELEMENT_TYPE;
+#endif
         else
             mhi_event->data_type = MHI_ER_CTRL_ELEMENT_TYPE;

@@ -1674,7 +1731,14 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
             break;
         }

-        if (i == IPA_OUT_EVENT_RING || i == IPA_IN_EVENT_RING)
+        if (i == IPA_OUT_EVENT_RING || i == IPA_IN_EVENT_RING
+#ifdef ENABLE_ADPL
+            || i == ADPL_EVT_RING
+#endif
+#ifdef ENABLE_QDSS
+            || i == QDSS_EVT_RING
+#endif
+        )
             mhi_event->hw_ring = true;
         else
             mhi_event->hw_ring = false;
@@ -1714,8 +1778,9 @@ static struct chan_cfg_t chan_cfg[] = {
     {"DIAG", MHI_CLIENT_DIAG_OUT, NUM_MHI_CHAN_RING_ELEMENTS},
     {"DIAG", MHI_CLIENT_DIAG_IN, NUM_MHI_DIAG_IN_RING_ELEMENTS},
     //"Qualcomm PCIe QDSS Data"
-    {"QDSS", MHI_CLIENT_QDSS_OUT, NUM_MHI_CHAN_RING_ELEMENTS},
-    {"QDSS", MHI_CLIENT_QDSS_IN, NUM_MHI_CHAN_RING_ELEMENTS},
+    //"Do not use this QDSS. xingduo.du 2023-02-16"
+    // {"QDSS", MHI_CLIENT_QDSS_OUT, NUM_MHI_CHAN_RING_ELEMENTS},
+    // {"QDSS", MHI_CLIENT_QDSS_IN, NUM_MHI_CHAN_RING_ELEMENTS},
     //"Qualcomm PCIe EFS"
     {"EFS", MHI_CLIENT_EFS_OUT, NUM_MHI_CHAN_RING_ELEMENTS},
     {"EFS", MHI_CLIENT_EFS_IN, NUM_MHI_CHAN_RING_ELEMENTS},
@@ -1753,6 +1818,13 @@ static struct chan_cfg_t chan_cfg[] = {
     //"Qualcomm PCIe WWAN Adapter"
     {"IP_HW0", MHI_CLIENT_IP_HW_0_OUT, NUM_MHI_IPA_OUT_RING_ELEMENTS},
     {"IP_HW0", MHI_CLIENT_IP_HW_0_IN, NUM_MHI_IPA_IN_RING_ELEMENTS},
+#ifdef ENABLE_ADPL
+    {"ADPL", MHI_CLIENT_ADPL, NUM_MHI_ADPL_RING_ELEMENTS},
+#endif
+
+#ifdef ENABLE_QDSS
+    {"QDSS", MHI_CLIENT_IP_HW_QDSS, NUM_MHI_QDSS_RING_ELEMENTS},
+#endif
 };

 extern int mhi_netdev_mbin_enabled(void);
@@ -1806,7 +1878,14 @@ static int of_parse_ch_cfg(struct mhi_controller *mhi_cntrl,
         mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;

         if (chan == MHI_CLIENT_IP_HW_0_OUT || chan == MHI_CLIENT_IP_HW_0_IN || chan == MHI_CLIENT_DIAG_IN
-            || chan == MHI_CLIENT_IP_SW_0_OUT || chan == MHI_CLIENT_IP_SW_0_IN) {
+            || chan == MHI_CLIENT_IP_SW_0_OUT || chan == MHI_CLIENT_IP_SW_0_IN
+#ifdef ENABLE_ADPL
+            || chan == MHI_CLIENT_ADPL
+#endif
+#ifdef ENABLE_QDSS
+            || chan == MHI_CLIENT_IP_HW_QDSS
+#endif
+        ) {
             mhi_chan->ring = 0;
         }
         else {
@@ -1824,11 +1903,29 @@ static int of_parse_ch_cfg(struct mhi_controller *mhi_cntrl,
         else if (chan == MHI_CLIENT_IP_SW_0_IN)
             mhi_chan->er_index = SW_0_IN_EVT_RING;
 #endif

+#ifdef ENABLE_ADPL
+        else if (chan == MHI_CLIENT_ADPL)
+            mhi_chan->er_index = ADPL_EVT_RING;
+#endif
+#ifdef ENABLE_QDSS
+        else if (chan == MHI_CLIENT_IP_HW_QDSS)
+            mhi_chan->er_index = QDSS_EVT_RING;
+#endif
         else
             mhi_chan->er_index = PRIMARY_EVENT_RING;

         mhi_chan->dir = CHAN_INBOUND(chan) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

+#ifdef ENABLE_ADPL
+        if (chan == MHI_CLIENT_ADPL)
+            mhi_chan->dir = DMA_FROM_DEVICE;
+#endif
+
+#ifdef ENABLE_QDSS
+        if (chan == MHI_CLIENT_IP_HW_QDSS)
+            mhi_chan->dir = DMA_FROM_DEVICE;
+#endif
         /*
          * For most channels, chtype is identical to channel directions,
          * if not defined, assign ch direction to chtype
@@ -1847,6 +1944,14 @@ static int of_parse_ch_cfg(struct mhi_controller *mhi_cntrl,
             mhi_chan->xfer_type = MHI_XFER_SKB;
         else if (chan == MHI_CLIENT_IP_HW_0_IN || chan == MHI_CLIENT_IP_SW_0_IN)
             mhi_chan->xfer_type = MHI_XFER_SKB; //MHI_XFER_DMA;
+#ifdef ENABLE_ADPL
+        else if (chan == MHI_CLIENT_ADPL)
+            mhi_chan->xfer_type = MHI_XFER_BUFFER;
+#endif
+#ifdef ENABLE_QDSS
+        else if (chan == MHI_CLIENT_IP_HW_QDSS)
+            mhi_chan->xfer_type = MHI_XFER_BUFFER;
+#endif
         else
             mhi_chan->xfer_type = MHI_XFER_BUFFER;

@@ -1904,6 +2009,14 @@ static int of_parse_ch_cfg(struct mhi_controller *mhi_cntrl,
         if (chan == MHI_CLIENT_IP_HW_0_OUT || chan == MHI_CLIENT_IP_HW_0_IN)
             mhi_chan->db_cfg.brstmode = MHI_BRSTMODE_ENABLE;

+#ifdef ENABLE_ADPL
+        if (chan == MHI_CLIENT_ADPL)
+            mhi_chan->db_cfg.brstmode = MHI_BRSTMODE_DISABLE;
+#endif
+#ifdef ENABLE_QDSS
+        if (chan == MHI_CLIENT_IP_HW_QDSS)
+            mhi_chan->db_cfg.brstmode = MHI_BRSTMODE_DISABLE;
+#endif
         if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode))
             goto error_chan_cfg;

|
||||
ret = of_parse_ev_cfg(mhi_cntrl, of_node);
|
||||
if (ret)
|
||||
goto error_ev_cfg;
|
||||
#if defined(QCOM_AP_QCA6490_DMA_IOMMU)
|
||||
/* for QCS6490 iommu-dma is fastmap
|
||||
for SG845 iommu-dma is set in driver
|
||||
for ipq iommu-dma is disabled
|
||||
*/
|
||||
const char *str;
|
||||
ret = of_property_read_string(of_node, "qcom,iommu-dma", &str);
|
||||
if (ret)
|
||||
MHI_ERR("mhi qcom,iommu-dma need set");
|
||||
|
||||
#endif
|
||||
#if 0
|
||||
ret = of_property_read_u32(of_node, "mhi,timeout",
|
||||
&mhi_cntrl->timeout_ms);
|
||||
@ -1996,6 +2119,7 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
|
||||
mhi_controller_minors[i].domain = mhi_cntrl->domain;
|
||||
mhi_controller_minors[i].bus = mhi_cntrl->bus;
|
||||
mhi_controller_minors[i].slot = mhi_cntrl->slot;
|
||||
mhi_controller_minors[i].mhi_cntrl = mhi_cntrl;
|
||||
mhi_cntrl->cntrl_idx = i;
|
||||
break;
|
||||
}
|
||||
@ -2548,7 +2672,11 @@ static int __init mhi_cntrl_init(void)
|
||||
return ret;
|
||||
|
||||
mhi_cntrl_drv.major = ret;
|
||||
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 4, 0))
|
||||
mhi_cntrl_drv.class = class_create(MHI_CNTRL_DRIVER_NAME);
|
||||
#else
|
||||
mhi_cntrl_drv.class = class_create(THIS_MODULE, MHI_CNTRL_DRIVER_NAME);
|
||||
#endif
|
||||
if (IS_ERR(mhi_cntrl_drv.class)) {
|
||||
unregister_chrdev(mhi_cntrl_drv.major, MHI_CNTRL_DRIVER_NAME);
|
||||
return -ENODEV;
|
||||
|
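Note: in Linux 6.4 class_create() dropped its struct module * argument (the owner is now taken implicitly), so both chrdev registration sites touched by this commit wrap the call in a version guard. Sketch of the pattern, assuming a driver name string:

    #include <linux/device.h>
    #include <linux/version.h>

    #if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 4, 0))
        cls = class_create("my_driver");
    #else
        cls = class_create(THIS_MODULE, "my_driver");
    #endif
        if (IS_ERR(cls))
            return PTR_ERR(cls); /* class_create returns ERR_PTR on failure */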
@@ -860,6 +860,15 @@ struct mhi_ctrl_seg
     struct mhi_tre sw_in_chan_ring[NUM_MHI_SW_IP_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_IN_RING_ELEMENTS*16);
     struct mhi_tre sw_out_chan_ring[NUM_MHI_SW_IP_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_OUT_RING_ELEMENTS*16);
 #endif

+#ifdef ENABLE_ADPL
+    struct mhi_tre adpl_in_chan_ring[NUM_MHI_ADPL_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_IN_RING_ELEMENTS*16);
+#endif
+
+#ifdef ENABLE_QDSS
+    struct mhi_tre qdss_in_chan_ring[NUM_MHI_QDSS_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_IN_RING_ELEMENTS*16);
+#endif
+
     struct mhi_tre diag_in_chan_ring[NUM_MHI_DIAG_IN_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_OUT_RING_ELEMENTS*16);
     struct mhi_tre chan_ring[NUM_MHI_CHAN_RING_ELEMENTS*2*12] __packed __aligned(NUM_MHI_CHAN_RING_ELEMENTS*16);
     struct mhi_tre event_ring[NUM_MHI_EVT_RINGS][NUM_MHI_EVT_RING_ELEMENTS] __packed __aligned(NUM_MHI_EVT_RING_ELEMENTS*16);
@@ -919,7 +919,7 @@ static void mhi_create_time_sync_dev(struct mhi_controller *mhi_cntrl)

     mhi_dev->dev_type = MHI_TIMESYNC_TYPE;
     mhi_dev->chan_name = "TIME_SYNC";
-    dev_set_name(&mhi_dev->dev, "%04x_%02u.%02u.%02u_%s", mhi_dev->dev_id,
+    dev_set_name(&mhi_dev->dev, "%04x_%02x.%02x.%02x_%s", mhi_dev->dev_id,
                  mhi_dev->domain, mhi_dev->bus, mhi_dev->slot,
                  mhi_dev->chan_name);

|
||||
}
|
||||
|
||||
mhi_dev->chan_name = mhi_chan->name;
|
||||
dev_set_name(&mhi_dev->dev, "%04x_%02u.%02u.%02u_%s",
|
||||
dev_set_name(&mhi_dev->dev, "%04x_%02x.%02x.%02x_%s",
|
||||
mhi_dev->dev_id, mhi_dev->domain, mhi_dev->bus,
|
||||
mhi_dev->slot, mhi_dev->chan_name);
|
||||
|
||||
|
@@ -748,6 +748,8 @@ void mhi_pm_ready_worker(struct work_struct *work)
         schedule_delayed_work(&mhi_cntrl->ready_worker, msecs_to_jiffies(10));
     else if (ee == MHI_EE_AMSS || ee == MHI_EE_SBL)
         mhi_queue_state_transition(mhi_cntrl, MHI_ST_TRANSITION_READY);
+    else if (ee == MHI_EE_EDL)
+        mhi_queue_state_transition(mhi_cntrl, MHI_ST_TRANSITION_PBL);
 }

 void mhi_pm_st_worker(struct work_struct *work)
@@ -30,9 +30,16 @@
 #include <net/ipv6.h>
 #include <net/tcp.h>
 #include <linux/usb/cdc.h>
-#include "../core/mhi.h"

+//#define CONFIG_IPQ5018_RATE_CONTROL //Only used with spf11.5 for IPQ5018
+#if defined(CONFIG_IPQ5018_RATE_CONTROL)
+//#include <linux/jiffies.h>
+#include <asm/arch_timer.h>
+#endif
+
+#include "../core/mhi.h"
 //#define MHI_NETDEV_ONE_CARD_MODE
 //#define ANDROID_gki //some fuction not allow used in this TEST

 #ifndef ETH_P_MAP
 #define ETH_P_MAP 0xDA1A
@@ -61,13 +68,15 @@ struct rmnet_nss_cb {
     int (*nss_tx)(struct sk_buff *skb);
 };
 static struct rmnet_nss_cb __read_mostly *nss_cb = NULL;
-#if defined(CONFIG_PINCTRL_IPQ807x) || defined(CONFIG_PINCTRL_IPQ5018)
-#ifdef CONFIG_RMNET_DATA
+#if defined(CONFIG_PINCTRL_IPQ807x) || defined(CONFIG_PINCTRL_IPQ5018) || defined(CONFIG_PINCTRL_IPQ8074)
+//#ifdef CONFIG_RMNET_DATA //spf12.x have no macro defined, just for spf11.x
 #define CONFIG_QCA_NSS_DRV
-/* define at qsdk/qca/src/linux-4.4/net/rmnet_data/rmnet_data_main.c */
+/* define at qsdk/qca/src/linux-4.4/net/rmnet_data/rmnet_data_main.c */ //for spf11.x
+/* define at qsdk/qca/src/datarmnet/core/rmnet_config.c */ //for spf12.x
 /* set at qsdk/qca/src/data-kernel/drivers/rmnet-nss/rmnet_nss.c */
 /* need add DEPENDS:= kmod-rmnet-core in feeds/makefile */
 extern struct rmnet_nss_cb *rmnet_nss_callbacks __rcu __read_mostly;
-#endif
+//#endif
 #endif

 static const unsigned char node_id[ETH_ALEN] = {0x02, 0x50, 0xf4, 0x00, 0x00, 0x00};
@@ -311,6 +320,7 @@ struct mhi_netdev {
     MHI_MBIM_CTX mbim_ctx;

     u32 mru;
+    u32 max_mtu;
     const char *interface_name;
     struct napi_struct napi;
     struct net_device *ndev;
@@ -342,7 +352,7 @@ struct mhi_netdev {
     uint use_rmnet_usb;
     RMNET_INFO rmnet_info;

-#if defined(CONFIG_PINCTRL_IPQ5018)
+#if defined(CONFIG_IPQ5018_RATE_CONTROL)
     u64 first_jiffy;
     u64 bytes_received_1;
     u64 bytes_received_2;
@@ -441,16 +451,21 @@ static int bridge_arp_reply(struct net_device *net, struct sk_buff *skb, uint br
     pr_info("%s sip = %d.%d.%d.%d, tip=%d.%d.%d.%d, ipv4=%d.%d.%d.%d\n", netdev_name(net),
             sip[0], sip[1], sip[2], sip[3], tip[0], tip[1], tip[2], tip[3], ipv4[0], ipv4[1], ipv4[2], ipv4[3]);
     //wwan0 sip = 10.151.137.255, tip=10.151.138.0, ipv4=10.151.137.255
+#ifndef ANDROID_gki
     if (tip[0] == ipv4[0] && tip[1] == ipv4[1] && (tip[2]&0xFC) == (ipv4[2]&0xFC) && tip[3] != ipv4[3])
         reply = arp_create(ARPOP_REPLY, ETH_P_ARP, *((__be32 *)sip), net, *((__be32 *)tip), sha, default_modem_addr, sha);
+#endif

     if (reply) {
         skb_reset_mac_header(reply);
         __skb_pull(reply, skb_network_offset(reply));
         reply->ip_summed = CHECKSUM_UNNECESSARY;
         reply->pkt_type = PACKET_HOST;

+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0))
+        netif_rx(reply);
+#else
         netif_rx_ni(reply);
+#endif
     }
     return 1;
 }
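Note: netif_rx_ni() was removed in Linux 5.18, when netif_rx() was reworked to be callable from any context (it disables bottom halves itself), hence the version guard above. Sketch of the compatibility shim:

    #include <linux/netdevice.h>
    #include <linux/version.h>

    static void inject_skb(struct sk_buff *skb)
    {
    #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0))
        netif_rx(skb);    /* safe from process or softirq context */
    #else
        netif_rx_ni(skb); /* process-context variant on older kernels */
    #endif
    }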
@@ -840,8 +855,13 @@ static void rmnet_vnd_upate_rx_stats(struct net_device *net,
         struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);

         u64_stats_update_begin(&stats64->syncp);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0))
         stats64->rx_packets += rx_packets;
         stats64->rx_bytes += rx_bytes;
+#else
+        u64_stats_add(&stats64->rx_packets, rx_packets);
+        u64_stats_add(&stats64->rx_bytes, rx_bytes);
+#endif
         u64_stats_update_end(&stats64->syncp);
 #else
         priv->self_dev->stats.rx_packets += rx_packets;
@@ -856,8 +876,13 @@ static void rmnet_vnd_upate_tx_stats(struct net_device *net,
         struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);

         u64_stats_update_begin(&stats64->syncp);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0))
         stats64->tx_packets += tx_packets;
         stats64->tx_bytes += tx_bytes;
+#else
+        u64_stats_add(&stats64->tx_packets, tx_packets);
+        u64_stats_add(&stats64->tx_bytes, tx_bytes);
+#endif
         u64_stats_update_end(&stats64->syncp);
 #else
         net->stats.rx_packets += tx_packets;
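Note: in Linux 6.1 the counters in struct pcpu_sw_netstats changed from plain u64 to the opaque u64_stats_t type, so a direct "+=" no longer compiles; writers must use u64_stats_add() and readers u64_stats_read(). The same release also renamed u64_stats_fetch_begin_irq()/..._retry_irq() to u64_stats_fetch_begin()/..._retry(), which the get_stats64 hunks further below rely on. A minimal writer sketch:

    #include <linux/u64_stats_sync.h>

    u64_stats_update_begin(&stats64->syncp);
    #if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0))
        stats64->rx_packets += packets;          /* plain u64 field */
    #else
        u64_stats_add(&stats64->rx_packets, packets); /* u64_stats_t field */
    #endif
    u64_stats_update_end(&stats64->syncp);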
@@ -866,13 +891,44 @@ static void rmnet_vnd_upate_tx_stats(struct net_device *net,
 }

 #if defined(MHI_NETDEV_STATUS64)
+#ifdef ANDROID_gki
+static void _netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
+                                     const struct net_device_stats *netdev_stats)
+{
+#if BITS_PER_LONG == 64
+    BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
+    memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
+    /* zero out counters that only exist in rtnl_link_stats64 */
+    memset((char *)stats64 + sizeof(*netdev_stats), 0,
+           sizeof(*stats64) - sizeof(*netdev_stats));
+#else
+    size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
+    const unsigned long *src = (const unsigned long *)netdev_stats;
+    u64 *dst = (u64 *)stats64;
+
+    BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
+    for (i = 0; i < n; i++)
+        dst[i] = src[i];
+    /* zero out counters that only exist in rtnl_link_stats64 */
+    memset((char *)stats64 + n * sizeof(u64), 0,
+           sizeof(*stats64) - n * sizeof(u64));
+#endif
+}
+#else
+static void my_netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
+                                       const struct net_device_stats *netdev_stats)
+{
+    netdev_stats_to_stats64(stats64, netdev_stats);
+}
+#endif
+
 static struct rtnl_link_stats64 *_rmnet_vnd_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats)
 {
     struct qmap_priv *dev = netdev_priv(net);
     unsigned int start;
     int cpu;

-    netdev_stats_to_stats64(stats, &net->stats);
+    my_netdev_stats_to_stats64(stats, &net->stats);

     if (nss_cb && dev->use_qca_nss) { // rmnet_nss.c:rmnet_nss_tx() will update rx stats
         stats->rx_packets = 0;
@@ -881,6 +937,7 @@ static struct rtnl_link_stats64 *_rmnet_vnd_get_stats64(struct net_device *net,

     for_each_possible_cpu(cpu) {
         struct pcpu_sw_netstats *stats64;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0))
         u64 rx_packets, rx_bytes;
         u64 tx_packets, tx_bytes;

|
||||
stats->rx_bytes += rx_bytes;
|
||||
stats->tx_packets += tx_packets;
|
||||
stats->tx_bytes += tx_bytes;
|
||||
#else
|
||||
u64_stats_t rx_packets, rx_bytes;
|
||||
u64_stats_t tx_packets, tx_bytes;
|
||||
|
||||
stats64 = per_cpu_ptr(dev->stats64, cpu);
|
||||
|
||||
do {
|
||||
start = u64_stats_fetch_begin(&stats64->syncp);
|
||||
rx_packets = stats64->rx_packets;
|
||||
rx_bytes = stats64->rx_bytes;
|
||||
tx_packets = stats64->tx_packets;
|
||||
tx_bytes = stats64->tx_bytes;
|
||||
} while (u64_stats_fetch_retry(&stats64->syncp, start));
|
||||
|
||||
stats->rx_packets += u64_stats_read(&rx_packets);
|
||||
stats->rx_bytes += u64_stats_read(&rx_bytes);
|
||||
stats->tx_packets += u64_stats_read(&tx_packets);
|
||||
stats->tx_bytes += u64_stats_read(&tx_bytes);
|
||||
#endif
|
||||
}
|
||||
|
||||
return stats;
|
||||
@ -1035,8 +1111,22 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
|
||||
|
||||
static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
|
||||
{
|
||||
if (new_mtu < 0 || new_mtu > 1500)
|
||||
struct mhi_netdev *mhi_netdev;
|
||||
|
||||
mhi_netdev = (struct mhi_netdev *)ndev_to_mhi(rmnet_dev);
|
||||
|
||||
if (mhi_netdev == NULL) {
|
||||
printk("warning, mhi_netdev == null\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (new_mtu < 0 )
|
||||
return -EINVAL;
|
||||
|
||||
if (new_mtu > mhi_netdev->max_mtu) {
|
||||
printk("warning, set mtu=%d greater than max mtu=%d\n", new_mtu, mhi_netdev->max_mtu);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
rmnet_dev->mtu = new_mtu;
|
||||
return 0;
|
||||
@ -1188,7 +1278,7 @@ static void rmnet_mbim_rx_handler(void *dev, struct sk_buff *skb_in)
|
||||
MSG_ERR("unsupported tci %d by now\n", tci);
|
||||
goto error;
|
||||
}
|
||||
|
||||
tci = abs(tci);
|
||||
qmap_net = pQmapDev->mpQmapNetDev[qmap_mode == 1 ? 0 : tci - 1];
|
||||
|
||||
dpe16 = ndp16->dpe16;
|
||||
@ -1317,8 +1407,21 @@ static void rmnet_qmi_rx_handler(void *dev, struct sk_buff *skb_in)
|
||||
}
|
||||
#endif
|
||||
skb_len -= dl_minimum_padding;
|
||||
if (skb_len > 1500) {
|
||||
netdev_info(ndev, "drop skb_len=%x larger than 1500\n", skb_len);
|
||||
|
||||
mux_id = map_header->mux_id - QUECTEL_QMAP_MUX_ID;
|
||||
if (mux_id >= pQmapDev->qmap_mode) {
|
||||
netdev_info(ndev, "drop qmap unknow mux_id %x\n", map_header->mux_id);
|
||||
goto error_pkt;
|
||||
}
|
||||
mux_id = abs(mux_id);
|
||||
qmap_net = pQmapDev->mpQmapNetDev[mux_id];
|
||||
if (qmap_net == NULL) {
|
||||
netdev_info(ndev, "drop qmap unknow mux_id %x\n", map_header->mux_id);
|
||||
goto skip_pkt;
|
||||
}
|
||||
|
||||
if (skb_len > qmap_net->mtu) {
|
||||
netdev_info(ndev, "drop skb_len=%x larger than qmap mtu=%d\n", skb_len, qmap_net->mtu);
|
||||
goto error_pkt;
|
||||
}
|
||||
|
||||
@@ -1359,19 +1462,6 @@ static void rmnet_qmi_rx_handler(void *dev, struct sk_buff *skb_in)
         netdev_info(ndev, "unknow skb->protocol %02x\n", skb_in->data[hdr_size]);
         goto error_pkt;
     }

-    mux_id = map_header->mux_id - QUECTEL_QMAP_MUX_ID;
-    if (mux_id >= pQmapDev->qmap_mode) {
-        netdev_info(ndev, "drop qmap unknow mux_id %x\n", map_header->mux_id);
-        goto error_pkt;
-    }
-
-    qmap_net = pQmapDev->mpQmapNetDev[mux_id];
-
-    if (qmap_net == NULL) {
-        netdev_info(ndev, "drop qmap unknow mux_id %x\n", map_header->mux_id);
-        goto skip_pkt;
-    }
-
     //for Qualcomm's SFE, do not use skb_clone(), or SFE 's performace is very bad.
     //for Qualcomm's NSS, do not use skb_clone(), or NSS 's performace is very bad.
@@ -1485,6 +1575,7 @@ static struct net_device * rmnet_vnd_register_device(struct mhi_netdev *pQmapDev
     struct qmap_priv *priv;
     int err;
     int use_qca_nss = !!nss_cb;
+    unsigned char temp_addr[ETH_ALEN];

     qmap_net = alloc_etherdev(sizeof(*priv));
     if (!qmap_net)
@@ -1498,10 +1589,21 @@ static struct net_device * rmnet_vnd_register_device(struct mhi_netdev *pQmapDev
     priv->pQmapDev = pQmapDev;
     priv->qmap_version = pQmapDev->qmap_version;
     priv->mux_id = mux_id;
-    sprintf(qmap_net->name, "%s.%d", real_dev->name, offset_id + 1);
+    sprintf(qmap_net->name, "%.12s.%d", real_dev->name, offset_id + 1);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
+    __dev_addr_set(qmap_net, real_dev->dev_addr, ETH_ALEN);
+#else
     memcpy (qmap_net->dev_addr, real_dev->dev_addr, ETH_ALEN);
-    qmap_net->dev_addr[5] = offset_id + 1;
-    //eth_random_addr(qmap_net->dev_addr);
+#endif
+    //qmap_net->dev_addr[5] = offset_id + 1;
+    //eth_random_addr(qmap_net->dev_addr);
+    memcpy(temp_addr, qmap_net->dev_addr, ETH_ALEN);
+    temp_addr[5] = offset_id + 1;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
+    __dev_addr_set(qmap_net, temp_addr, ETH_ALEN);
+#else
+    memcpy(qmap_net->dev_addr, temp_addr, ETH_ALEN);
+#endif
 #if defined(MHI_NETDEV_STATUS64)
     priv->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
     if (!priv->stats64)
@@ -1528,11 +1630,16 @@ static struct net_device * rmnet_vnd_register_device(struct mhi_netdev *pQmapDev
     qmap_net->netdev_ops = &rmnet_vnd_ops;
     qmap_net->flags |= IFF_NOARP;
     qmap_net->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)
+    qmap_net->max_mtu = pQmapDev->max_mtu;
+#endif

     if (nss_cb && use_qca_nss) {
         rmnet_vnd_rawip_setup(qmap_net);
     }
-
+#ifdef CONFIG_PINCTRL_IPQ9574
+    rmnet_vnd_rawip_setup(qmap_net);
+#endif
     if (pQmapDev->net_type == MHI_NET_MBIM) {
         qmap_net->needed_headroom = sizeof(struct mhi_mbim_hdr);
     }
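Note: since 5.15 the kernel provides dev_addr_set()/__dev_addr_set() and later made net_device::dev_addr const, so a MAC address can no longer be patched in place; that is why the register-device hunk above builds the address in a temp_addr buffer first and sets it in one shot. Sketch of the pattern:

    unsigned char addr[ETH_ALEN];

    memcpy(addr, real_dev->dev_addr, ETH_ALEN); /* start from the parent MAC */
    addr[5] = offset_id + 1;                    /* make it unique per mux id */
    #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
        __dev_addr_set(qmap_net, addr, ETH_ALEN);   /* const-safe setter */
    #else
        memcpy(qmap_net->dev_addr, addr, ETH_ALEN); /* legacy direct write */
    #endif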
@@ -1705,8 +1812,13 @@ static void mhi_netdev_upate_rx_stats(struct mhi_netdev *mhi_netdev,
     struct pcpu_sw_netstats *stats64 = this_cpu_ptr(mhi_netdev->stats64);

     u64_stats_update_begin(&stats64->syncp);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0))
     stats64->rx_packets += rx_packets;
     stats64->rx_bytes += rx_bytes;
+#else
+    u64_stats_add(&stats64->rx_packets, rx_packets);
+    u64_stats_add(&stats64->rx_bytes, rx_bytes);
+#endif
     u64_stats_update_begin(&stats64->syncp);
 #else
     mhi_netdev->ndev->stats.rx_packets += rx_packets;
@@ -1720,8 +1832,13 @@ static void mhi_netdev_upate_tx_stats(struct mhi_netdev *mhi_netdev,
     struct pcpu_sw_netstats *stats64 = this_cpu_ptr(mhi_netdev->stats64);

     u64_stats_update_begin(&stats64->syncp);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0))
     stats64->tx_packets += tx_packets;
     stats64->tx_bytes += tx_bytes;
+#else
+    u64_stats_add(&stats64->tx_packets, tx_packets);
+    u64_stats_add(&stats64->tx_bytes, tx_bytes);
+#endif
     u64_stats_update_begin(&stats64->syncp);
 #else
     mhi_netdev->ndev->stats.tx_packets += tx_packets;
@@ -1973,7 +2090,9 @@ static netdev_tx_t mhi_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
     //qmap_hex_dump(__func__, skb->data, 32);

 #ifdef MHI_NETDEV_ONE_CARD_MODE
-    if (dev->type == ARPHRD_ETHER) {
+    //printk("%s dev->type=%d\n", __func__, dev->type);
+
+    if (dev->type == ARPHRD_ETHER) {
         skb_reset_mac_header(skb);

 #ifdef QUECTEL_BRIDGE_MODE
@@ -2035,6 +2154,8 @@ static netdev_tx_t mhi_netdev_xmit(struct sk_buff *skb, struct net_device *dev)

     res = mhi_queue_transfer(mhi_dev, DMA_TO_DEVICE, skb, skb->len,
                              MHI_EOT);
+
+    //printk("%s transfer res=%d\n", __func__, res);
     if (unlikely(res)) {
         dev_kfree_skb_any(skb);
         dev->stats.tx_errors++;
@@ -2057,6 +2178,7 @@ static struct rtnl_link_stats64 * _mhi_netdev_get_stats64(struct net_device *nde

     for_each_possible_cpu(cpu) {
         struct pcpu_sw_netstats *stats64;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0))
         u64 rx_packets, rx_bytes;
         u64 tx_packets, tx_bytes;

|
||||
stats->rx_bytes += rx_bytes;
|
||||
stats->tx_packets += tx_packets;
|
||||
stats->tx_bytes += tx_bytes;
|
||||
#else
|
||||
u64_stats_t rx_packets, rx_bytes;
|
||||
u64_stats_t tx_packets, tx_bytes;
|
||||
|
||||
stats64 = per_cpu_ptr(mhi_netdev->stats64, cpu);
|
||||
|
||||
do {
|
||||
start = u64_stats_fetch_begin(&stats64->syncp);
|
||||
rx_packets = stats64->rx_packets;
|
||||
rx_bytes = stats64->rx_bytes;
|
||||
tx_packets = stats64->tx_packets;
|
||||
tx_bytes = stats64->tx_bytes;
|
||||
} while (u64_stats_fetch_retry(&stats64->syncp, start));
|
||||
|
||||
stats->rx_packets += u64_stats_read(&rx_packets);
|
||||
stats->rx_bytes += u64_stats_read(&rx_bytes);
|
||||
stats->tx_packets += u64_stats_read(&tx_packets);
|
||||
stats->tx_bytes += u64_stats_read(&tx_bytes);
|
||||
#endif
|
||||
}
|
||||
|
||||
return stats;
|
||||
@ -2182,7 +2323,11 @@ static void mhi_netdev_setup(struct net_device *dev)
|
||||
ether_setup(dev);
|
||||
|
||||
dev->ethtool_ops = &mhi_netdev_ethtool_ops;
|
||||
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
|
||||
__dev_addr_set (dev, node_id, sizeof node_id);
|
||||
#else
|
||||
memcpy (dev->dev_addr, node_id, sizeof node_id);
|
||||
#endif
|
||||
/* set this after calling ether_setup */
|
||||
dev->header_ops = 0; /* No header */
|
||||
dev->hard_header_len = 0;
|
||||
@ -2269,8 +2414,15 @@ static int mhi_netdev_enable_iface(struct mhi_netdev *mhi_netdev)
|
||||
mhi_netdev->ndev->mtu = mhi_netdev->mru;
|
||||
}
|
||||
rtnl_unlock();
|
||||
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)
|
||||
mhi_netdev->ndev->max_mtu = mhi_netdev->max_mtu; //first net card
|
||||
#endif
|
||||
|
||||
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0))
|
||||
netif_napi_add_weight(mhi_netdev->ndev, &mhi_netdev->napi, mhi_netdev_poll, poll_weight);
|
||||
#else
|
||||
netif_napi_add(mhi_netdev->ndev, &mhi_netdev->napi, mhi_netdev_poll, poll_weight);
|
||||
#endif
|
||||
ret = register_netdev(mhi_netdev->ndev);
|
||||
if (ret) {
|
||||
MSG_ERR("Network device registration failed\n");
|
||||
@ -2367,7 +2519,7 @@ static void mhi_netdev_xfer_dl_cb(struct mhi_device *mhi_dev,
|
||||
return;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_PINCTRL_IPQ5018)
|
||||
#if defined(CONFIG_IPQ5018_RATE_CONTROL)
|
||||
if (likely(mhi_netdev->mhi_rate_control)) {
|
||||
u32 time_interval = 0;
|
||||
u32 time_difference = 0;
|
||||
@ -2377,7 +2529,11 @@ static void mhi_netdev_xfer_dl_cb(struct mhi_device *mhi_dev,
|
||||
struct net_device *ndev = mhi_netdev->ndev;
|
||||
|
||||
if (mhi_netdev->first_jiffy) {
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0)
|
||||
second_jiffy = arch_counter_get_cntvct();
|
||||
#else
|
||||
second_jiffy = __arch_counter_get_cntvct();
|
||||
#endif
|
||||
bytes_received_2 = mhi_netdev->bytes_received_2;
|
||||
if ((second_jiffy > mhi_netdev->first_jiffy) &&
|
||||
(bytes_received_2 > mhi_netdev->bytes_received_1)) {
|
||||
@ -2418,7 +2574,12 @@ static void mhi_netdev_xfer_dl_cb(struct mhi_device *mhi_dev,
|
||||
mhi_netdev->bytes_received_1 = bytes_received_2;
|
||||
}
|
||||
} else {
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0)
|
||||
mhi_netdev->first_jiffy = arch_counter_get_cntvct();
|
||||
#else
|
||||
mhi_netdev->first_jiffy = __arch_counter_get_cntvct();
|
||||
#endif
|
||||
|
||||
cntfrq = arch_timer_get_cntfrq();
|
||||
mhi_netdev->cntfrq_per_msec = cntfrq / 1000;
|
||||
}
|
||||
@ -2668,7 +2829,11 @@ static void mhi_netdev_remove(struct mhi_device *mhi_dev)
|
||||
}
|
||||
|
||||
rtnl_lock();
|
||||
#ifdef ANDROID_gki
|
||||
if (mhi_netdev->ndev && rtnl_dereference(mhi_netdev->ndev->rx_handler))
|
||||
#else
|
||||
if (netdev_is_rx_handler_busy(mhi_netdev->ndev))
|
||||
#endif
|
||||
netdev_rx_handler_unregister(mhi_netdev->ndev);
|
||||
rtnl_unlock();
|
||||
#endif
|
||||
@ -2726,13 +2891,16 @@ static int mhi_netdev_probe(struct mhi_device *mhi_dev,
|
||||
mhi_netdev->mhi_dev = mhi_dev;
|
||||
mhi_device_set_devdata(mhi_dev, mhi_netdev);
|
||||
|
||||
mhi_netdev->mru = 15360; ///etc/data/qnicorn_config.xml dataformat_agg_dl_size 15*1024
|
||||
mhi_netdev->mru = (15*1024); ///etc/data/qnicorn_config.xml dataformat_agg_dl_size 15*1024
|
||||
mhi_netdev->max_mtu = mhi_netdev->mru - (sizeof(struct rmnet_map_v5_csum_header) + sizeof(struct rmnet_map_header));
|
||||
if (mhi_netdev->net_type == MHI_NET_MBIM) {
|
||||
mhi_netdev->mru = ncmNTBParams.dwNtbInMaxSize;
|
||||
mhi_netdev->mbim_ctx.rx_max = mhi_netdev->mru;
|
||||
mhi_netdev->max_mtu = mhi_netdev->mru - sizeof(struct mhi_mbim_hdr);
|
||||
}
|
||||
else if (mhi_netdev->net_type == MHI_NET_ETHER) {
|
||||
mhi_netdev->mru = 8*1024;
|
||||
mhi_netdev->max_mtu = mhi_netdev->mru;
|
||||
}
|
||||
mhi_netdev->qmap_size = mhi_netdev->mru;
|
||||
|
||||
@ -2815,7 +2983,7 @@ static int mhi_netdev_probe(struct mhi_device *mhi_dev,
|
||||
#endif
|
||||
}
|
||||
|
||||
#if defined(CONFIG_PINCTRL_IPQ5018)
|
||||
#if defined(CONFIG_IPQ5018_RATE_CONTROL)
|
||||
mhi_netdev->mhi_rate_control = 1;
|
||||
#endif
|
||||
|
||||
@ -2825,7 +2993,8 @@ static int mhi_netdev_probe(struct mhi_device *mhi_dev,
|
||||
static const struct mhi_device_id mhi_netdev_match_table[] = {
|
||||
{ .chan = "IP_HW0" },
|
||||
{ .chan = "IP_SW0" },
|
||||
{ .chan = "IP_HW_ADPL" },
|
||||
// ADPL do not register as a netcard. xingduo.du 2023-02-20
|
||||
// { .chan = "IP_HW_ADPL" },
|
||||
{ },
|
||||
};
|
||||
|
||||
@@ -2850,7 +3019,7 @@ int __init mhi_device_netdev_init(struct dentry *parent)
         printk(KERN_ERR "mhi_device_netdev_init: driver load must after '/etc/modules.d/42-rmnet-nss'\n");
     }
 #endif

     mhi_netdev_create_debugfs_dir(parent);

     return mhi_driver_register(&mhi_netdev_driver);
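Note: the probe hunk above derives the advertised max_mtu from the aggregation buffer size (MRU) minus the per-packet QMAP headers. A quick sketch of the arithmetic, with header sizes taken from the uapi rmnet structs (both are 4 bytes):

    mru     = 15 * 1024;                                  /* 15360 bytes */
    max_mtu = mru - (sizeof(struct rmnet_map_header)      /*  4 bytes */
                   + sizeof(struct rmnet_map_v5_csum_header)); /* 4 bytes */
    /* => 15352 bytes; the MBIM path instead subtracts its own
       struct mhi_mbim_hdr, and MHI_NET_ETHER uses the MRU as-is */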
@@ -8,6 +8,7 @@
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/delay.h>
+#include <linux/version.h>
 #if 1
 static inline void *ipc_log_context_create(int max_num_pages,
                                            const char *modname, uint16_t user_version)
@@ -97,6 +98,44 @@ module_param( uci_msg_lvl, uint, S_IRUGO | S_IWUSR);
 #define MAX_UCI_DEVICES (64)
 #define QUEC_MHI_UCI_ALWAYS_OPEN //by now, sdx20 can not handle "start-reset-start" operation, so the simply solution is keep start state

+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0))
+#ifdef TCGETS2
+__weak int user_termios_to_kernel_termios(struct ktermios *k,
+                                          struct termios2 __user *u)
+{
+    return copy_from_user(k, u, sizeof(struct termios2));
+}
+__weak int kernel_termios_to_user_termios(struct termios2 __user *u,
+                                          struct ktermios *k)
+{
+    return copy_to_user(u, k, sizeof(struct termios2));
+}
+__weak int user_termios_to_kernel_termios_1(struct ktermios *k,
+                                            struct termios __user *u)
+{
+    return copy_from_user(k, u, sizeof(struct termios));
+}
+__weak int kernel_termios_to_user_termios_1(struct termios __user *u,
+                                            struct ktermios *k)
+{
+    return copy_to_user(u, k, sizeof(struct termios));
+}
+
+#else
+
+__weak int user_termios_to_kernel_termios(struct ktermios *k,
+                                          struct termios __user *u)
+{
+    return copy_from_user(k, u, sizeof(struct termios));
+}
+__weak int kernel_termios_to_user_termios(struct termios __user *u,
+                                          struct ktermios *k)
+{
+    return copy_to_user(u, k, sizeof(struct termios));
+}
+#endif /* TCGETS2 */
+#endif
+
 static DECLARE_BITMAP(uci_minors, MAX_UCI_DEVICES);
 static struct mhi_uci_drv mhi_uci_drv;

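Note: around Linux 6.1 the termios conversion helpers were moved into the private termios_internal.h header, so an out-of-tree module can no longer rely on the kernel providing them; the hunk above therefore defines __weak fallbacks. Marking them __weak keeps the module linkable either way: if a strong definition is ever visible at link time it wins, otherwise the fallback is used, e.g.:

    /* fallback used only when no strong definition is linked in */
    __weak int kernel_termios_to_user_termios_1(struct termios __user *u,
                                                struct ktermios *k)
    {
        /* copy_to_user returns the number of bytes NOT copied (0 on success) */
        return copy_to_user(u, k, sizeof(struct termios));
    }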
@@ -256,7 +295,8 @@ static unsigned int mhi_uci_poll(struct file *file, poll_table *wait)
     unsigned int mask = 0;

     poll_wait(file, &uci_dev->dl_chan.wq, wait);
-    poll_wait(file, &uci_dev->ul_chan.wq, wait);
+    // ADPL and QDSS do not need poll write. xingduo.du 2023-02-16
+    // poll_wait(file, &uci_dev->ul_chan.wq, wait);

     uci_chan = &uci_dev->dl_chan;
     spin_lock_bh(&uci_chan->lock);
|
||||
}
|
||||
spin_unlock_bh(&uci_chan->lock);
|
||||
|
||||
uci_chan = &uci_dev->ul_chan;
|
||||
spin_lock_bh(&uci_chan->lock);
|
||||
if (!uci_dev->enabled) {
|
||||
mask |= POLLERR;
|
||||
} else if (mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE) > 0) {
|
||||
MSG_VERB("Client can write to node\n");
|
||||
mask |= POLLOUT | POLLWRNORM;
|
||||
// ADPL and QDSS are single channel, ul_chan not be initilized. xingduo.du 2023-02-27
|
||||
if (mhi_dev->ul_chan) {
|
||||
poll_wait(file, &uci_dev->ul_chan.wq, wait);
|
||||
uci_chan = &uci_dev->ul_chan;
|
||||
spin_lock_bh(&uci_chan->lock);
|
||||
if (!uci_dev->enabled) {
|
||||
mask |= POLLERR;
|
||||
} else if (mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE) > 0) {
|
||||
MSG_VERB("Client can write to node\n");
|
||||
mask |= POLLOUT | POLLWRNORM;
|
||||
}
|
||||
|
||||
if (!uci_dev->enabled)
|
||||
mask |= POLLHUP;
|
||||
if (uci_dev->rx_error)
|
||||
mask |= POLLERR;
|
||||
|
||||
spin_unlock_bh(&uci_chan->lock);
|
||||
}
|
||||
|
||||
if (!uci_dev->enabled)
|
||||
mask |= POLLHUP;
|
||||
if (uci_dev->rx_error)
|
||||
mask |= POLLERR;
|
||||
|
||||
spin_unlock_bh(&uci_chan->lock);
|
||||
|
||||
MSG_LOG("Client attempted to poll, returning mask 0x%x\n", mask);
|
||||
|
||||
return mask;
|
||||
@@ -778,6 +822,10 @@ static void mhi_dl_xfer_cb(struct mhi_device *mhi_dev,
     unsigned nr_trb = uci_dev->nr_trb;

     buf = &uci_dev->uci_buf[nr_trb];
+    if (buf == NULL) {
+        MSG_ERR("buf = NULL");
+        return;
+    }
     if (buf->nr_trb != nr_trb || buf->data != mhi_result->buf_addr)
     {
         uci_dev->rx_error++;
@@ -860,7 +908,8 @@ static void mhi_dl_xfer_cb(struct mhi_device *mhi_dev,
     wake_up(&uci_chan->wq);
 }

-#define DIAG_MAX_PCIE_PKT_SZ 2048 //define by module
+// repaire sdx6x module can not read qdb file. xingduo.du 2023-01-18
+#define DIAG_MAX_PCIE_PKT_SZ 8192 //define by module

 /* .driver_data stores max mtu */
 static const struct mhi_device_id mhi_uci_match_table[] = {
@@ -872,6 +921,12 @@ static const struct mhi_device_id mhi_uci_match_table[] = {
     { .chan = "QMI0", .driver_data = 0x1000 },
     { .chan = "QMI1", .driver_data = 0x1000 },
     { .chan = "DUN", .driver_data = 0x1000 },
+#ifdef ENABLE_ADPL
+    { .chan = "ADPL", .driver_data = 0x1000 },
+#endif
+#ifdef ENABLE_QDSS
+    { .chan = "QDSS", .driver_data = 0x1000 },
+#endif
     {},
 };

@@ -896,7 +951,11 @@ int mhi_device_uci_init(void)
         return ret;

     mhi_uci_drv.major = ret;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 4, 0))
+    mhi_uci_drv.class = class_create(MHI_UCI_DRIVER_NAME);
+#else
     mhi_uci_drv.class = class_create(THIS_MODULE, MHI_UCI_DRIVER_NAME);
+#endif
     if (IS_ERR(mhi_uci_drv.class)) {
         unregister_chrdev(mhi_uci_drv.major, MHI_UCI_DRIVER_NAME);
         return -ENODEV;