package: add wwan Quectel modem packages and luci

This commit is contained in:
coolsnowwolf 2023-02-02 19:43:57 +08:00
parent f143a15985
commit a8b2e68da8
227 changed files with 91084 additions and 0 deletions

View File

@@ -0,0 +1,20 @@
#
# Copyright (C) 2015 OpenWrt.org
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.
#
include $(TOPDIR)/rules.mk
LUCI_TITLE:=Modem Server
LUCI_DEPENDS:=+luci-compat +kmod-usb-net +kmod-usb-net-cdc-ether +kmod-usb-acm \
+kmod-usb-net-qmi-wwan +kmod-usb-net-rndis +kmod-usb-serial-qualcomm \
+kmod-usb-net-sierrawireless +kmod-usb-ohci +kmod-usb-serial \
+kmod-usb-serial-option \
+kmod-usb2 +kmod-usb3 \
+quectel-CM-5G +kmod-gobinet
include $(TOPDIR)/feeds/luci/luci.mk
# call BuildPackage - OpenWrt buildroot signature

View File

@@ -0,0 +1,9 @@
module("luci.controller.gobinetmodem", package.seeall)
function index()
if not nixio.fs.access("/etc/config/gobinetmodem") then
return
end
entry({"admin", "network", "gobinetmodem"}, cbi("gobinetmodem"), _("Gobinet Modem Server"), 80).dependent=false
end

View File

@@ -0,0 +1,39 @@
-- Copyright 2016 David Thornley <david.thornley@touchstargroup.com>
-- Licensed to the public under the Apache License 2.0.
mp=Map("gobinetmodem",translate("gobinet Modem Server"))
mp.description=translate("Modem Server For OpenWrt")
s=mp:section(TypedSection,"service", "Base Setting")
s.anonymous = true
enabled=s:option(Flag,"enabled",translate("Enable"))
enabled.default=0
enabled.rmempty=false
apn=s:option(Value,"apn",translate("APN"))
apn.rmempty=true
pincode=s:option(Value,"pincode",translate("PIN"))
pincode.rmempty=true
username=s:option(Value,"username",translate("PAP/CHAP username"))
username.rmempty=true
password=s:option(Value,"password",translate("PAP/CHAP password"))
password.rmempty=true
auth=s:option(Value,"auth",translate("Authentication Type"))
auth.rmempty=true
auth:value("",translate("-- Please choose --"))
auth:value("both","PAP/CHAP (both)")
auth:value("pap","PAP")
auth:value("chap","CHAP")
auth:value("none","NONE")
tool=s:option(Value,"tool",translate("Tools"))
tool:value("quectel-CM","quectel-CM")
tool.rmempty=true
return mp

View File

@@ -0,0 +1,24 @@
msgid ""
msgstr ""
"Project-Id-Version: \n"
"POT-Creation-Date: \n"
"PO-Revision-Date: \n"
"Last-Translator: dingpengyu <dingpengyu06@gmail.com>\n"
"Language-Team: \n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Language: zh_CN\n"
"X-Generator: Poedit 2.3.1\n"
msgid "Base Setting"
msgstr "基本设置"
msgid "gobinet Modem Server"
msgstr "gobinet移动网络拨号服务"
msgid "Modem Server For OpenWrt"
msgstr "OpenWrt移动网络拨号服务"
msgid "Tools"
msgstr "拨号工具"

View File

@@ -0,0 +1,4 @@
config service
option tool 'quectel-CM'
option enabled '0'

View File

@@ -0,0 +1,80 @@
#!/bin/sh /etc/rc.common
# Copyright (C) 2006-2014 OpenWrt.org
START=99
STOP=16
USE_PROCD=1
# use procd to manage this service
run_4g()
{
local enabled
config_get_bool enabled $1 enabled
echo "run 4G" >> /tmp/log4g
if [ "$enabled" = "1" ]; then
local user
local password
local apn
local auth
local pincode
local device
local tool
# echo "enable 4G" >> /tmp/log4g
config_get user $1 username
config_get password $1 password
config_get apn $1 apn
config_get auth $1 auth
config_get pincode $1 pincode
config_get device $1 device
config_get tool $1 tool
config_get tty $1 tty
config_get atcmd $1 atcmd
devname="$(basename "$device")"
devpath="$(readlink -f /sys/class/usbmisc/$devname/device/)"
ifname="$( ls "$devpath"/net )"
if [ "$tool" = "at" ];then
at_tool "$atcmd" -d $tty
else
procd_open_instance
# create a procd instance; procd allows an application to have multiple instances
# "ubus call service list" shows the running instances
procd_set_param command $tool -i $ifname -s $apn
if [ "$password" != "" ];then
procd_append_param command $user $password $auth
fi
if [ "$pincode" != "" ]; then
procd_append_param command -p $pincode
fi
# procd_append_param command -f /tmp/4g.log
procd_set_param respawn
echo "quectel-CM has started."
procd_close_instance
# close the instance
fi
fi
}
service_triggers()
{
procd_add_reload_trigger "gobinetmodem"
}
start_service() {
config_load gobinetmodem
config_foreach run_4g service
}
stop_service()
{
echo "4G stop" >> /tmp/log4g
killall quectel-CM
echo "quectel-CM has stoped."
}
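
Note: as with any procd-managed OpenWrt service, the script above can be driven from the shell. This is standard init.d/ubus usage, not specific to this package:

/etc/init.d/gobinetmodem enable   # create rc.d symlinks so the service starts on boot
/etc/init.d/gobinetmodem start    # start now; procd supervises and respawns quectel-CM
ubus call service list            # list procd instances, as the comments above mention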

View File

@@ -0,0 +1,12 @@
#!/bin/sh
uci -q batch <<-EOF >/dev/null
delete ucitrack.@gobinetmodem[-1]
add ucitrack gobinetmodem
set ucitrack.@gobinetmodem[-1].init=gobinetmodem
commit ucitrack
EOF
rm -f /tmp/luci-indexcache
exit 0

View File

@@ -0,0 +1,15 @@
#
# Copyright (C) 2015 OpenWrt.org
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.
#
include $(TOPDIR)/rules.mk
LUCI_TITLE:=PCI Modem Server
LUCI_DEPENDS:=+kmod-pcie_mhi +pciutils +quectel-CM-5G
include $(TOPDIR)/feeds/luci/luci.mk
# call BuildPackage - OpenWrt buildroot signature

View File

@@ -0,0 +1,9 @@
module("luci.controller.pcimodem", package.seeall)
function index()
if not nixio.fs.access("/etc/config/pcimodem") then
return
end
entry({"admin", "network", "pcimodem"}, cbi("pcimodem"), _("PCI Modem Server"), 80).dependent=false
end

View File

@@ -0,0 +1,39 @@
-- Copyright 2016 David Thornley <david.thornley@touchstargroup.com>
-- Licensed to the public under the Apache License 2.0.
mp=Map("pcimodem",translate("PCI Modem Server"))
mp.description=translate("Modem Server For OpenWrt")
s=mp:section(TypedSection,"service", "Base Setting")
s.anonymous = true
enabled=s:option(Flag,"enabled",translate("Enable"))
enabled.default=0
enabled.rmempty=false
apn=s:option(Value,"apn",translate("APN"))
apn.rmempty=true
pincode=s:option(Value,"pincode",translate("PIN"))
pincode.rmempty=true
username=s:option(Value,"username",translate("PAP/CHAP username"))
username.rmempty=true
password=s:option(Value,"password",translate("PAP/CHAP password"))
password.rmempty=true
auth=s:option(Value,"auth",translate("Authentication Type"))
auth.rmempty=true
auth:value("",translate("-- Please choose --"))
auth:value("both","PAP/CHAP (both)")
auth:value("pap","PAP")
auth:value("chap","CHAP")
auth:value("none","NONE")
tool=s:option(Value,"tool",translate("Tools"))
tool:value("quectel-CM","quectel-CM")
tool.rmempty=true
return mp

View File

@@ -0,0 +1,24 @@
msgid ""
msgstr ""
"Project-Id-Version: \n"
"POT-Creation-Date: \n"
"PO-Revision-Date: \n"
"Last-Translator: dingpengyu <dingpengyu06@gmail.com>\n"
"Language-Team: \n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Language: zh_CN\n"
"X-Generator: Poedit 2.3.1\n"
msgid "Base Setting"
msgstr "基本设置"
msgid "PCI Modem Server"
msgstr "PCI移动网络拨号服务"
msgid "Modem Server For OpenWrt"
msgstr "OpenWrt移动网络拨号服务"
msgid "Tools"
msgstr "拨号工具"

View File

@@ -0,0 +1,4 @@
config service
option tool 'quectel-CM'
option enabled '0'

View File

@@ -0,0 +1,75 @@
#!/bin/sh /etc/rc.common
# Copyright (C) 2006-2014 OpenWrt.org
START=99
STOP=16
USE_PROCD=1
# use procd to manage this service
run_5g()
{
local enabled
config_get_bool enabled $1 enabled
echo "run 4G" >> /tmp/log4g
if [ "$enabled" = "1" ]; then
local user
local password
local apn
local auth
local pincode
local tool
# echo "enable 5G" >> /tmp/log5g
config_get user $1 username
config_get password $1 password
config_get apn $1 apn
config_get auth $1 auth
config_get pincode $1 pincode
config_get tool $1 tool
config_get tty $1 tty
config_get atcmd $1 atcmd
if [ "$tool" = "at" ];then
at_tool "$atcmd" -d $tty
else
procd_open_instance
# create a procd instance; procd allows an application to have multiple instances
# "ubus call service list" shows the running instances
procd_set_param command $tool -i rmnet_mhi0 -s $apn
if [ "$password" != "" ];then
procd_append_param command $user $password $auth
fi
if [ "$pincode" != "" ]; then
procd_append_param command -p $pincode
fi
# procd_append_param command -f /tmp/4g.log
procd_set_param respawn
echo "quectel-CM has started."
procd_close_instance
# close the instance
fi
fi
}
service_triggers()
{
procd_add_reload_trigger "pcimodem"
}
start_service() {
config_load pcimodem
config_foreach run_5g service
}
stop_service()
{
echo "5G stop" >> /tmp/log5g
killall quectel-CM
echo "quectel-CM has stoped."
}

View File

@@ -0,0 +1,12 @@
#!/bin/sh
uci -q batch <<-EOF >/dev/null
delete ucitrack.@pcimodem[-1]
add ucitrack pcimodem
set ucitrack.@pcimodem[-1].init=pcimodem
commit ucitrack
EOF
rm -f /tmp/luci-indexcache
exit 0

View File

@@ -0,0 +1,15 @@
#
# Copyright (C) 2015 OpenWrt.org
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.
#
include $(TOPDIR)/rules.mk
LUCI_TITLE:=SPD Modem Server
LUCI_DEPENDS:=
include $(TOPDIR)/feeds/luci/luci.mk
# call BuildPackage - OpenWrt buildroot signature

View File

@@ -0,0 +1,9 @@
module("luci.controller.spdmodem", package.seeall)
function index()
if not nixio.fs.access("/etc/config/spdmodem") then
return
end
entry({"admin", "network", "spdmodem"}, cbi("spdmodem"), _("SPD Modem Server"), 80).dependent=false
end

View File

@@ -0,0 +1,39 @@
-- Copyright 2016 David Thornley <david.thornley@touchstargroup.com>
-- Licensed to the public under the Apache License 2.0.
mp=Map("spdmodem",translate("SPD Modem Server"))
mp.description=translate("Modem Server For OpenWrt")
s=mp:section(TypedSection,"service", "Base Setting")
s.anonymous = true
enabled=s:option(Flag,"enabled",translate("Enable"))
enabled.default=0
enabled.rmempty=false
apn=s:option(Value,"apn",translate("APN"))
apn.rmempty=true
pincode=s:option(Value,"pincode",translate("PIN"))
pincode.rmempty=true
username=s:option(Value,"username",translate("PAP/CHAP username"))
username.rmempty=true
password=s:option(Value,"password",translate("PAP/CHAP password"))
password.rmempty=true
auth=s:option(Value,"auth",translate("Authentication Type"))
auth.rmempty=true
auth:value("",translate("-- Please choose --"))
auth:value("both","PAP/CHAP (both)")
auth:value("pap","PAP")
auth:value("chap","CHAP")
auth:value("none","NONE")
tool=s:option(Value,"tool",translate("Tools"))
tool:value("quectel-CM","quectel-CM")
tool.rmempty=true
return mp

View File

@@ -0,0 +1,24 @@
msgid ""
msgstr ""
"Project-Id-Version: \n"
"POT-Creation-Date: \n"
"PO-Revision-Date: \n"
"Last-Translator: dingpengyu <dingpengyu06@gmail.com>\n"
"Language-Team: \n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Language: zh_CN\n"
"X-Generator: Poedit 2.3.1\n"
msgid "Base Setting"
msgstr "基本设置"
msgid "SPD Modem Server"
msgstr "SPD移动网络拨号服务"
msgid "Modem Server For OpenWrt"
msgstr "OpenWrt移动网络拨号服务"
msgid "Tools"
msgstr "拨号工具"

View File

@@ -0,0 +1,4 @@
config service
option tool 'quectel-CM'
option enabled '0'

View File

@@ -0,0 +1,75 @@
#!/bin/sh /etc/rc.common
# Copyright (C) 2006-2014 OpenWrt.org
START=99
STOP=16
USE_PROCD=1
# use procd to manage this service
run_5g()
{
local enabled
config_get_bool enabled $1 enabled
echo "run 4G" >> /tmp/log4g
if [ "$enabled" = "1" ]; then
local user
local password
local apn
local auth
local pincode
local tool
# echo "enable 5G" >> /tmp/log5g
config_get user $1 username
config_get password $1 password
config_get apn $1 apn
config_get auth $1 auth
config_get pincode $1 pincode
config_get tool $1 tool
config_get tty $1 tty
config_get atcmd $1 atcmd
if [ "$tool" = "at" ];then
at_tool "$atcmd" -d $tty
else
procd_open_instance
# create a procd instance; procd allows an application to have multiple instances
# "ubus call service list" shows the running instances
procd_set_param command $tool -i -s $apn
if [ "$password" != "" ];then
procd_append_param command $user $password $auth
fi
if [ "$pincode" != "" ]; then
procd_append_param command -p $pincode
fi
# procd_append_param command -f /tmp/4g.log
procd_set_param respawn
echo "quectel-CM has started."
procd_close_instance
# close the instance
fi
fi
}
service_triggers()
{
procd_add_reload_trigger "spdmodem"
}
start_service() {
config_load spdmodem
config_foreach run_5g service
}
stop_service()
{
echo "5G stop" >> /tmp/log5g
killall quectel-CM
echo "quectel-CM has stoped."
}

View File

@@ -0,0 +1,12 @@
#!/bin/sh
uci -q batch <<-EOF >/dev/null
delete ucitrack.@spdmodem[-1]
add ucitrack spdmodem
set ucitrack.@spdmodem[-1].init=spdmodem
commit ucitrack
EOF
rm -f /tmp/luci-indexcache
exit 0

View File

@@ -0,0 +1,20 @@
#
# Copyright (C) 2015 OpenWrt.org
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.
#
include $(TOPDIR)/rules.mk
LUCI_TITLE:=Modem Server
LUCI_DEPENDS:=+luci-compat +kmod-usb-net +kmod-usb-net-cdc-ether +kmod-usb-acm \
+kmod-usb-net-qmi-wwan +kmod-usb-net-rndis +kmod-usb-serial-qualcomm \
+kmod-usb-net-sierrawireless +kmod-usb-ohci +kmod-usb-serial \
+kmod-usb-serial-option +kmod-usb-wdm \
+kmod-usb2 +kmod-usb3 \
+quectel-CM-5G +kmod-qmi_wwan_q +kmod-usb-net-cdc-mbim
include $(TOPDIR)/feeds/luci/luci.mk
# call BuildPackage - OpenWrt buildroot signature

View File

@@ -0,0 +1,9 @@
module("luci.controller.usbmodem", package.seeall)
function index()
if not nixio.fs.access("/etc/config/usbmodem") then
return
end
entry({"admin", "network", "usbmodem"}, cbi("usbmodem"), _("USB Modem Server"), 80).dependent=false
end

View File

@@ -0,0 +1,51 @@
-- Copyright 2016 David Thornley <david.thornley@touchstargroup.com>
-- Licensed to the public under the Apache License 2.0.
mp=Map("usbmodem",translate("USB Modem Server"))
mp.description=translate("Modem Server For OpenWrt")
s=mp:section(TypedSection,"service", "Base Setting")
s.anonymous = true
enabled=s:option(Flag,"enabled",translate("Enable"))
enabled.default=0
enabled.rmempty=false
device=s:option(Value, "device", translate("Modem device"))
device.rmempty = false
local device_suggestions = nixio.fs.glob("/dev/cdc-wdm*")
if device_suggestions then
local node
for node in device_suggestions do
device:value(node)
end
end
apn=s:option(Value,"apn",translate("APN"))
apn.rmempty=true
pincode=s:option(Value,"pincode",translate("PIN"))
pincode.rmempty=true
username=s:option(Value,"username",translate("PAP/CHAP username"))
username.rmempty=true
password=s:option(Value,"password",translate("PAP/CHAP password"))
password.rmempty=true
auth=s:option(Value,"auth",translate("Authentication Type"))
auth.rmempty=true
auth:value("",translate("-- Please choose --"))
auth:value("both","PAP/CHAP (both)")
auth:value("pap","PAP")
auth:value("chap","CHAP")
auth:value("none","NONE")
tool=s:option(Value,"tool",translate("Tools"))
tool:value("quectel-CM","quectel-CM")
tool.rmempty=true
return mp

View File

@@ -0,0 +1,24 @@
msgid ""
msgstr ""
"Project-Id-Version: \n"
"POT-Creation-Date: \n"
"PO-Revision-Date: \n"
"Last-Translator: dingpengyu <dingpengyu06@gmail.com>\n"
"Language-Team: \n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Language: zh_CN\n"
"X-Generator: Poedit 2.3.1\n"
msgid "Base Setting"
msgstr "基本设置"
msgid "USB Modem Server"
msgstr "USB移动网络拨号服务"
msgid "Modem Server For OpenWrt"
msgstr "OpenWrt移动网络拨号服务"
msgid "Tools"
msgstr "拨号工具"

View File

@@ -0,0 +1,5 @@
config service
option tool 'quectel-CM'
option device '/dev/cdc-wdm0'
option enabled '0'
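
For illustration, a hypothetical fully populated section using the option names the CBI model writes and the init script reads (all values below are examples only):

config service
	option enabled '1'
	option tool 'quectel-CM'
	option device '/dev/cdc-wdm0'
	option apn 'internet'
	option auth 'both'
	option username 'user'
	option password 'pass'
	option pincode '1234'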

View File

@@ -0,0 +1,80 @@
#!/bin/sh /etc/rc.common
# Copyright (C) 2006-2014 OpenWrt.org
START=99
STOP=16
USE_PROCD=1
# use procd to manage this service
run_4g()
{
local enabled
config_get_bool enabled $1 enabled
echo "run 4G" >> /tmp/log4g
if [ "$enabled" = "1" ]; then
local user
local password
local apn
local auth
local pincode
local device
local tool
# echo "enable 4G" >> /tmp/log4g
config_get user $1 username
config_get password $1 password
config_get apn $1 apn
config_get auth $1 auth
config_get pincode $1 pincode
config_get device $1 device
config_get tool $1 tool
config_get tty $1 tty
config_get atcmd $1 atcmd
devname="$(basename "$device")"
devpath="$(readlink -f /sys/class/usbmisc/$devname/device/)"
ifname="$( ls "$devpath"/net )"
if [ "$tool" = "at" ];then
at_tool "$atcmd" -d $tty
else
procd_open_instance
# create a procd instance; procd allows an application to have multiple instances
# "ubus call service list" shows the running instances
procd_set_param command $tool -i $ifname -s $apn
if [ "$password" != "" ];then
procd_append_param command $user $password $auth
fi
if [ "$pincode" != "" ]; then
procd_append_param command -p $pincode
fi
# procd_append_param command -f /tmp/4g.log
procd_set_param respawn
echo "quectel-CM has started."
procd_close_instance
# close the instance
fi
fi
}
service_triggers()
{
procd_add_reload_trigger "usbmodem"
}
start_service() {
config_load usbmodem
config_foreach run_4g service
}
stop_service()
{
echo "4G stop" >> /tmp/log4g
killall quectel-CM
echo "quectel-CM has stoped."
}

View File

@@ -0,0 +1,12 @@
#!/bin/sh
uci -q batch <<-EOF >/dev/null
delete ucitrack.@usbmodem[-1]
add ucitrack usbmodem
set ucitrack.@usbmodem[-1].init=usbmodem
commit ucitrack
EOF
rm -f /tmp/luci-indexcache
exit 0

View File

@@ -0,0 +1,53 @@
#
# Copyright (C) 2007-2013 OpenWrt.org
# Copyright (C) 2010 Vertical Communications
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.
#
include $(TOPDIR)/rules.mk
PKG_NAME:=luci-proto-3x
PKG_VERSION:=1.0
PKG_RELEASE:=1
PKG_MAINTAINER:=Dairyman
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)-$(PKG_RELEASE)
include $(INCLUDE_DIR)/package.mk
define Package/luci-proto-3x/Default
VERSION:=$(PKG_VERSION)-$(PKG_RELEASE)
URL:=http://openwrt.org/
MAINTAINER:=Dairyman
endef
define Package/luci-proto-3x
$(call Package/luci-proto-3x/Default)
SECTION:=net
CATEGORY:=ROOter
SUBMENU:=Protocols
TITLE:=Support for 3x
endef
define Package/luci-proto-3x/description
This package contains LuCI support for 3x
endef
define Build/Prepare
mkdir -p $(PKG_BUILD_DIR)
endef
define Build/Configure
endef
define Build/Compile/Default
endef
Build/Compile = $(Build/Compile/Default)
define Package/luci-proto-3x/install
$(CP) ./files/* $(1)/
endef
$(eval $(call BuildPackage,luci-proto-3x))

View File

@@ -0,0 +1,146 @@
-- Copyright 2011 Jo-Philipp Wich <jow@openwrt.org>
-- Licensed to the public under the Apache License 2.0.
local map, section, net = ...
local device, apn, service, pincode, username, password, dialnumber
local ipv6, maxwait, defaultroute, metric, peerdns, dns,
keepalive_failure, keepalive_interval, demand
device = section:taboption("general", Value, "device", translate("Modem device"))
device.rmempty = false
local device_suggestions = nixio.fs.glob("/dev/tty[A-Z]*")
or nixio.fs.glob("/dev/tts/*")
if device_suggestions then
local node
for node in device_suggestions do
device:value(node)
end
end
service = section:taboption("general", Value, "service", translate("Service Type"))
service:value("", translate("-- Please choose --"))
service:value("umts", "UMTS/GPRS")
service:value("umts_only", translate("UMTS only"))
service:value("gprs_only", translate("GPRS only"))
service:value("evdo", "CDMA/EV-DO")
apn = section:taboption("general", Value, "apn", translate("APN"))
pincode = section:taboption("general", Value, "pincode", translate("PIN"))
username = section:taboption("general", Value, "username", translate("PAP/CHAP username"))
password = section:taboption("general", Value, "password", translate("PAP/CHAP password"))
password.password = true
dialnumber = section:taboption("general", Value, "dialnumber", translate("Dial number"))
dialnumber.placeholder = "*99***1#"
if luci.model.network:has_ipv6() then
ipv6 = section:taboption("advanced", Flag, "ipv6",
translate("Enable IPv6 negotiation on the PPP link"))
ipv6.default = ipv6.disabled
end
maxwait = section:taboption("advanced", Value, "maxwait",
translate("Modem init timeout"),
translate("Maximum amount of seconds to wait for the modem to become ready"))
maxwait.placeholder = "20"
maxwait.datatype = "min(1)"
defaultroute = section:taboption("advanced", Flag, "defaultroute",
translate("Use default gateway"),
translate("If unchecked, no default route is configured"))
defaultroute.default = defaultroute.enabled
metric = section:taboption("advanced", Value, "metric",
translate("Use gateway metric"))
metric.placeholder = "0"
metric.datatype = "uinteger"
metric:depends("defaultroute", defaultroute.enabled)
peerdns = section:taboption("advanced", Flag, "peerdns",
translate("Use DNS servers advertised by peer"),
translate("If unchecked, the advertised DNS server addresses are ignored"))
peerdns.default = peerdns.enabled
dns = section:taboption("advanced", DynamicList, "dns",
translate("Use custom DNS servers"))
dns:depends("peerdns", "")
dns.datatype = "ipaddr"
dns.cast = "string"
keepalive_failure = section:taboption("advanced", Value, "_keepalive_failure",
translate("LCP echo failure threshold"),
translate("Presume peer to be dead after given amount of LCP echo failures, use 0 to ignore failures"))
function keepalive_failure.cfgvalue(self, section)
local v = map:get(section, "keepalive")
if v and #v > 0 then
return tonumber(v:match("^(%d+)[ ,]+%d+") or v)
end
end
function keepalive_failure.write() end
function keepalive_failure.remove() end
keepalive_failure.placeholder = "0"
keepalive_failure.datatype = "uinteger"
keepalive_interval = section:taboption("advanced", Value, "_keepalive_interval",
translate("LCP echo interval"),
translate("Send LCP echo requests at the given interval in seconds, only effective in conjunction with failure threshold"))
function keepalive_interval.cfgvalue(self, section)
local v = map:get(section, "keepalive")
if v and #v > 0 then
return tonumber(v:match("^%d+[ ,]+(%d+)"))
end
end
function keepalive_interval.write(self, section, value)
local f = tonumber(keepalive_failure:formvalue(section)) or 0
local i = tonumber(value) or 5
if i < 1 then i = 1 end
if f > 0 then
map:set(section, "keepalive", "%d %d" %{ f, i })
else
map:del(section, "keepalive")
end
end
keepalive_interval.remove = keepalive_interval.write
keepalive_interval.placeholder = "5"
keepalive_interval.datatype = "min(1)"
demand = section:taboption("advanced", Value, "demand",
translate("Inactivity timeout"),
translate("Close inactive connection after the given amount of seconds, use 0 to persist connection"))
demand.placeholder = "0"
demand.datatype = "uinteger"

View File

@@ -0,0 +1,49 @@
-- Copyright 2018 Florian Eckert <fe@dev.tdt.de>
-- Licensed to the public under the Apache License 2.0.
local netmod = luci.model.network
local interface = luci.model.network.interface
local proto = netmod:register_protocol("3x")
function proto.get_i18n(self)
return luci.i18n.translate("UMTS/GPRS/EV-DO")
end
function proto.ifname(self)
return "3x-" .. self.sid
end
function proto.get_interface(self)
return interface(self:ifname(), self)
end
function proto.is_installed(self)
return nixio.fs.access("/lib/netifd/proto/3x.sh")
end
function proto.opkg_package(self)
return "comgt"
end
function proto.is_floating(self)
return true
end
function proto.is_virtual(self)
return true
end
function proto.get_interfaces(self)
return nil
end
function proto.contains_interface(self, ifc)
if self:is_floating() then
return (netmod:ifnameof(ifc) == self:ifname())
else
return netmod.protocol.contains_interface(self, ifc)
end
end
netmod:register_pattern_virtual("^3x%-%w")

View File

@@ -0,0 +1,11 @@
'use strict';'require rpc';'require uci';'require form';'require network';var callFileList=rpc.declare({object:'file',method:'list',params:['path'],expect:{entries:[]},filter:function(list,params){var rv=[];for(var i=0;i<list.length;i++)
if(list[i].name.match(/^tty[A-Z]/)||list[i].name.match(/^cdc-wdm/)||list[i].name.match(/^[0-9]+$/))
rv.push(params.path+list[i].name);return rv.sort();}});network.registerPatternVirtual(/^3x-.+$/);function write_keepalive(section_id,value){var f_opt=this.map.lookupOption('_keepalive_failure',section_id),i_opt=this.map.lookupOption('_keepalive_interval',section_id),f=(f_opt!=null)?+f_opt[0].formvalue(section_id):null,i=(i_opt!=null)?+i_opt[0].formvalue(section_id):null;if(f==null||f==''||isNaN(f))
f=0;if(i==null||i==''||isNaN(i)||i<1)
i=1;if(f>0)
uci.set('network',section_id,'keepalive','%d %d'.format(f,i));else
uci.unset('network',section_id,'keepalive');}
return network.registerProtocol('3x',{getI18n:function(){return _('UMTS/GPRS/EV-DO');},getIfname:function(){return this._ubus('l3_device')||'3x-%s'.format(this.sid);},getOpkgPackage:function(){return'comgt';},isFloating:function(){return true;},isVirtual:function(){return true;},getDevices:function(){return null;},containsDevice:function(ifname){return(network.getIfnameOf(ifname)==this.getIfname());},renderFormOptions:function(s){var o;o=s.taboption('general',form.Value,'device',_('Modem device'));o.rmempty=false;o.load=function(section_id){return callFileList('/dev/').then(L.bind(function(devices){for(var i=0;i<devices.length;i++)
this.value(devices[i]);return callFileList('/dev/tts/');},this)).then(L.bind(function(devices){for(var i=0;i<devices.length;i++)
this.value(devices[i]);return form.Value.prototype.load.apply(this,[section_id]);},this));};o=s.taboption('general',form.Value,'service',_('Service Type'));o.value('',_('-- Please choose --'));o.value('umts','UMTS/GPRS');o.value('umts_only',_('UMTS only'));o.value('gprs_only',_('GPRS only'));o.value('evdo','CDMA/EV-DO');s.taboption('general',form.Value,'apn',_('APN'));s.taboption('general',form.Value,'pincode',_('PIN'));s.taboption('general',form.Value,'username',_('PAP/CHAP username'));o=s.taboption('general',form.Value,'password',_('PAP/CHAP password'));o.password=true;o=s.taboption('general',form.Value,'dialnumber',_('Dial number'));o.placeholder='*99***1#';if(L.hasSystemFeature('ipv6')){o=s.taboption('advanced',form.ListValue,'ipv6',_('Obtain IPv6-Address'));o.value('auto',_('Automatic'));o.value('0',_('Disabled'));o.value('1',_('Manual'));o.default='auto';}
o=s.taboption('advanced',form.Value,'delay',_('Modem init timeout'),_('Maximum amount of seconds to wait for the modem to become ready'));o.placeholder='10';o.datatype='min(1)';o=s.taboption('advanced',form.Flag,'defaultroute',_('Use default gateway'),_('If unchecked, no default route is configured'));o.default=o.enabled;o=s.taboption('advanced',form.Value,'metric',_('Use gateway metric'));o.placeholder='0';o.datatype='uinteger';o.depends('defaultroute','1');o=s.taboption('advanced',form.Flag,'peerdns',_('Use DNS servers advertised by peer'),_('If unchecked, the advertised DNS server addresses are ignored'));o.default=o.enabled;o=s.taboption('advanced',form.DynamicList,'dns',_('Use custom DNS servers'));o.depends('peerdns','0');o.datatype='ipaddr';o=s.taboption('advanced',form.Value,'_keepalive_failure',_('LCP echo failure threshold'),_('Presume peer to be dead after given amount of LCP echo failures, use 0 to ignore failures'));o.placeholder='0';o.datatype='uinteger';o.write=write_keepalive;o.remove=write_keepalive;o.cfgvalue=function(section_id){var v=uci.get('network',section_id,'keepalive');if(typeof(v)=='string'&&v!=''){var m=v.match(/^(\d+)[ ,]\d+$/);return m?m[1]:v;}};o=s.taboption('advanced',form.Value,'_keepalive_interval',_('LCP echo interval'),_('Send LCP echo requests at the given interval in seconds, only effective in conjunction with failure threshold'));o.placeholder='5';o.datatype='min(1)';o.write=write_keepalive;o.remove=write_keepalive;o.cfgvalue=function(section_id){var v=uci.get('network',section_id,'keepalive');if(typeof(v)=='string'&&v!=''){var m=v.match(/^\d+[ ,](\d+)$/);return m?m[1]:v;}};o=s.taboption('advanced',form.Value,'demand',_('Inactivity timeout'),_('Close inactive connection after the given amount of seconds, use 0 to persist connection'));o.placeholder='0';o.datatype='uinteger';}});

View File

@@ -0,0 +1,53 @@
#
# Copyright (C) 2007-2013 OpenWrt.org
# Copyright (C) 2010 Vertical Communications
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.
#
include $(TOPDIR)/rules.mk
PKG_NAME:=luci-proto-mbim
PKG_VERSION:=1.0
PKG_RELEASE:=1
PKG_MAINTAINER:=Dairyman
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)-$(PKG_RELEASE)
include $(INCLUDE_DIR)/package.mk
define Package/luci-proto-mbim/Default
VERSION:=$(PKG_VERSION)-$(PKG_RELEASE)
URL:=http://openwrt.org/
MAINTAINER:=Dairyman
endef
define Package/luci-proto-mbim
$(call Package/luci-proto-mbim/Default)
SECTION:=net
CATEGORY:=ROOter
SUBMENU:=Protocols
TITLE:=Support for mbim
endef
define Package/luci-proto-mbim/description
This package contains LuCI support for mbim
endef
define Build/Prepare
mkdir -p $(PKG_BUILD_DIR)
endef
define Build/Configure
endef
define Build/Compile/Default
endef
Build/Compile = $(Build/Compile/Default)
define Package/luci-proto-mbim/install
$(CP) ./files/* $(1)/
endef
$(eval $(call BuildPackage,luci-proto-mbim))

View File

@@ -0,0 +1,45 @@
-- Copyright 2016 David Thornley <david.thornley@touchstargroup.com>
-- Licensed to the public under the Apache License 2.0.
local map, section, net = ...
local device, apn, pincode, username, password
local auth, ipv6
device = section:taboption("general", Value, "device", translate("Modem device"))
device.rmempty = false
local device_suggestions = nixio.fs.glob("/dev/cdc-wdm*")
if device_suggestions then
local node
for node in device_suggestions do
device:value(node)
end
end
apn = section:taboption("general", Value, "apn", translate("APN"))
pincode = section:taboption("general", Value, "pincode", translate("PIN"))
username = section:taboption("general", Value, "username", translate("PAP/CHAP username"))
password = section:taboption("general", Value, "password", translate("PAP/CHAP password"))
password.password = true
auth = section:taboption("general", Value, "auth", translate("Authentication Type"))
auth:value("", translate("-- Please choose --"))
auth:value("both", "PAP/CHAP (both)")
auth:value("pap", "PAP")
auth:value("chap", "CHAP")
auth:value("none", "NONE")
if luci.model.network:has_ipv6() then
ipv6 = section:taboption("advanced", Flag, "ipv6", translate("Enable IPv6 negotiation"))
ipv6.default = ipv6.disabled
end

View File

@@ -0,0 +1,55 @@
-- Copyright 2016 David Thornley <david.thornley@touchstargroup.com>
-- Licensed to the public under the Apache License 2.0.
local netmod = luci.model.network
local interface = luci.model.network.interface
local proto = netmod:register_protocol("mbim")
function proto.get_i18n(self)
return luci.i18n.translate("MBIM Cellular")
end
function proto.ifname(self)
local base = netmod._M.protocol
local ifname = base.ifname(self) -- call base class "protocol.ifname(self)"
-- Note: ifname might be nil if the adapter could not be determined through ubus (fall back to "mbim-" .. sid in this case)
if ifname == nil then
ifname = "mbim-" .. self.sid
end
return ifname
end
function proto.get_interface(self)
return interface(self:ifname(), self)
end
function proto.opkg_package(self)
return "rmbim"
end
function proto.is_installed(self)
return nixio.fs.access("/lib/netifd/proto/mbim.sh")
end
function proto.is_floating(self)
return true
end
function proto.is_virtual(self)
return true
end
function proto.get_interfaces(self)
return nil
end
function proto.contains_interface(self, ifc)
return (netmod:ifnameof(ifc) == self:ifname())
end
netmod:register_pattern_virtual("^mbim%-%w")
netmod:register_error_code("CALL_FAILED", luci.i18n.translate("Call failed"))
netmod:register_error_code("NO_CID", luci.i18n.translate("Unable to obtain client ID"))
netmod:register_error_code("PLMN_FAILED", luci.i18n.translate("Setting PLMN failed"))

View File

@@ -0,0 +1,107 @@
'use strict';
'require rpc';
'require form';
'require network';
var callFileList = rpc.declare({
object: 'file',
method: 'list',
params: [ 'path' ],
expect: { entries: [] },
filter: function(list, params) {
var rv = [];
for (var i = 0; i < list.length; i++)
if (list[i].name.match(/^cdc-wdm/))
rv.push(params.path + list[i].name);
return rv.sort();
}
});
network.registerPatternVirtual(/^mbim-.+$/);
network.registerErrorCode('CALL_FAILED', _('Call failed'));
network.registerErrorCode('NO_CID', _('Unable to obtain client ID'));
network.registerErrorCode('PLMN_FAILED', _('Setting PLMN failed'));
return network.registerProtocol('mbim', {
getI18n: function() {
return _('MBIM Cellular');
},
getIfname: function() {
return this._ubus('l3_device') || 'mbim-%s'.format(this.sid);
},
getOpkgPackage: function() {
return 'rmbim';
},
isFloating: function() {
return true;
},
isVirtual: function() {
return true;
},
getDevices: function() {
return null;
},
containsDevice: function(ifname) {
return (network.getIfnameOf(ifname) == this.getIfname());
},
renderFormOptions: function(s) {
var dev = this.getL3Device() || this.getDevice(), o;
o = s.taboption('general', form.Value, 'device', _('Modem device'));
o.rmempty = false;
o.load = function(section_id) {
return callFileList('/dev/').then(L.bind(function(devices) {
for (var i = 0; i < devices.length; i++)
this.value(devices[i]);
return form.Value.prototype.load.apply(this, [section_id]);
}, this));
};
s.taboption('general', form.Value, 'apn', _('APN'));
s.taboption('general', form.Value, 'pincode', _('PIN'));
o = s.taboption('general', form.ListValue, 'auth', _('Authentication Type'));
o.value('both', 'PAP/CHAP');
o.value('pap', 'PAP');
o.value('chap', 'CHAP');
o.value('none', 'NONE');
o.default = 'none';
o = s.taboption('general', form.Value, 'username', _('PAP/CHAP username'));
o.depends('auth', 'pap');
o.depends('auth', 'chap');
o.depends('auth', 'both');
o = s.taboption('general', form.Value, 'password', _('PAP/CHAP password'));
o.depends('auth', 'pap');
o.depends('auth', 'chap');
o.depends('auth', 'both');
o.password = true;
if (L.hasSystemFeature('ipv6')) {
o = s.taboption('advanced', form.Flag, 'ipv6', _('Enable IPv6 negotiation'));
o.default = o.disabled;
}
o = s.taboption('advanced', form.Value, 'delay', _('Modem init timeout'), _('Maximum amount of seconds to wait for the modem to become ready'));
o.placeholder = '10';
o.datatype = 'min(1)';
o = s.taboption('advanced', form.Value, 'mtu', _('Override MTU'));
o.placeholder = dev ? (dev.getMTU() || '1500') : '1500';
o.datatype = 'max(9200)';
o = s.taboption('general', form.ListValue, 'pdptype', _('PDP Type'));
o.value('ipv4v6', 'IPv4/IPv6');
o.value('ipv4', 'IPv4');
o.value('ipv6', 'IPv6');
o.default = 'ipv4v6';
}
});

View File

@@ -0,0 +1,47 @@
#
# Copyright (C) 2015 OpenWrt.org
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.
#
include $(TOPDIR)/rules.mk
PKG_NAME:=gobinet
PKG_VERSION:=1.6.3
PKG_RELEASE:=1
include $(INCLUDE_DIR)/kernel.mk
include $(INCLUDE_DIR)/package.mk
define KernelPackage/gobinet
SUBMENU:=Gobinet Support
TITLE:=Quectel Linux USB Gobinet Driver
DEPENDS:=+kmod-usb-net
FILES:=$(PKG_BUILD_DIR)/GobiNet.ko
AUTOLOAD:=$(call AutoLoad,81,GobiNet)
endef
define KernelPackage/gobinet/description
Quectel Linux USB gobinet Driver
endef
MAKE_OPTS:= \
ARCH="$(LINUX_KARCH)" \
CROSS_COMPILE="$(TARGET_CROSS)" \
CXXFLAGS="$(TARGET_CXXFLAGS)" \
M="$(PKG_BUILD_DIR)" \
$(EXTRA_KCONFIG)
define Build/Prepare
mkdir -p $(PKG_BUILD_DIR)
$(CP) ./src/* $(PKG_BUILD_DIR)/
endef
define Build/Compile
$(MAKE) -C "$(LINUX_DIR)" \
$(MAKE_OPTS) \
modules
endef
$(eval $(call KernelPackage,gobinet))

File diff suppressed because it is too large

View File

@@ -0,0 +1,43 @@
obj-m := GobiNet.o
GobiNet-objs := GobiUSBNet.o QMIDevice.o QMI.o
PWD := $(shell pwd)
OUTPUTDIR=/lib/modules/`uname -r`/kernel/drivers/net/usb/
ifeq ($(ARCH),)
ARCH := $(shell uname -m)
endif
ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE :=
endif
ifeq ($(KDIR),)
KDIR := /lib/modules/$(shell uname -r)/build
ifeq ($(ARCH),i686)
ifeq ($(wildcard $(KDIR)/arch/$(ARCH)),)
ARCH=i386
endif
endif
endif
$(shell rm -rf usbnet.h)
ifneq ($(wildcard $(KDIR)/drivers/usb/net/usbnet.h),)
$(shell ln -s $(KDIR)/drivers/usb/net/usbnet.h usbnet.h)
endif
ifneq ($(wildcard $(KDIR)/drivers/net/usb/usbnet.h),)
$(shell ln -s $(KDIR)/drivers/net/usb/usbnet.h usbnet.h)
endif
default:
ln -sf makefile Makefile
$(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) modules
install: default
mkdir -p $(OUTPUTDIR)
cp -f GobiNet.ko $(OUTPUTDIR)
depmod
modprobe -r GobiNet
modprobe GobiNet
clean:
rm -rf Makefile usbnet.h
rm -rf *.o *~ core .depend .*.cmd *.ko *.mod.c .tmp_versions Module.* modules.order

File diff suppressed because it is too large

View File

@@ -0,0 +1,337 @@
/*===========================================================================
FILE:
QMI.h
DESCRIPTION:
Qualcomm QMI driver header
FUNCTIONS:
Generic QMUX functions
ParseQMUX
FillQMUX
Generic QMI functions
GetTLV
ValidQMIMessage
GetQMIMessageID
Get sizes of buffers needed by QMI requests
QMUXHeaderSize
QMICTLGetClientIDReqSize
QMICTLReleaseClientIDReqSize
QMICTLReadyReqSize
QMIWDSSetEventReportReqSize
QMIWDSGetPKGSRVCStatusReqSize
QMIDMSGetMEIDReqSize
QMICTLSyncReqSize
Fill Buffers with QMI requests
QMICTLGetClientIDReq
QMICTLReleaseClientIDReq
QMICTLReadyReq
QMIWDSSetEventReportReq
QMIWDSGetPKGSRVCStatusReq
QMIDMSGetMEIDReq
QMICTLSetDataFormatReq
QMICTLSyncReq
Parse data from QMI responses
QMICTLGetClientIDResp
QMICTLReleaseClientIDResp
QMIWDSEventResp
QMIDMSGetMEIDResp
Copyright (c) 2011, Code Aurora Forum. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Code Aurora Forum nor
the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
===========================================================================*/
#pragma once
/*=========================================================================*/
// Definitions
/*=========================================================================*/
extern int quec_debug;
// DBG macro
#define DBG( format, arg... ) do { \
if (quec_debug == 1)\
{ \
printk( KERN_INFO "GobiNet::%s " format, __FUNCTION__, ## arg ); \
} }while(0)
#if 0
#define VDBG( format, arg... ) do { \
if (debug == 1)\
{ \
printk( KERN_INFO "GobiNet::%s " format, __FUNCTION__, ## arg ); \
} } while(0)
#else
#define VDBG( format, arg... ) do { } while(0)
#endif
#define INFO( format, arg... ) do { \
printk( KERN_INFO "GobiNet::%s " format, __FUNCTION__, ## arg ); \
}while(0)
// QMI Service Types
#define QMICTL 0
#define QMIWDS 1
#define QMIDMS 2
#define QMINAS 3
#define QMIUIM 11
#define QMIWDA 0x1A
#define u8 unsigned char
#define u16 unsigned short
#define u32 unsigned int
#define u64 unsigned long long
#define bool u8
#define true 1
#define false 0
#define ENOMEM 12
#define EFAULT 14
#define EINVAL 22
#ifndef ENOMSG
#define ENOMSG 42
#endif
#define ENODATA 61
#define TLV_TYPE_LINK_PROTO 0x10
/*=========================================================================*/
// Struct sQMUX
//
// Structure that defines a QMUX header
/*=========================================================================*/
typedef struct sQMUX
{
/* T\F, always 1 */
u8 mTF;
/* Size of message */
u16 mLength;
/* Control flag */
u8 mCtrlFlag;
/* Service Type */
u8 mQMIService;
/* Client ID */
u8 mQMIClientID;
}__attribute__((__packed__)) sQMUX;
#if 0
/*=========================================================================*/
// Generic QMUX functions
/*=========================================================================*/
// Remove QMUX headers from a buffer
int ParseQMUX(
u16 * pClientID,
void * pBuffer,
u16 buffSize );
// Fill buffer with QMUX headers
int FillQMUX(
u16 clientID,
void * pBuffer,
u16 buffSize );
/*=========================================================================*/
// Generic QMI functions
/*=========================================================================*/
// Get data buffer of a specified TLV from a QMI message
int GetTLV(
void * pQMIMessage,
u16 messageLen,
u8 type,
void * pOutDataBuf,
u16 bufferLen );
// Check mandatory TLV in a QMI message
int ValidQMIMessage(
void * pQMIMessage,
u16 messageLen );
// Get the message ID of a QMI message
int GetQMIMessageID(
void * pQMIMessage,
u16 messageLen );
/*=========================================================================*/
// Get sizes of buffers needed by QMI requests
/*=========================================================================*/
// Get size of buffer needed for QMUX
u16 QMUXHeaderSize( void );
// Get size of buffer needed for QMUX + QMICTLGetClientIDReq
u16 QMICTLGetClientIDReqSize( void );
// Get size of buffer needed for QMUX + QMICTLReleaseClientIDReq
u16 QMICTLReleaseClientIDReqSize( void );
// Get size of buffer needed for QMUX + QMICTLReadyReq
u16 QMICTLReadyReqSize( void );
// Get size of buffer needed for QMUX + QMIWDSSetEventReportReq
u16 QMIWDSSetEventReportReqSize( void );
// Get size of buffer needed for QMUX + QMIWDSGetPKGSRVCStatusReq
u16 QMIWDSGetPKGSRVCStatusReqSize( void );
u16 QMIWDSSetQMUXBindMuxDataPortSize( void );
// Get size of buffer needed for QMUX + QMIDMSGetMEIDReq
u16 QMIDMSGetMEIDReqSize( void );
// Get size of buffer needed for QMUX + QMIWDASetDataFormatReq
u16 QMIWDASetDataFormatReqSize( int qmap_mode );
// Get size of buffer needed for QMUX + QMICTLSyncReq
u16 QMICTLSyncReqSize( void );
/*=========================================================================*/
// Fill Buffers with QMI requests
/*=========================================================================*/
// Fill buffer with QMI CTL Get Client ID Request
int QMICTLGetClientIDReq(
void * pBuffer,
u16 buffSize,
u8 transactionID,
u8 serviceType );
// Fill buffer with QMI CTL Release Client ID Request
int QMICTLReleaseClientIDReq(
void * pBuffer,
u16 buffSize,
u8 transactionID,
u16 clientID );
// Fill buffer with QMI CTL Get Version Info Request
int QMICTLReadyReq(
void * pBuffer,
u16 buffSize,
u8 transactionID );
// Fill buffer with QMI WDS Set Event Report Request
int QMIWDSSetEventReportReq(
void * pBuffer,
u16 buffSize,
u16 transactionID );
// Fill buffer with QMI WDS Get PKG SRVC Status Request
int QMIWDSGetPKGSRVCStatusReq(
void * pBuffer,
u16 buffSize,
u16 transactionID );
u16 QMIWDSSetQMUXBindMuxDataPortReq(
void * pBuffer,
u16 buffSize,
u8 MuxId,
u16 transactionID );
// Fill buffer with QMI DMS Get Serial Numbers Request
int QMIDMSGetMEIDReq(
void * pBuffer,
u16 buffSize,
u16 transactionID );
// Fill buffer with QMI WDA Set Data Format Request
int QMIWDASetDataFormatReq(
void * pBuffer,
u16 buffSize,
bool bRawIPMode, int qmap_mode, u32 rx_size,
u16 transactionID );
#if 0
int QMIWDASetDataQmapReq(
void * pBuffer,
u16 buffSize,
u16 transactionID );
#endif
int QMICTLSyncReq(
void * pBuffer,
u16 buffSize,
u16 transactionID );
/*=========================================================================*/
// Parse data from QMI responses
/*=========================================================================*/
// Parse the QMI CTL Get Client ID Resp
int QMICTLGetClientIDResp(
void * pBuffer,
u16 buffSize,
u16 * pClientID );
// Verify the QMI CTL Release Client ID Resp is valid
int QMICTLReleaseClientIDResp(
void * pBuffer,
u16 buffSize );
// Parse the QMI WDS Set Event Report Resp/Indication or
// QMI WDS Get PKG SRVC Status Resp/Indication
int QMIWDSEventResp(
void * pBuffer,
u16 buffSize,
u32 * pTXOk,
u32 * pRXOk,
u32 * pTXErr,
u32 * pRXErr,
u32 * pTXOfl,
u32 * pRXOfl,
u64 * pTXBytesOk,
u64 * pRXBytesOk,
bool * pbLinkState,
bool * pbReconfigure );
// Parse the QMI DMS Get Serial Numbers Resp
int QMIDMSGetMEIDResp(
void * pBuffer,
u16 buffSize,
char * pMEID,
int meidSize );
// Parse the QMI DMS Get Serial Numbers Resp
int QMIWDASetDataFormatResp(
void * pBuffer,
u16 buffSize, bool bRawIPMode, int *qmap_enabled, int *rx_size, int *tx_size);
// Parse the QMI CTL Sync Response
int QMICTLSyncResp(
void *pBuffer,
u16 buffSize );
#endif

File diff suppressed because it is too large

View File

@@ -0,0 +1,368 @@
/*===========================================================================
FILE:
QMIDevice.h
DESCRIPTION:
Functions related to the QMI interface device
FUNCTIONS:
Generic functions
IsDeviceValid
PrintHex
GobiSetDownReason
GobiClearDownReason
GobiTestDownReason
Driver level asynchronous read functions
ResubmitIntURB
ReadCallback
IntCallback
StartRead
KillRead
Internal read/write functions
ReadAsync
UpSem
ReadSync
WriteSyncCallback
WriteSync
Internal memory management functions
GetClientID
ReleaseClientID
FindClientMem
AddToReadMemList
PopFromReadMemList
AddToNotifyList
NotifyAndPopNotifyList
AddToURBList
PopFromURBList
Internal userspace wrapper functions
UserspaceunlockedIOCTL
Userspace wrappers
UserspaceOpen
UserspaceIOCTL
UserspaceClose
UserspaceRead
UserspaceWrite
UserspacePoll
Initializer and destructor
RegisterQMIDevice
DeregisterQMIDevice
Driver level client management
QMIReady
QMIWDSCallback
SetupQMIWDSCallback
QMIDMSGetMEID
Copyright (c) 2011, Code Aurora Forum. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Code Aurora Forum nor
the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
===========================================================================*/
//---------------------------------------------------------------------------
// Pragmas
//---------------------------------------------------------------------------
#pragma once
//---------------------------------------------------------------------------
// Include Files
//---------------------------------------------------------------------------
#include "Structs.h"
#include "QMI.h"
/*=========================================================================*/
// Generic functions
/*=========================================================================*/
#ifdef __QUECTEL_INTER__
// Basic test to see if device memory is valid
static bool IsDeviceValid( sGobiUSBNet * pDev );
/*=========================================================================*/
// Driver level asynchronous read functions
/*=========================================================================*/
// Resubmit interrupt URB, re-using same values
static int ResubmitIntURB( struct urb * pIntURB );
// Read callback
// Put the data in storage and notify anyone waiting for data
#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,18 ))
static void ReadCallback( struct urb * pReadURB );
#else
static void ReadCallback(struct urb *pReadURB, struct pt_regs *regs);
#endif
// Interrupt callback
// Data is available, start a read URB
#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,18 ))
static void IntCallback( struct urb * pIntURB );
#else
static void IntCallback(struct urb *pIntURB, struct pt_regs *regs);
#endif
/*=========================================================================*/
// Internal read/write functions
/*=========================================================================*/
// Start asynchronous read
// Reading client's data store, not device
static int ReadAsync(
sGobiUSBNet * pDev,
u16 clientID,
u16 transactionID,
void (*pCallback)(sGobiUSBNet *, u16, void *),
void * pData );
// Notification function for synchronous read
static void UpSem(
sGobiUSBNet * pDev,
u16 clientID,
void * pData );
// Start synchronous read
// Reading client's data store, not device
static int ReadSync(
sGobiUSBNet * pDev,
void ** ppOutBuffer,
u16 clientID,
u16 transactionID );
// Write callback
#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,18 ))
static void WriteSyncCallback( struct urb * pWriteURB );
#else
static void WriteSyncCallback(struct urb *pWriteURB, struct pt_regs *regs);
#endif
// Start synchronous write
static int WriteSync(
sGobiUSBNet * pDev,
char * pInWriteBuffer,
int size,
u16 clientID );
/*=========================================================================*/
// Internal memory management functions
/*=========================================================================*/
// Create client and allocate memory
static int GetClientID(
sGobiUSBNet * pDev,
u8 serviceType );
// Release client and free memory
static void ReleaseClientID(
sGobiUSBNet * pDev,
u16 clientID );
// Find this client's memory
static sClientMemList * FindClientMem(
sGobiUSBNet * pDev,
u16 clientID );
// Add Data to this client's ReadMem list
static bool AddToReadMemList(
sGobiUSBNet * pDev,
u16 clientID,
u16 transactionID,
void * pData,
u16 dataSize );
// Remove data from this client's ReadMem list if it matches
// the specified transaction ID.
static bool PopFromReadMemList(
sGobiUSBNet * pDev,
u16 clientID,
u16 transactionID,
void ** ppData,
u16 * pDataSize );
// Add Notify entry to this client's notify List
static bool AddToNotifyList(
sGobiUSBNet * pDev,
u16 clientID,
u16 transactionID,
void (* pNotifyFunct)(sGobiUSBNet *, u16, void *),
void * pData );
// Remove first Notify entry from this client's notify list
// and Run function
static bool NotifyAndPopNotifyList(
sGobiUSBNet * pDev,
u16 clientID,
u16 transactionID );
// Add URB to this client's URB list
static bool AddToURBList(
sGobiUSBNet * pDev,
u16 clientID,
struct urb * pURB );
// Remove URB from this client's URB list
static struct urb * PopFromURBList(
sGobiUSBNet * pDev,
u16 clientID );
/*=========================================================================*/
// Internal userspace wrappers
/*=========================================================================*/
// Userspace unlocked ioctl
static long UserspaceunlockedIOCTL(
struct file * pFilp,
unsigned int cmd,
unsigned long arg );
/*=========================================================================*/
// Userspace wrappers
/*=========================================================================*/
// Userspace open
static int UserspaceOpen(
struct inode * pInode,
struct file * pFilp );
#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,36 ))
// Userspace ioctl
static int UserspaceIOCTL(
struct inode * pUnusedInode,
struct file * pFilp,
unsigned int cmd,
unsigned long arg );
#endif
// Userspace close
#define quectel_no_for_each_process
#ifdef quectel_no_for_each_process
static int UserspaceClose(
struct inode * pInode,
struct file * pFilp );
#else
#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,14 ))
static int UserspaceClose(
struct file * pFilp,
fl_owner_t unusedFileTable );
#else
static int UserspaceClose( struct file * pFilp );
#endif
#endif
// Userspace read (synchronous)
static ssize_t UserspaceRead(
struct file * pFilp,
char __user * pBuf,
size_t size,
loff_t * pUnusedFpos );
// Userspace write (synchronous)
static ssize_t UserspaceWrite(
struct file * pFilp,
const char __user * pBuf,
size_t size,
loff_t * pUnusedFpos );
static unsigned int UserspacePoll(
struct file * pFilp,
struct poll_table_struct * pPollTable );
/*=========================================================================*/
// Driver level client management
/*=========================================================================*/
// Check if QMI is ready for use
static bool QMIReady(
sGobiUSBNet * pDev,
u16 timeout );
// QMI WDS callback function
static void QMIWDSCallback(
sGobiUSBNet * pDev,
u16 clientID,
void * pData );
// Fire off requests and start async read for QMI WDS callback
static int SetupQMIWDSCallback( sGobiUSBNet * pDev );
// Register client, send req and parse MEID response, release client
static int QMIDMSGetMEID( sGobiUSBNet * pDev );
// Register client, send req and parse Data format response, release client
static int QMIWDASetDataFormat( sGobiUSBNet * pDev, int qmap_mode, int *rx_urb_size );
#endif
// Print Hex data, for debug purposes
void QuecPrintHex(
void * pBuffer,
u16 bufSize );
// Sets mDownReason and turns carrier off
void QuecGobiSetDownReason(
sGobiUSBNet * pDev,
u8 reason );
// Clear mDownReason and may turn carrier on
void QuecGobiClearDownReason(
sGobiUSBNet * pDev,
u8 reason );
// Tests mDownReason and returns whether reason is set
bool QuecGobiTestDownReason(
sGobiUSBNet * pDev,
u8 reason );
// Start continuous read "thread"
int QuecStartRead( sGobiUSBNet * pDev );
// Kill continuous read "thread"
void QuecKillRead( sGobiUSBNet * pDev );
/*=========================================================================*/
// Initializer and destructor
/*=========================================================================*/
// QMI Device initialization function
int QuecRegisterQMIDevice( sGobiUSBNet * pDev );
// QMI Device cleanup function
void QuecDeregisterQMIDevice( sGobiUSBNet * pDev );
int QuecQMIWDASetDataFormat( sGobiUSBNet * pDev, int qmap_mode, int *rx_urb_size );
#define PrintHex QuecPrintHex
#define GobiSetDownReason QuecGobiSetDownReason
#define GobiClearDownReason QuecGobiClearDownReason
#define GobiTestDownReason QuecGobiTestDownReason
#define StartRead QuecStartRead
#define KillRead QuecKillRead
#define RegisterQMIDevice QuecRegisterQMIDevice
#define DeregisterQMIDevice QuecDeregisterQMIDevice

View File

@@ -0,0 +1,78 @@
Gobi3000 network driver 2011-07-29-1026
This readme covers important information concerning
the Gobi Net driver.
Table of Contents
1. What's new in this release
2. Known issues
3. Known platform issues
-------------------------------------------------------------------------------
1. WHAT'S NEW
This Release (Gobi3000 network driver 2011-07-29-1026)
a. Signal the device to leave low power mode on enumeration
b. Add "txQueueLength" parameter, which will set the Tx Queue Length
c. Send SetControlLineState message during driver/device removal
d. Change to new date-based versioning scheme
Prior Release (Gobi3000 network driver 1.0.60) 06/29/2011
a. Add UserspacePoll() function, to support select()
b. Fix possible deadlock on GobiUSBNetTXTimeout()
c. Fix memory leak on data transmission
Prior Release (Gobi3000 network driver 1.0.50) 05/18/2011
a. Add support for kernels up to 2.6.38
b. Add support for dynamic interface binding
Prior Release (Gobi3000 network driver 1.0.40) 02/28/2011
a. In cases of QMI read errors, discard the error and continue reading.
b. Add "interruptible" parameter, which may be disabled for debugging purposes.
Prior Release (Gobi3000 network driver 1.0.30) 01/05/2011
a. Fix rare kernel PANIC if a process terminates while file handle close
or device removal is in progress.
Prior Release (Gobi3000 network driver 1.0.20) 11/01/2010
a. Fix possible kernel WARNING if device removed before QCWWANDisconnect().
b. Fix multiple memory leaks in error cases.
Prior Release (Gobi3000 network driver 1.0.10) 09/17/2010
a. Initial release
-------------------------------------------------------------------------------
2. KNOWN ISSUES
No known issues.
-------------------------------------------------------------------------------
3. KNOWN PLATFORM ISSUES
a. Enabling autosuspend:
Autosuspend is supported by the Gobi3000 module and its drivers,
but by default it is not enabled by the open source kernel. As such,
the Gobi3000 module will not enter autosuspend unless the
user specifically turns on autosuspend with the command:
echo auto > /sys/bus/usb/devices/.../power/level
b. Ksoftirq using 100% CPU:
There is a known issue with the open source usbnet driver that can
result in infinite software interrupts. The fix for this is to test
(in the usbnet_bh() function) whether the usb_device can still submit URBs
before attempting to submit the response URB buffers (see the sketch after
this list).
c. NetworkManager does not recognize connection after resume:
After resuming from sleep/hibernate, NetworkManager may not recognize new
network connections made by the Gobi device. This is a system issue not
specific to the Gobi device and may result in DHCP not being run and the
default route not being updated. One way to fix this is to simply restart
the NetworkManager service.
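A minimal sketch of the usbnet_bh() guard described in issue (b) above,
assuming 'dev' is the struct usbnet instance (illustrative, not the exact
upstream patch):

    /* before resubmitting the deferred response URBs */
    if (dev->udev->state == USB_STATE_NOTATTACHED)
        return; /* device detached: stop, avoiding endless softirqs */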
-------------------------------------------------------------------------------

View File

@ -0,0 +1,166 @@
Release Notes
[V1.6.3]
Date: 9/26/2021
enhancement:
1. change version to 1.6.3
fix:
[V1.6.2.16]
Date: 9/17/2021
enhancement:
fix:
1. add sdx6x platform support
[V1.6.2.15]
Date: 3/23/2021
enhancement:
fix:
1. add sdx12 platform support
[V1.6.2.14]
Date: 3/18/2021
enhancement:
fix:
1. fix KASAN use-after-free found in modem reboot stress tests
2. wait for qmi_sync_thread() to finish in DeregisterQMIDevice(); otherwise USB may disconnect while the driver is still in qmi_sync_thread()
[V1.6.2.13]
Date: 12/31/2020
enhancement:
fix:
1. fix quectel-CM open error when the driver is still in qmi_sync_thread() but the SOC enters sleep.
[V1.6.2.12]
Date: 12/31/2020
enhancement:
fix:
1. for multi-pdn calls, fix being unable to ping after USB resumes from the suspend state.
[V1.6.2.11]
Date: 11/7/2020
enhancement:
1. support QUECTEL_QMI_MERGE: on some SOCs the control endpoint can read at most 64 bytes of QMI at a time.
for QMI messages larger than 64 bytes, we need to read several times and merge the fragments.
fix:
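A minimal reassembly sketch for QUECTEL_QMI_MERGE, matching the sQMIMsgHeader
layout declared in this driver's Structs.h (identity/version/cur_len/total_len);
the function and buffer names here are illustrative, not driver API:

    /* Accumulate <=64-byte fragments until total_len bytes are assembled. */
    static bool qmi_merge_fragment(const sQMIMsgHeader *hdr,
                                   const void *payload, size_t payload_len,
                                   char *out, size_t *assembled)
    {
        if (hdr->idenity != MERGE_PACKET_IDENTITY ||
            hdr->version != MERGE_PACKET_VERSION)
            return false;                       /* not a merge fragment */
        memcpy(out + hdr->cur_len, payload, payload_len);
        *assembled = hdr->cur_len + payload_len;
        return *assembled >= hdr->total_len;    /* complete QMI message? */
    }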
[V1.6.2.10]
Date: 9/15/2020
enhancement:
fix:
1. for X55, fix panic on kernel V2.6 ~ V3.2
[V1.6.2.9]
Date: 7/24/2020
enhancement:
fix:
1. for X55, fix errors on Big Endian SOC.
[V1.6.2.8]
Date: 7/2/2020
enhancement:
1. support QMAPV5, UL AGG (porting from qmi_wwan_q)
fix:
1. fix errors on kernel V2.6.
[V1.6.2.7]
Date: 6/9/2020
enhancement:
fix:
1. when sending a QMI CTL request, clear any pending QMI CTL response whose TID is the same
[V1.6.2.6]
Date: 5/19/2020
enhancement:
1. support bridge mode for multi-pdn-call
fix:
[V1.6.2.5]
Date: 4/26/2020
enhancement:
1. fix the netcard name as usbX (instead of ethX)
fix:
......
[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.5.0]
Date: 2018/04/17
enhancement:
1. support EG20&RG500
2. set rx_urb_size to a fixed 1520; do not change it according to the MTU
[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.4.3]
Date: 2018/04/16
enhancement:
1. increase QMAP's rx_urb_size to 32KB
[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.4.2]
Date: 2018/04/03
bug fix:
1. fix the QMI client not being released when quectel-CM is killed by 'kill -9'
[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.4.1]
Date: 2018/02/20
bug fix:
1. fix a compiler error on kernels later than 4.11
[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.4.0]
Date: 2018/12/17
bug fix:
1. fix a USB DMA error when built as GobiNet.ko on kernels later than 4.15
[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.3.8]
[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.3.7]
Date: 2018/09/25
enhancement:
1. check skb length in tx_fixup functions.
2. when QMAP is enabled, set FLAG_RX_ASSEMBLE to avoid 'RX errors' in ifconfig
[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.3.6]
Date: 2018/09/11
enhancement:
1. support EG12 and EM12
2. optimize the QMAP source code
3. fix compile errors and warnings on kernel version 4.15
[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.3.5]
Date: 2018/05/12
enhancement:
1. provide two methods to enable the QMAP function.
1.1 set the module parameter 'qmap_mode' to X (1~4) to enable QMAP.
1.2 ifconfig usb0 down, then 'echo X > /sys/class/usbX/qmap_mode' to enable QMAP
for both methods, X=1 enables 'IP Aggregation' and X=2~4 enables 'IP Mux'
2. support bridge mode; two methods are likewise provided to enable it.
2.1 set the module parameter 'bridge_mode' to 1 to enable bridge mode.
2.2 'echo 1 > /sys/class/usbX/bridge_mode' to enable bridge mode.
bridge mode setup:
brctl addbr br0; brctl addif br0 eth0; brctl addif br0 usb0; ./quectel-CM; ifconfig br0 up; ifconfig eth0 up
then connect eth0 to a PC with an Ethernet cable, and run a DHCP tool on the PC to obtain the public IP address.
'WCDMA&LTE_QConnectManager_Linux&Android_V1.1.40' or later is required to use QMAP and bridge mode.
[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.3.4]
Date: 2018/05/07
enhancement:
1. support using 'AT$QCRMCALL=1,1' to set up a data call.
when 'AT$QCRMCALL=1,1' is used, the module parameter 'qcrmcall_mode' must be set to 1,
and the GobiNet driver will then not tx/rx QMI.
[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.3.3]
Date: 2018/04/04
optimization:
1. optimize the QMAP source code
[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.3.2]
Date: 2018/03/23
enhancement:
1. support Qualcomm Mux and Aggregation Protocol (QMAP)
1.1 IP Mux: the GobiNet driver registers multiple netcards, one netcard per PDP,
and may tx/rx IP packets belonging to different PDPs in a single URB.
1.2 IP Aggregation: the GobiNet driver rx's multiple IP packets in one URB, used to increase throughput theoretically by reducing the number of USB interrupts.
the max rx URB size of MDM9x07 is 4KB; the max rx URB size of MDM9x40&SDX20 is 16KB
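For orientation, each packet inside an aggregated URB is preceded by a 4-byte
QMAP header. A sketch of its layout, following the mainline Qualcomm rmnet
driver's definition (bitfield order shown for little-endian hosts; illustrative
only, not this driver's declaration):

    struct qmap_hdr {
        u8     pad_len:6;   /* trailing padding bytes in this packet */
        u8     reserved:1;
        u8     cd_bit:1;    /* 1 = command frame, 0 = data frame */
        u8     mux_id;      /* selects the PDP / netcard (IP Mux) */
        __be16 pkt_len;     /* payload length, padding included */
    } __attribute__((packed));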
[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.3.1]
Date: 2017/11/20
enhancement:
1. support BG96

View File

@ -0,0 +1,529 @@
/*===========================================================================
FILE:
Structs.h
DESCRIPTION:
Declaration of structures used by the Qualcomm Linux USB Network driver
FUNCTIONS:
none
Copyright (c) 2011, Code Aurora Forum. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Code Aurora Forum nor
the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
===========================================================================*/
//---------------------------------------------------------------------------
// Pragmas
//---------------------------------------------------------------------------
#pragma once
//---------------------------------------------------------------------------
// Include Files
//---------------------------------------------------------------------------
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/version.h>
#include <linux/cdev.h>
#include <linux/kthread.h>
#include <linux/poll.h>
#include <linux/completion.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#define QUECTEL_WWAN_QMAP 4 //MAX is 7
#ifdef QUECTEL_WWAN_QMAP
#define QUECTEL_QMAP_MUX_ID 0x81
#endif
//#define QUECTEL_QMI_MERGE
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
#define QUECTEL_BRIDGE_MODE
#endif
#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 2,6,21 ))
static inline void skb_reset_mac_header(struct sk_buff *skb)
{
skb->mac.raw = skb->data;
}
#endif
#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 2,6,22 ))
#define bool u8
#ifndef URB_FREE_BUFFER
#define URB_FREE_BUFFER_BY_SELF // usb_free_urb() will not free the buffer; we must free it ourselves
#define URB_FREE_BUFFER 0x0100 /* Free transfer buffer with the URB */
#endif
/**
* usb_endpoint_type - get the endpoint's transfer type
* @epd: endpoint to be checked
*
* Returns one of USB_ENDPOINT_XFER_{CONTROL, ISOC, BULK, INT} according
* to @epd's transfer type.
*/
static inline int usb_endpoint_type(const struct usb_endpoint_descriptor *epd)
{
return epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
}
#endif
#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 2,6,18 ))
/**
* usb_endpoint_dir_in - check if the endpoint has IN direction
* @epd: endpoint to be checked
*
* Returns true if the endpoint is of type IN, otherwise it returns false.
*/
static inline int usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd)
{
return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN);
}
/**
* usb_endpoint_dir_out - check if the endpoint has OUT direction
* @epd: endpoint to be checked
*
* Returns true if the endpoint is of type OUT, otherwise it returns false.
*/
static inline int usb_endpoint_dir_out(
const struct usb_endpoint_descriptor *epd)
{
return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT);
}
/**
* usb_endpoint_xfer_int - check if the endpoint has interrupt transfer type
* @epd: endpoint to be checked
*
* Returns true if the endpoint is of type interrupt, otherwise it returns
* false.
*/
static inline int usb_endpoint_xfer_int(
const struct usb_endpoint_descriptor *epd)
{
return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
USB_ENDPOINT_XFER_INT);
}
static inline int usb_autopm_set_interface(struct usb_interface *intf)
{ return 0; }
static inline int usb_autopm_get_interface(struct usb_interface *intf)
{ return 0; }
static inline int usb_autopm_get_interface_async(struct usb_interface *intf)
{ return 0; }
static inline void usb_autopm_put_interface(struct usb_interface *intf)
{ }
static inline void usb_autopm_put_interface_async(struct usb_interface *intf)
{ }
static inline void usb_autopm_enable(struct usb_interface *intf)
{ }
static inline void usb_autopm_disable(struct usb_interface *intf)
{ }
static inline void usb_mark_last_busy(struct usb_device *udev)
{ }
#endif
#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 2,6,24 ))
#include "usbnet.h"
#else
#include <linux/usb/usbnet.h>
#endif
#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,25 ))
#include <linux/fdtable.h>
#else
#include <linux/file.h>
#endif
// Forward declaration; used by the self-referential structures below and defined later
struct sGobiUSBNet;
#if defined(QUECTEL_WWAN_QMAP)
#define QUECTEL_UL_DATA_AGG 1
#if defined(QUECTEL_UL_DATA_AGG)
struct ul_agg_ctx {
/* QMIWDS_ADMIN_SET_DATA_FORMAT_RESP TLV_0x17 and TLV_0x18 */
uint ul_data_aggregation_max_datagrams; //UplinkDataAggregationMaxDatagramsTlv
uint ul_data_aggregation_max_size; //UplinkDataAggregationMaxSizeTlv
uint dl_minimum_padding;
};
#endif
#endif
/*=========================================================================*/
// Struct sReadMemList
//
// Structure that defines an entry in a Read Memory linked list
/*=========================================================================*/
typedef struct sReadMemList
{
/* Data buffer */
void * mpData;
/* Transaction ID */
u16 mTransactionID;
/* Size of data buffer */
u16 mDataSize;
/* Next entry in linked list */
struct sReadMemList * mpNext;
} sReadMemList;
/*=========================================================================*/
// Struct sNotifyList
//
// Structure that defines an entry in a Notification linked list
/*=========================================================================*/
typedef struct sNotifyList
{
/* Function to be run when data becomes available */
void (* mpNotifyFunct)(struct sGobiUSBNet *, u16, void *);
/* Transaction ID */
u16 mTransactionID;
/* Data to provide as parameter to mpNotifyFunct */
void * mpData;
/* Next entry in linked list */
struct sNotifyList * mpNext;
} sNotifyList;
/*=========================================================================*/
// Struct sURBList
//
// Structure that defines an entry in a URB linked list
/*=========================================================================*/
typedef struct sURBList
{
/* The current URB */
struct urb * mpURB;
/* Next entry in linked list */
struct sURBList * mpNext;
} sURBList;
/*=========================================================================*/
// Struct sClientMemList
//
// Structure that defines an entry in a Client Memory linked list
// Stores data specific to a Service Type and Client ID
/*=========================================================================*/
typedef struct sClientMemList
{
/* Client ID for this Client */
u16 mClientID;
/* Linked list of Read entries */
/* Stores data read from device before sending to client */
sReadMemList * mpList;
/* Linked list of Notification entries */
/* Stores notification functions to be run as data becomes
available or the device is removed */
sNotifyList * mpReadNotifyList;
/* Linked list of URB entries */
/* Stores pointers to outstanding URBs which need to be canceled
when the client is deregistered or the device is removed */
sURBList * mpURBList;
/* Next entry in linked list */
struct sClientMemList * mpNext;
/* Wait queue object for poll() */
wait_queue_head_t mWaitQueue;
} sClientMemList;
/*=========================================================================*/
// Struct sURBSetupPacket
//
// Structure that defines a USB Setup packet for Control URBs
// Taken from USB CDC specifications
/*=========================================================================*/
typedef struct sURBSetupPacket
{
/* Request type */
u8 mRequestType;
/* Request code */
u8 mRequestCode;
/* Value */
u16 mValue;
/* Index */
u16 mIndex;
/* Length of Control URB */
u16 mLength;
} sURBSetupPacket;
// Common value for sURBSetupPacket.mLength
#define DEFAULT_READ_URB_LENGTH 0x1000
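/*
 * Illustrative only (not part of the original header): for the Gobi QMI
 * read path, a control-IN setup packet for the CDC GET_ENCAPSULATED_RESPONSE
 * request would typically be filled like this, where 'intfNum' is a
 * hypothetical QMI interface number:
 *
 *   sURBSetupPacket setup = {
 *      .mRequestType = 0xA1,                    // device-to-host, class, interface
 *      .mRequestCode = 0x01,                    // CDC GET_ENCAPSULATED_RESPONSE
 *      .mValue       = 0,
 *      .mIndex       = intfNum,
 *      .mLength      = DEFAULT_READ_URB_LENGTH,
 *   };
 */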
#ifdef QUECTEL_QMI_MERGE
#define MERGE_PACKET_IDENTITY 0x2c7c
#define MERGE_PACKET_VERSION 0x0001
#define MERGE_PACKET_MAX_PAYLOAD_SIZE 56
typedef struct sQMIMsgHeader {
u16 idenity;
u16 version;
u16 cur_len;
u16 total_len;
} sQMIMsgHeader;
typedef struct sQMIMsgPacket {
sQMIMsgHeader header;
u16 len;
char buf[DEFAULT_READ_URB_LENGTH];
} sQMIMsgPacket;
#endif
#ifdef CONFIG_PM
#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
/*=========================================================================*/
// Struct sAutoPM
//
// Structure used to manage AutoPM thread which determines whether the
// device is in use or may enter autosuspend. Also submits net
// transmissions asynchronously.
/*=========================================================================*/
typedef struct sAutoPM
{
/* Thread for atomic autopm function */
struct task_struct * mpThread;
/* Signal for completion when it's time for the thread to work */
struct completion mThreadDoWork;
/* Time to exit? */
bool mbExit;
/* List of URB's queued to be sent to the device */
sURBList * mpURBList;
/* URB list lock (for adding and removing elements) */
spinlock_t mURBListLock;
/* Length of the URB list */
atomic_t mURBListLen;
/* Active URB */
struct urb * mpActiveURB;
/* Active URB lock (for adding and removing elements) */
spinlock_t mActiveURBLock;
/* Duplicate pointer to USB device interface */
struct usb_interface * mpIntf;
} sAutoPM;
#endif
#endif /* CONFIG_PM */
/*=========================================================================*/
// Struct sQMIDev
//
// Structure that defines the data for the QMI device
/*=========================================================================*/
typedef struct sQMIDev
{
/* Device number */
dev_t mDevNum;
/* Device class */
struct class * mpDevClass;
/* cdev struct */
struct cdev mCdev;
/* is mCdev initialized? */
bool mbCdevIsInitialized;
/* Pointer to read URB */
struct urb * mpReadURB;
//#define READ_QMI_URB_ERROR
#ifdef READ_QMI_URB_ERROR
struct timer_list mReadUrbTimer;
#endif
#ifdef QUECTEL_QMI_MERGE
sQMIMsgPacket * mpQmiMsgPacket;
#endif
/* Read setup packet */
sURBSetupPacket * mpReadSetupPacket;
/* Read buffer attached to current read URB */
void * mpReadBuffer;
/* Interrupt URB */
/* Used to asynchronously notify when read data is available */
struct urb * mpIntURB;
/* Buffer used by Interrupt URB */
void * mpIntBuffer;
/* Pointer to memory linked list for all clients */
sClientMemList * mpClientMemList;
/* Spinlock for client Memory entries */
spinlock_t mClientMemLock;
/* Transaction ID associated with QMICTL "client" */
atomic_t mQMICTLTransactionID;
} sQMIDev;
typedef struct {
u32 qmap_enabled;
u32 dl_data_aggregation_max_datagrams;
u32 dl_data_aggregation_max_size;
u32 ul_data_aggregation_max_datagrams;
u32 ul_data_aggregation_max_size;
u32 dl_minimum_padding;
} QMAP_SETTING;
/*=========================================================================*/
// Struct sGobiUSBNet
//
// Structure that defines the data associated with the Qualcomm USB device
/*=========================================================================*/
typedef struct sGobiUSBNet
{
atomic_t refcount;
/* Net device structure */
struct usbnet * mpNetDev;
#ifdef QUECTEL_WWAN_QMAP
unsigned link_state;
int qmap_mode;
int qmap_size;
int qmap_version;
struct net_device *mpQmapNetDev[QUECTEL_WWAN_QMAP];
struct tasklet_struct txq;
QMAP_SETTING qmap_settings;
#if defined(QUECTEL_UL_DATA_AGG)
struct ul_agg_ctx agg_ctx;
#endif
#ifdef QUECTEL_BRIDGE_MODE
int m_qmap_bridge_mode[QUECTEL_WWAN_QMAP];
#endif
#endif
#if 1 //def DATA_MODE_RP
bool mbMdm9x07;
bool mbMdm9x06; //for BG96
/* QMI "device" work in IP Mode or ETH Mode */
bool mbRawIPMode;
#ifdef QUECTEL_BRIDGE_MODE
int m_bridge_mode;
uint m_bridge_ipv4;
unsigned char mHostMAC[6];
#endif
int m_qcrmcall_mode;
#endif
struct completion mQMIReadyCompletion;
bool mbQMIReady;
bool mbProbeDone;
bool mbQMISyncIng;
/* Usb device interface */
struct usb_interface * mpIntf;
/* Pointers to usbnet_open and usbnet_stop functions */
int (* mpUSBNetOpen)(struct net_device *);
int (* mpUSBNetStop)(struct net_device *);
/* Reason(s) why interface is down */
/* Used by Gobi*DownReason */
unsigned long mDownReason;
#define NO_NDIS_CONNECTION 0
#define CDC_CONNECTION_SPEED 1
#define DRIVER_SUSPENDED 2
#define NET_IFACE_STOPPED 3
/* QMI "device" status */
bool mbQMIValid;
bool mbDeregisterQMIDevice;
/* QMI "device" memory */
sQMIDev mQMIDev;
/* Device MEID */
char mMEID[14];
struct hrtimer timer;
struct tasklet_struct bh;
unsigned long
pending_num : 8,
pending_size : 16;
struct sk_buff *pending_pool[16];
#ifdef CONFIG_PM
#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
/* AutoPM thread */
sAutoPM mAutoPM;
#endif
#endif /* CONFIG_PM */
} sGobiUSBNet;
/*=========================================================================*/
// Struct sQMIFilpStorage
//
// Structure that defines the storage each file handle contains
// Relates the file handle to a client
/*=========================================================================*/
typedef struct sQMIFilpStorage
{
/* Client ID */
u16 mClientID;
/* Device pointer */
sGobiUSBNet * mpDev;
} sQMIFilpStorage;

View File

@ -0,0 +1,47 @@
#
# Copyright (C) 2015 OpenWrt.org
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.
#
include $(TOPDIR)/rules.mk
PKG_NAME:=pcie_mhi
PKG_VERSION:=3.2
PKG_RELEASE:=1
include $(INCLUDE_DIR)/kernel.mk
include $(INCLUDE_DIR)/package.mk
define KernelPackage/pcie_mhi
SUBMENU:=PCIE Support
TITLE:=Kernel pcie driver for MHI device
DEPENDS:=+pciids +pciutils +quectel-CM-5G
FILES:=$(PKG_BUILD_DIR)/pcie_mhi.ko
AUTOLOAD:=$(call AutoLoad,90,pcie_mhi)
endef
define KernelPackage/pcie_mhi/description
Kernel module that registers a custom pciemhi platform device.
endef
MAKE_OPTS:= \
ARCH="$(LINUX_KARCH)" \
CROSS_COMPILE="$(TARGET_CROSS)" \
CXXFLAGS="$(TARGET_CXXFLAGS)" \
M="$(PKG_BUILD_DIR)" \
$(EXTRA_KCONFIG)
define Build/Prepare
mkdir -p $(PKG_BUILD_DIR)
$(CP) ./src/* $(PKG_BUILD_DIR)/
endef
define Build/Compile
$(MAKE) -C "$(LINUX_DIR)" \
$(MAKE_OPTS) \
modules
endef
$(eval $(call KernelPackage,pcie_mhi))

View File

@ -0,0 +1,34 @@
#ccflags-y += -g
obj-m += pcie_mhi.o
pcie_mhi-objs := core/mhi_init.o core/mhi_main.o core/mhi_pm.o core/mhi_boot.o core/mhi_dtr.o controllers/mhi_qti.o
pcie_mhi-objs += devices/mhi_uci.o
ifeq (1,1)
pcie_mhi-objs += devices/mhi_netdev_quectel.o
else
pcie_mhi-objs += devices/mhi_netdev.o
pcie_mhi-objs += devices/rmnet_handler.o
endif
PWD := $(shell pwd)
ifeq ($(ARCH),)
ARCH := $(shell uname -m)
endif
ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE :=
endif
ifeq ($(KDIR),)
KDIR := /lib/modules/$(shell uname -r)/build
endif
pcie_mhi: clean
$(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) modules
#cp pcie_mhi.ko /tftpboot/
clean:
$(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) clean
find . -name '*.o.ur-safe' | xargs rm -f
install: pcie_mhi
sudo cp pcie_mhi.ko /lib/modules/${shell uname -r}/kernel/drivers/pci/
sudo depmod

View File

@ -0,0 +1,36 @@
1. port the pcie_mhi driver as follows
$ git diff drivers/Makefile
diff --git a/drivers/Makefile b/drivers/Makefile
index 77fbc52..e45837e 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -184,3 +184,4 @@ obj-$(CONFIG_FPGA) += fpga/
obj-$(CONFIG_FSI) += fsi/
obj-$(CONFIG_TEE) += tee/
obj-$(CONFIG_MULTIPLEXER) += mux/
+obj-y += pcie_mhi/
$ tree drivers/pcie_mhi/ -L 1
drivers/pcie_mhi/
├── controllers
├── core
├── devices
└── Makefile
2. check that the RG500 attaches the pcie_mhi driver successfully
root@OpenWrt:/# lspci
00:00.0 Class 0604: 17cb:0302
01:00.0 Class ff00: 17cb:0306
root@OpenWrt:~# dmesg | grep mhi
[ 138.483252] mhi_init Quectel_Linux_PCIE_MHI_Driver_V1.3.0.6
[ 138.492350] mhi_pci_probe pci_dev->name = 0000:01:00.0, domain=0, bus=1, slot=0, vendor=17CB, device=0306
3. for usage, see the following logs:
log/QXDM_OVER_PCIE.txt
log/AT_OVER_PCIE.txt
log/MBIM_OVER_PCIE.txt
log/QMI_OVER_PCIE.txt

View File

@ -0,0 +1,103 @@
Release Notes
[V1.3.4]
Date: 12/8/2022
enhancement:
1. only allow autosuspend to be enabled when the module is in MHI_EE_AMSS
2. show the PCIe link speed and width at driver probe
3. check the PCIe link status by reading the PCIe VID and PID at driver probe;
   if the PCIe link is down, return -EIO
4. support RM520 (1eac:1004)
5. support qmap command packet
fix:
1. fix the TX queue being wrongly stopped during uplink throughput tests
2. fix the module failing to boot up after QFirehose, which occurred with very small probability
3. mhi uci: add a mutex lock for concurrent reads/writes
[V1.3.3]
Date: 30/6/2022
enhancement:
1. remove one unnecessary kmalloc during QFirehose
2. support mhi monitor (like usbmon), usage: cat /sys/kernel/debug/mhi_q/0306_00\:01.00/mhimon
3. set ring size of event 0 to 256 (from 1024), required by x6x
4. support the PCIe local network card mhi_swip0 (chan 46/47), disabled by default
5. port the IPQ5018 MHI rate control code from spf11.5
6. set the PCIe rmnet downlink max QMAP packet size to 15KB (same as the IPQ MHI driver)
7. support setting a different MAC address for each rmnet net card
8. when the MHI netdev fails to malloc, use delayed work instead of plain work
9. optimize the code path for the case where the modem is still in MHI_EE_PTHRU when the driver loads
fix:
1. fix unsynchronized access to rp/wp when mhi_queue_xxx and mhi_process_xxx_ring run on different CPUs
2. set the DMA mask at driver probe; some SOCs such as rpi_4 need it
[V1.3.2]
Date: 12/16/2021
enhancement:
1. support Linux Kernel V5.14
2. mhi_netdev_quectel.c: do not print logs in softirq context
[V1.3.1]
Date: 9/26/2021
enhancement:
fix:
[V1.3.0.19]
Date: 9/18/2021
enhancement:
1. support sdx62 (17cb:0308)
2. support IPQ5018's NSS
3. use 'qsdk/qca/src/data-kernel/drivers/rmnet-nss/rmnet_nss.c' instead of my own rmnet_nss.c;
   pcie_mhi.ko must then be loaded after rmnet_nss.ko
4. allow the BHI IRQ to be non-zero (for ipq5018)
fix:
[V1.3.0.18]
Date: 4/14/2021
enhancement:
1. support multiple MBIM calls, usage:
# insmod pcie_mhi.ko mhi_mbim_enabeld=1 qmap_mode=4
# quectel-mbim-proxy -d /dev/mhi_MBIM &
# quectel-CM -n X
fix:
[V1.3.0.17]
Date: 3/11/2021
enhancement:
fix:
1. fix very high CPU load during throughput tests when there is only one MSI interrupt
2. fix errors on the latest X24 modem
[V1.3.0.16]
Date: 11/18/2020
enhancement:
fix:
1. increase the ring size to 32; for in-bound channels, if one ring is full the modem will not generate MSI interrupts for any channel
[V1.3.0.15]
Date: 10/30/2020
enhancement:
1. support multi-modems, named as /dev/mhi_<chan_name>X
fix:
1. fix compile error on kernel v5.8
[V1.3.0.14]
Date: 10/9/2020
enhancement:
1. support EM120&EM160
fix:
1. fix compile error on kernel v5.6
2. support runtime suspend
[V1.3.0.13]
Date: 9/7/2020
enhancement:
1. support EM120&EM160
fix:
1. fix error on X55 + PCIE2.0(e.g IPQ4019)
2. support runtime suspend
[V1.3.0.12]
Date: 7/7/2020
enhancement:
1. support creating only one netcard (enabled by the macro MHI_NETDEV_ONE_CARD_MODE)
fix:

View File

@ -0,0 +1,13 @@
menu "MHI controllers"
config MHI_QTI
tristate "MHI QTI"
depends on MHI_BUS
help
If you say yes to this option, MHI bus support for QTI modem chipsets
will be enabled. QTI PCIe based modems use MHI as the communication
protocol. The MHI control driver is the bus master for such modems. As the
bus master driver, it oversees power management operations such as
suspend, resume, and powering the device on and off.
endmenu

View File

@ -0,0 +1 @@
obj-$(CONFIG_MHI_QTI) += mhi_qti.o mhi_arch_qti.o

View File

@ -0,0 +1,275 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.*/
#include <linux/async.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/msm-bus.h>
#include <linux/msm_pcie.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include "../core/mhi.h"
#include "mhi_qti.h"
struct arch_info {
struct mhi_dev *mhi_dev;
struct msm_bus_scale_pdata *msm_bus_pdata;
u32 bus_client;
struct pci_saved_state *pcie_state;
struct pci_saved_state *ref_pcie_state;
struct dma_iommu_mapping *mapping;
};
struct mhi_bl_info {
struct mhi_device *mhi_device;
async_cookie_t cookie;
void *ipc_log;
};
/* ipc log markings */
#define DLOG "Dev->Host: "
#define HLOG "Host: "
#ifdef CONFIG_MHI_DEBUG
#define MHI_IPC_LOG_PAGES (100)
enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_LVL_VERBOSE;
#else
#define MHI_IPC_LOG_PAGES (10)
enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_LVL_ERROR;
#endif
static int mhi_arch_set_bus_request(struct mhi_controller *mhi_cntrl, int index)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct arch_info *arch_info = mhi_dev->arch_info;
MHI_LOG("Setting bus request to index %d\n", index);
if (arch_info->bus_client)
return msm_bus_scale_client_update_request(
arch_info->bus_client,
index);
/* default return success */
return 0;
}
static void mhi_bl_dl_cb(struct mhi_device *mhi_dev,
struct mhi_result *mhi_result)
{
struct mhi_bl_info *mhi_bl_info = mhi_device_get_devdata(mhi_dev);
char *buf = mhi_result->buf_addr;
/* force a null at last character */
buf[mhi_result->bytes_xferd - 1] = 0;
ipc_log_string(mhi_bl_info->ipc_log, "%s %s", DLOG, buf);
}
static void mhi_bl_dummy_cb(struct mhi_device *mhi_dev,
struct mhi_result *mhi_result)
{
}
static void mhi_bl_remove(struct mhi_device *mhi_dev)
{
struct mhi_bl_info *mhi_bl_info = mhi_device_get_devdata(mhi_dev);
ipc_log_string(mhi_bl_info->ipc_log, HLOG "Received Remove notif.\n");
/* wait for boot monitor to exit */
async_synchronize_cookie(mhi_bl_info->cookie + 1);
}
static void mhi_bl_boot_monitor(void *data, async_cookie_t cookie)
{
struct mhi_bl_info *mhi_bl_info = data;
struct mhi_device *mhi_device = mhi_bl_info->mhi_device;
struct mhi_controller *mhi_cntrl = mhi_device->mhi_cntrl;
/* 15 sec timeout for booting device */
const u32 timeout = msecs_to_jiffies(15000);
/* wait for device to enter boot stage */
wait_event_timeout(mhi_cntrl->state_event, mhi_cntrl->ee == MHI_EE_AMSS
|| mhi_cntrl->ee == MHI_EE_DISABLE_TRANSITION,
timeout);
if (mhi_cntrl->ee == MHI_EE_AMSS) {
ipc_log_string(mhi_bl_info->ipc_log, HLOG
"Device successfully booted to mission mode\n");
mhi_unprepare_from_transfer(mhi_device);
} else {
ipc_log_string(mhi_bl_info->ipc_log, HLOG
"Device failed to boot to mission mode, ee = %s\n",
TO_MHI_EXEC_STR(mhi_cntrl->ee));
}
}
static int mhi_bl_probe(struct mhi_device *mhi_dev,
const struct mhi_device_id *id)
{
char node_name[32];
struct mhi_bl_info *mhi_bl_info;
mhi_bl_info = devm_kzalloc(&mhi_dev->dev, sizeof(*mhi_bl_info),
GFP_KERNEL);
if (!mhi_bl_info)
return -ENOMEM;
snprintf(node_name, sizeof(node_name), "mhi_bl_%04x_%02u.%02u.%02u",
mhi_dev->dev_id, mhi_dev->domain, mhi_dev->bus, mhi_dev->slot);
mhi_bl_info->ipc_log = ipc_log_context_create(MHI_IPC_LOG_PAGES,
node_name, 0);
if (!mhi_bl_info->ipc_log)
return -EINVAL;
mhi_bl_info->mhi_device = mhi_dev;
mhi_device_set_devdata(mhi_dev, mhi_bl_info);
ipc_log_string(mhi_bl_info->ipc_log, HLOG
"Entered SBL, Session ID:0x%x\n",
mhi_dev->mhi_cntrl->session_id);
/* start a thread to monitor entering mission mode */
mhi_bl_info->cookie = async_schedule(mhi_bl_boot_monitor, mhi_bl_info);
return 0;
}
static const struct mhi_device_id mhi_bl_match_table[] = {
{ .chan = "BL" },
{},
};
static struct mhi_driver mhi_bl_driver = {
.id_table = mhi_bl_match_table,
.remove = mhi_bl_remove,
.probe = mhi_bl_probe,
.ul_xfer_cb = mhi_bl_dummy_cb,
.dl_xfer_cb = mhi_bl_dl_cb,
.driver = {
.name = "MHI_BL",
.owner = THIS_MODULE,
},
};
int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct arch_info *arch_info = mhi_dev->arch_info;
char node[32];
if (!arch_info) {
arch_info = devm_kzalloc(&mhi_dev->pci_dev->dev,
sizeof(*arch_info), GFP_KERNEL);
if (!arch_info)
return -ENOMEM;
mhi_dev->arch_info = arch_info;
snprintf(node, sizeof(node), "mhi_%04x_%02u.%02u.%02u",
mhi_cntrl->dev_id, mhi_cntrl->domain, mhi_cntrl->bus,
mhi_cntrl->slot);
mhi_cntrl->log_buf = ipc_log_context_create(MHI_IPC_LOG_PAGES,
node, 0);
mhi_cntrl->log_lvl = mhi_ipc_log_lvl;
/* save reference state for pcie config space */
arch_info->ref_pcie_state = pci_store_saved_state(
mhi_dev->pci_dev);
mhi_driver_register(&mhi_bl_driver);
}
return mhi_arch_set_bus_request(mhi_cntrl, 1);
}
void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl)
{
mhi_arch_set_bus_request(mhi_cntrl, 0);
}
int mhi_arch_link_off(struct mhi_controller *mhi_cntrl, bool graceful)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct arch_info *arch_info = mhi_dev->arch_info;
struct pci_dev *pci_dev = mhi_dev->pci_dev;
int ret;
MHI_LOG("Entered\n");
if (graceful) {
pci_clear_master(pci_dev);
ret = pci_save_state(mhi_dev->pci_dev);
if (ret) {
MHI_ERR("Failed with pci_save_state, ret:%d\n", ret);
return ret;
}
arch_info->pcie_state = pci_store_saved_state(pci_dev);
pci_disable_device(pci_dev);
}
/*
* We will always attempt to put link into D3hot, however
* link down may have happened due to error fatal, so
* ignoring the return code
*/
pci_set_power_state(pci_dev, PCI_D3hot);
/* release the resources */
mhi_arch_set_bus_request(mhi_cntrl, 0);
MHI_LOG("Exited\n");
return 0;
}
int mhi_arch_link_on(struct mhi_controller *mhi_cntrl)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct arch_info *arch_info = mhi_dev->arch_info;
struct pci_dev *pci_dev = mhi_dev->pci_dev;
int ret;
MHI_LOG("Entered\n");
/* request resources and establish link training */
ret = mhi_arch_set_bus_request(mhi_cntrl, 1);
if (ret)
MHI_LOG("Could not set bus frequency, ret:%d\n", ret);
ret = pci_set_power_state(pci_dev, PCI_D0);
if (ret) {
MHI_ERR("Failed to set PCI_D0 state, ret:%d\n", ret);
return ret;
}
ret = pci_enable_device(pci_dev);
if (ret) {
MHI_ERR("Failed to enable device, ret:%d\n", ret);
return ret;
}
ret = pci_load_and_free_saved_state(pci_dev, &arch_info->pcie_state);
if (ret)
MHI_LOG("Failed to load saved cfg state\n");
pci_restore_state(pci_dev);
pci_set_master(pci_dev);
MHI_LOG("Exited\n");
return 0;
}

View File

@ -0,0 +1,715 @@
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/version.h>
#include "../core/mhi.h"
#include "mhi_qcom.h"
#if 1
#ifndef PCI_IRQ_MSI
#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */
#if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,10,53 ))
int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
{
int nvec = maxvec;
int rc;
if (maxvec < minvec)
return -ERANGE;
do {
rc = pci_enable_msi_block(dev, nvec);
if (rc < 0) {
return rc;
} else if (rc > 0) {
if (rc < minvec)
return -ENOSPC;
nvec = rc;
}
} while (rc);
return nvec;
}
#endif
static int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
unsigned int max_vecs, unsigned int flags)
{
return pci_enable_msi_range(dev, min_vecs, max_vecs);
}
static void pci_free_irq_vectors(struct pci_dev *dev)
{
pci_disable_msi(dev);
}
static int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
{
return dev->irq + nr;
}
#endif
#endif
static struct pci_device_id mhi_pcie_device_id[] = {
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0303)}, //SDX20
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0304)}, //SDX24
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0305)},
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0306)}, //SDX55
{PCI_DEVICE(0x2C7C, 0x0512)},
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, MHI_PCIE_DEBUG_ID)},
{0},
};
MODULE_DEVICE_TABLE(pci, mhi_pcie_device_id);
static struct pci_driver mhi_pcie_driver;
void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct pci_dev *pci_dev = mhi_dev->pci_dev;
pci_free_irq_vectors(pci_dev);
iounmap(mhi_cntrl->regs);
mhi_cntrl->regs = NULL;
pci_clear_master(pci_dev);
pci_release_region(pci_dev, mhi_dev->resn);
pci_disable_device(pci_dev);
}
static int mhi_init_pci_dev(struct mhi_controller *mhi_cntrl)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct pci_dev *pci_dev = mhi_dev->pci_dev;
int ret;
resource_size_t start, len;
int i;
mhi_dev->resn = MHI_PCI_BAR_NUM;
ret = pci_assign_resource(pci_dev, mhi_dev->resn);
if (ret) {
MHI_ERR("Error assign pci resources, ret:%d\n", ret);
return ret;
}
ret = pci_enable_device(pci_dev);
if (ret) {
MHI_ERR("Error enabling device, ret:%d\n", ret);
goto error_enable_device;
}
ret = pci_request_region(pci_dev, mhi_dev->resn, "mhi");
if (ret) {
MHI_ERR("Error pci_request_region, ret:%d\n", ret);
goto error_request_region;
}
pci_set_master(pci_dev);
start = pci_resource_start(pci_dev, mhi_dev->resn);
len = pci_resource_len(pci_dev, mhi_dev->resn);
mhi_cntrl->regs = ioremap_nocache(start, len);
MHI_LOG("mhi_cntrl->regs = %p\n", mhi_cntrl->regs);
if (!mhi_cntrl->regs) {
MHI_ERR("Error ioremap region\n");
goto error_ioremap;
}
ret = pci_alloc_irq_vectors(pci_dev, 1, mhi_cntrl->msi_required, PCI_IRQ_MSI);
if (IS_ERR_VALUE((ulong)ret) || ret < mhi_cntrl->msi_required) {
if (ret == -ENOSPC) {
/* imx_3.14.52_1.1.0_ga
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index f06e8f0..6a9614f 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -376,6 +376,13 @@ static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
if (msgvec > 5)
msgvec = 0;
+#if 1 //Add by Quectel 20190419
+ if (msgvec > 0 && pdev->vendor == 0x17cb) {
+ dev_info(&pdev->dev, "%s quectel fixup pos=%d, msg_ctr=%04x, msgvec=%d\n", __func__, desc->msi_attrib.pos, msg_ctr, msgvec);
+ msgvec = 0;
+ }
+#endif
+
irq = assign_irq((1 << msgvec), desc, &pos);
if (irq < 0)
return irq;
*/
}
//imx_4.1.15_2.0.0_ga & DELL_OPTIPLEX_7010 only alloc one msi interrupt for one pcie device
if (ret != 1) {
MHI_ERR("Failed to enable MSI, ret=%d, msi_required=%d\n", ret, mhi_cntrl->msi_required);
goto error_req_msi;
}
}
mhi_cntrl->msi_allocated = ret;
MHI_LOG("msi_required = %d, msi_allocated = %d, msi_irq = %u\n", mhi_cntrl->msi_required, mhi_cntrl->msi_allocated, pci_dev->irq);
for (i = 0; i < mhi_cntrl->msi_allocated; i++) {
mhi_cntrl->irq[i] = pci_irq_vector(pci_dev, i);
if (mhi_cntrl->irq[i] < 0) {
ret = mhi_cntrl->irq[i];
goto error_get_irq_vec;
}
}
#if 0
/* configure runtime pm */
pm_runtime_set_autosuspend_delay(&pci_dev->dev, MHI_RPM_SUSPEND_TMR_MS);
pm_runtime_dont_use_autosuspend(&pci_dev->dev);
pm_suspend_ignore_children(&pci_dev->dev, true);
/*
* pci framework will increment usage count (twice) before
* calling local device driver probe function.
* 1st pci.c pci_pm_init() calls pm_runtime_forbid
* 2nd pci-driver.c local_pci_probe calls pm_runtime_get_sync
* The framework expects the pci device driver to call
* pm_runtime_put_noidle to decrement the usage count after a
* successful probe, and to call pm_runtime_allow to enable
* runtime suspend.
*/
pm_runtime_mark_last_busy(&pci_dev->dev);
pm_runtime_put_noidle(&pci_dev->dev);
#endif
return 0;
error_get_irq_vec:
pci_free_irq_vectors(pci_dev);
error_req_msi:
iounmap(mhi_cntrl->regs);
error_ioremap:
pci_clear_master(pci_dev);
error_request_region:
pci_disable_device(pci_dev);
error_enable_device:
pci_release_region(pci_dev, mhi_dev->resn);
return ret;
}
#ifdef CONFIG_PM
static int mhi_runtime_idle(struct device *dev)
{
struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
MHI_LOG("Entered returning -EBUSY\n");
/*
* RPM framework during runtime resume always calls
* rpm_idle to see if the device is ready to suspend.
* If the dev.power usage_count is 0, the rpm fw will call the
* rpm_idle cb to see if the device is ready to suspend;
* if the cb returns 0, or no cb is defined, the framework will
* assume the device driver is ready to suspend and
* therefore will schedule runtime suspend.
* In MHI power management, MHI host shall go to
* runtime suspend only after entering MHI State M2, even if
* usage count is 0. Return -EBUSY to disable automatic suspend.
*/
return -EBUSY;
}
static int mhi_runtime_suspend(struct device *dev)
{
int ret = 0;
struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
MHI_LOG("Enter\n");
mutex_lock(&mhi_cntrl->pm_mutex);
ret = mhi_pm_suspend(mhi_cntrl);
if (ret) {
MHI_LOG("Abort due to ret:%d\n", ret);
goto exit_runtime_suspend;
}
ret = mhi_arch_link_off(mhi_cntrl, true);
if (ret)
MHI_ERR("Failed to Turn off link ret:%d\n", ret);
exit_runtime_suspend:
mutex_unlock(&mhi_cntrl->pm_mutex);
MHI_LOG("Exited with ret:%d\n", ret);
return ret;
}
static int mhi_runtime_resume(struct device *dev)
{
int ret = 0;
struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
MHI_LOG("Enter\n");
mutex_lock(&mhi_cntrl->pm_mutex);
if (!mhi_dev->powered_on) {
MHI_LOG("Not fully powered, return success\n");
mutex_unlock(&mhi_cntrl->pm_mutex);
return 0;
}
/* turn on link */
ret = mhi_arch_link_on(mhi_cntrl);
if (ret)
goto rpm_resume_exit;
/* enter M0 state */
ret = mhi_pm_resume(mhi_cntrl);
rpm_resume_exit:
mutex_unlock(&mhi_cntrl->pm_mutex);
MHI_LOG("Exited with :%d\n", ret);
return ret;
}
static int mhi_system_resume(struct device *dev)
{
int ret = 0;
struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
ret = mhi_runtime_resume(dev);
if (ret) {
MHI_ERR("Failed to resume link\n");
} else {
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
}
return ret;
}
int mhi_system_suspend(struct device *dev)
{
struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
MHI_LOG("Entered\n");
/* if rpm status still active then force suspend */
if (!pm_runtime_status_suspended(dev))
return mhi_runtime_suspend(dev);
pm_runtime_set_suspended(dev);
pm_runtime_disable(dev);
MHI_LOG("Exit\n");
return 0;
}
#endif
/* checks if link is down */
static int mhi_link_status(struct mhi_controller *mhi_cntrl, void *priv)
{
struct mhi_dev *mhi_dev = priv;
u16 dev_id;
int ret;
/* try reading the device id; if it doesn't match, the link is down */
ret = pci_read_config_word(mhi_dev->pci_dev, PCI_DEVICE_ID, &dev_id);
return (ret || dev_id != mhi_cntrl->dev_id) ? -EIO : 0;
}
static int mhi_runtime_get(struct mhi_controller *mhi_cntrl, void *priv)
{
struct mhi_dev *mhi_dev = priv;
struct device *dev = &mhi_dev->pci_dev->dev;
return pm_runtime_get(dev);
}
static void mhi_runtime_put(struct mhi_controller *mhi_cntrl, void *priv)
{
struct mhi_dev *mhi_dev = priv;
struct device *dev = &mhi_dev->pci_dev->dev;
pm_runtime_put_noidle(dev);
}
static void mhi_status_cb(struct mhi_controller *mhi_cntrl,
void *priv,
enum MHI_CB reason)
{
struct mhi_dev *mhi_dev = priv;
struct device *dev = &mhi_dev->pci_dev->dev;
if (reason == MHI_CB_IDLE) {
MHI_LOG("Schedule runtime suspend 1\n");
pm_runtime_mark_last_busy(dev);
pm_request_autosuspend(dev);
}
}
int mhi_debugfs_trigger_m0(void *data, u64 val)
{
struct mhi_controller *mhi_cntrl = data;
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
MHI_LOG("Trigger M3 Exit\n");
pm_runtime_get(&mhi_dev->pci_dev->dev);
pm_runtime_put(&mhi_dev->pci_dev->dev);
return 0;
}
int mhi_debugfs_trigger_m3(void *data, u64 val)
{
struct mhi_controller *mhi_cntrl = data;
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
MHI_LOG("Trigger M3 Entry\n");
pm_runtime_mark_last_busy(&mhi_dev->pci_dev->dev);
pm_request_autosuspend(&mhi_dev->pci_dev->dev);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_m0_fops, NULL,
mhi_debugfs_trigger_m0, "%llu\n");
DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_m3_fops, NULL,
mhi_debugfs_trigger_m3, "%llu\n");
static int mhi_init_debugfs_trigger_go(void *data, u64 val)
{
struct mhi_controller *mhi_cntrl = data;
MHI_LOG("Trigger power up sequence\n");
mhi_async_power_up(mhi_cntrl);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(mhi_init_debugfs_trigger_go_fops, NULL,
mhi_init_debugfs_trigger_go, "%llu\n");
int mhi_init_debugfs_debug_show(struct seq_file *m, void *d)
{
seq_puts(m, "Enable debug mode to debug external soc\n");
seq_puts(m,
"Usage: echo 'devid,timeout,domain,smmu_cfg' > debug_mode\n");
seq_puts(m, "No spaces between parameters\n");
seq_puts(m, "\t1. devid : 0 or pci device id to register\n");
seq_puts(m, "\t2. timeout: mhi cmd/state transition timeout\n");
seq_puts(m, "\t3. domain: Rootcomplex\n");
seq_puts(m, "\t4. smmu_cfg: smmu configuration mask:\n");
seq_puts(m, "\t\t- BIT0: ATTACH\n");
seq_puts(m, "\t\t- BIT1: S1 BYPASS\n");
seq_puts(m, "\t\t-BIT2: FAST_MAP\n");
seq_puts(m, "\t\t-BIT3: ATOMIC\n");
seq_puts(m, "\t\t-BIT4: FORCE_COHERENT\n");
seq_puts(m, "\t\t-BIT5: GEOMETRY\n");
seq_puts(m, "\tAll timeout are in ms, enter 0 to keep default\n");
seq_puts(m, "Examples inputs: '0x307,10000'\n");
seq_puts(m, "\techo '0,10000,1'\n");
seq_puts(m, "\techo '0x307,10000,0,0x3d'\n");
seq_puts(m, "firmware image name will be changed to debug.mbn\n");
return 0;
}
static int mhi_init_debugfs_debug_open(struct inode *node, struct file *file)
{
return single_open(file, mhi_init_debugfs_debug_show, NULL);
}
static ssize_t mhi_init_debugfs_debug_write(struct file *fp,
const char __user *ubuf,
size_t count,
loff_t *pos)
{
char *buf = kmalloc(count + 1, GFP_KERNEL);
/* #,devid,timeout,domain,smmu-cfg */
int args[5] = {0};
static char const *dbg_fw = "debug.mbn";
int ret;
struct mhi_controller *mhi_cntrl = fp->f_inode->i_private;
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct pci_device_id *id;
if (!buf)
return -ENOMEM;
ret = copy_from_user(buf, ubuf, count);
if (ret)
goto error_read;
buf[count] = 0;
get_options(buf, ARRAY_SIZE(args), args);
kfree(buf);
/* override default parameters */
mhi_cntrl->fw_image = dbg_fw;
mhi_cntrl->edl_image = dbg_fw;
if (args[0] >= 2 && args[2])
mhi_cntrl->timeout_ms = args[2];
if (args[0] >= 3 && args[3])
mhi_cntrl->domain = args[3];
if (args[0] >= 4 && args[4])
mhi_dev->smmu_cfg = args[4];
/* If it's a new device id register it */
if (args[0] && args[1]) {
/* find the debug_id and overwrite it */
for (id = mhi_pcie_device_id; id->vendor; id++)
if (id->device == MHI_PCIE_DEBUG_ID) {
id->device = args[1];
pci_unregister_driver(&mhi_pcie_driver);
ret = pci_register_driver(&mhi_pcie_driver);
}
}
mhi_dev->debug_mode = true;
debugfs_create_file("go", 0444, mhi_cntrl->parent, mhi_cntrl,
&mhi_init_debugfs_trigger_go_fops);
pr_info(
"%s: ret:%d pcidev:0x%x smm_cfg:%u timeout:%u\n",
__func__, ret, args[1], mhi_dev->smmu_cfg,
mhi_cntrl->timeout_ms);
return count;
error_read:
kfree(buf);
return ret;
}
static const struct file_operations debugfs_debug_ops = {
.open = mhi_init_debugfs_debug_open,
.release = single_release,
.read = seq_read,
.write = mhi_init_debugfs_debug_write,
};
static struct mhi_controller * mhi_platform_probe(struct pci_dev *pci_dev)
{
struct mhi_controller *mhi_cntrl;
struct mhi_dev *mhi_dev;
u64 addr_win[2];
int ret;
mhi_cntrl = mhi_alloc_controller(sizeof(*mhi_dev));
if (!mhi_cntrl) {
pr_err("mhi_alloc_controller fail\n");
return NULL;
}
mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
mhi_cntrl->dev_id = pci_dev->device;
mhi_cntrl->domain = pci_domain_nr(pci_dev->bus);
mhi_cntrl->bus = pci_dev->bus->number;
mhi_cntrl->slot = PCI_SLOT(pci_dev->devfn);
mhi_dev->smmu_cfg = 0;
#if 0 //def CONFIG_HAVE_MEMBLOCK
addr_win[0] = memblock_start_of_DRAM();
addr_win[1] = memblock_end_of_DRAM();
#else
#define MHI_MEM_BASE_DEFAULT 0x000000000
#define MHI_MEM_SIZE_DEFAULT 0x2000000000
addr_win[0] = MHI_MEM_BASE_DEFAULT;
addr_win[1] = MHI_MEM_SIZE_DEFAULT;
if (sizeof(dma_addr_t) == 4) {
addr_win[1] = 0xFFFFFFFF;
}
#endif
mhi_cntrl->iova_start = addr_win[0];
mhi_cntrl->iova_stop = addr_win[1];
mhi_dev->pci_dev = pci_dev;
mhi_cntrl->pci_dev = pci_dev;
/* setup power management apis */
mhi_cntrl->status_cb = mhi_status_cb;
mhi_cntrl->runtime_get = mhi_runtime_get;
mhi_cntrl->runtime_put = mhi_runtime_put;
mhi_cntrl->link_status = mhi_link_status;
ret = mhi_arch_platform_init(mhi_dev);
if (ret)
goto error_probe;
ret = mhi_register_mhi_controller(mhi_cntrl);
if (ret)
goto error_register;
if (mhi_cntrl->parent)
debugfs_create_file("debug_mode", 0444, mhi_cntrl->parent,
mhi_cntrl, &debugfs_debug_ops);
return mhi_cntrl;
error_register:
mhi_arch_platform_deinit(mhi_dev);
error_probe:
mhi_free_controller(mhi_cntrl);
return NULL;
}
int mhi_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *device_id)
{
struct mhi_controller *mhi_cntrl = NULL;
u32 domain = pci_domain_nr(pci_dev->bus);
u32 bus = pci_dev->bus->number;
u32 slot = PCI_SLOT(pci_dev->devfn);
struct mhi_dev *mhi_dev;
int ret;
pr_info("%s pci_dev->name = %s, domain=%d, bus=%d, slot=%d, vendor=%04X, device=%04X\n",
__func__, dev_name(&pci_dev->dev), domain, bus, slot, pci_dev->vendor, pci_dev->device);
mhi_cntrl = mhi_platform_probe(pci_dev);
if (!mhi_cntrl) {
pr_err("mhi_platform_probe fail\n");
return -EPROBE_DEFER;
}
mhi_cntrl->dev_id = pci_dev->device;
mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
mhi_dev->pci_dev = pci_dev;
mhi_dev->powered_on = true;
ret = mhi_arch_pcie_init(mhi_cntrl);
if (ret) {
MHI_ERR("Error mhi_arch_pcie_init, ret:%d\n", ret);
return ret;
}
ret = mhi_arch_iommu_init(mhi_cntrl);
if (ret) {
MHI_ERR("Error mhi_arch_iommu_init, ret:%d\n", ret);
goto error_iommu_init;
}
ret = mhi_init_pci_dev(mhi_cntrl);
if (ret) {
MHI_ERR("Error mhi_init_pci_dev, ret:%d\n", ret);
goto error_init_pci;
}
/* start power up sequence if not in debug mode */
if (!mhi_dev->debug_mode) {
ret = mhi_async_power_up(mhi_cntrl);
if (ret) {
MHI_ERR("Error mhi_async_power_up, ret:%d\n", ret);
goto error_power_up;
}
}
#if 0
pm_runtime_mark_last_busy(&pci_dev->dev);
pm_runtime_allow(&pci_dev->dev);
pm_runtime_disable(&pci_dev->dev);
#endif
if (mhi_cntrl->dentry) {
debugfs_create_file("m0", 0444, mhi_cntrl->dentry, mhi_cntrl,
&debugfs_trigger_m0_fops);
debugfs_create_file("m3", 0444, mhi_cntrl->dentry, mhi_cntrl,
&debugfs_trigger_m3_fops);
}
dev_set_drvdata(&pci_dev->dev, mhi_cntrl);
MHI_LOG("Return successful\n");
return 0;
error_power_up:
mhi_deinit_pci_dev(mhi_cntrl);
error_init_pci:
mhi_arch_iommu_deinit(mhi_cntrl);
error_iommu_init:
mhi_arch_pcie_deinit(mhi_cntrl);
return ret;
}
static void mhi_pci_remove(struct pci_dev *pci_dev)
{
struct mhi_controller *mhi_cntrl = (struct mhi_controller *)dev_get_drvdata(&pci_dev->dev);
if (mhi_cntrl && mhi_cntrl->pci_dev == pci_dev) {
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
MHI_LOG("%s\n", dev_name(&pci_dev->dev));
if (!mhi_dev->debug_mode) {
mhi_power_down(mhi_cntrl, 1);
}
mhi_deinit_pci_dev(mhi_cntrl);
mhi_arch_iommu_deinit(mhi_cntrl);
mhi_arch_pcie_deinit(mhi_cntrl);
mhi_unregister_mhi_controller(mhi_cntrl);
}
}
static const struct dev_pm_ops pm_ops = {
SET_RUNTIME_PM_OPS(mhi_runtime_suspend,
mhi_runtime_resume,
mhi_runtime_idle)
SET_SYSTEM_SLEEP_PM_OPS(mhi_system_suspend, mhi_system_resume)
};
static struct pci_driver mhi_pcie_driver = {
.name = "mhi",
.id_table = mhi_pcie_device_id,
.probe = mhi_pci_probe,
.remove = mhi_pci_remove,
.driver = {
.pm = &pm_ops
}
};
int __init mhi_controller_qcom_init(void)
{
return pci_register_driver(&mhi_pcie_driver);
};
void mhi_controller_qcom_exit(void)
{
pr_info("%s enter\n", __func__);
pci_unregister_driver(&mhi_pcie_driver);
pr_info("%s exit\n", __func__);
}

View File

@ -0,0 +1,92 @@
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MHI_QCOM_
#define _MHI_QCOM_
/* iova cfg bitmask */
#define MHI_SMMU_ATTACH BIT(0)
#define MHI_SMMU_S1_BYPASS BIT(1)
#define MHI_SMMU_FAST BIT(2)
#define MHI_SMMU_ATOMIC BIT(3)
#define MHI_SMMU_FORCE_COHERENT BIT(4)
#define MHI_PCIE_VENDOR_ID (0x17cb)
#define MHI_PCIE_DEBUG_ID (0xffff)
#define MHI_RPM_SUSPEND_TMR_MS (3000)
#define MHI_PCI_BAR_NUM (0)
struct mhi_dev {
struct pci_dev *pci_dev;
u32 smmu_cfg;
int resn;
void *arch_info;
bool powered_on;
bool debug_mode;
};
void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl);
int mhi_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *device_id);
#if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,10,65 ))
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
int rc = dma_set_mask(dev, mask);
if (rc == 0)
dma_set_coherent_mask(dev, mask);
return rc;
}
#endif
static inline int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
mhi_cntrl->dev = &mhi_dev->pci_dev->dev;
return dma_set_mask_and_coherent(mhi_cntrl->dev, DMA_BIT_MASK(64));
}
static inline void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl)
{
}
static inline int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl)
{
return 0;
}
static inline void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl)
{
}
static inline int mhi_arch_platform_init(struct mhi_dev *mhi_dev)
{
return 0;
}
static inline void mhi_arch_platform_deinit(struct mhi_dev *mhi_dev)
{
}
static inline int mhi_arch_link_off(struct mhi_controller *mhi_cntrl,
bool graceful)
{
return 0;
}
static inline int mhi_arch_link_on(struct mhi_controller *mhi_cntrl)
{
return 0;
}
#endif /* _MHI_QCOM_ */

File diff suppressed because it is too large

View File

@ -0,0 +1,44 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.*/
#ifndef _MHI_QTI_
#define _MHI_QTI_
/* iova cfg bitmask */
#define MHI_SMMU_ATTACH BIT(0)
#define MHI_SMMU_S1_BYPASS BIT(1)
#define MHI_SMMU_FAST BIT(2)
#define MHI_SMMU_ATOMIC BIT(3)
#define MHI_SMMU_FORCE_COHERENT BIT(4)
#define MHI_PCIE_VENDOR_ID (0x17cb)
#define MHI_PCIE_DEBUG_ID (0xffff)
/* runtime suspend timer */
#define MHI_RPM_SUSPEND_TMR_MS (2000)
#define MHI_PCI_BAR_NUM (0)
struct mhi_dev {
struct pci_dev *pci_dev;
u32 smmu_cfg;
int resn;
void *arch_info;
bool powered_on;
dma_addr_t iova_start;
dma_addr_t iova_stop;
bool lpm_disabled;
};
void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl);
int mhi_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *device_id);
void mhi_pci_device_removed(struct pci_dev *pci_dev);
int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl);
void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl);
int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl);
void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl);
int mhi_arch_link_off(struct mhi_controller *mhi_cntrl, bool graceful);
int mhi_arch_link_on(struct mhi_controller *mhi_cntrl);
#endif /* _MHI_QTI_ */

View File

@ -0,0 +1 @@
obj-$(CONFIG_MHI_BUS) +=mhi_init.o mhi_main.o mhi_pm.o mhi_boot.o mhi_dtr.o

View File

@ -0,0 +1,890 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. */
#ifndef _MHI_H_
#define _MHI_H_
#define PCIE_MHI_DRIVER_VERSION "V1.3.4"
#define ENABLE_MHI_MON
//#define ENABLE_IP_SW0
#include <linux/miscdevice.h>
typedef enum
{
MHI_CLIENT_LOOPBACK_OUT = 0,
MHI_CLIENT_LOOPBACK_IN = 1,
MHI_CLIENT_SAHARA_OUT = 2,
MHI_CLIENT_SAHARA_IN = 3,
MHI_CLIENT_DIAG_OUT = 4,
MHI_CLIENT_DIAG_IN = 5,
MHI_CLIENT_SSR_OUT = 6,
MHI_CLIENT_SSR_IN = 7,
MHI_CLIENT_QDSS_OUT = 8,
MHI_CLIENT_QDSS_IN = 9,
MHI_CLIENT_EFS_OUT = 10,
MHI_CLIENT_EFS_IN = 11,
MHI_CLIENT_MBIM_OUT = 12,
MHI_CLIENT_MBIM_IN = 13,
MHI_CLIENT_QMI_OUT = 14,
MHI_CLIENT_QMI_IN = 15,
MHI_CLIENT_QMI_2_OUT = 16,
MHI_CLIENT_QMI_2_IN = 17,
MHI_CLIENT_IP_CTRL_1_OUT = 18,
MHI_CLIENT_IP_CTRL_1_IN = 19,
MHI_CLIENT_IPCR_OUT = 20,
MHI_CLIENT_IPCR_IN = 21,
MHI_CLIENT_TEST_FW_OUT = 22,
MHI_CLIENT_TEST_FW_IN = 23,
MHI_CLIENT_RESERVED_0 = 24,
MHI_CLIENT_BOOT_LOG_IN = 25,
MHI_CLIENT_DCI_OUT = 26,
MHI_CLIENT_DCI_IN = 27,
MHI_CLIENT_QBI_OUT = 28,
MHI_CLIENT_QBI_IN = 29,
MHI_CLIENT_RESERVED_1_LOWER = 30,
MHI_CLIENT_RESERVED_1_UPPER = 31,
MHI_CLIENT_DUN_OUT = 32,
MHI_CLIENT_DUN_IN = 33,
MHI_CLIENT_EDL_OUT = 34,
MHI_CLIENT_EDL_IN = 35,
MHI_CLIENT_ADB_FB_OUT = 36,
MHI_CLIENT_ADB_FB_IN = 37,
MHI_CLIENT_RESERVED_2_LOWER = 38,
MHI_CLIENT_RESERVED_2_UPPER = 41,
MHI_CLIENT_CSVT_OUT = 42,
MHI_CLIENT_CSVT_IN = 43,
MHI_CLIENT_SMCT_OUT = 44,
MHI_CLIENT_SMCT_IN = 45,
MHI_CLIENT_IP_SW_0_OUT = 46,
MHI_CLIENT_IP_SW_0_IN = 47,
MHI_CLIENT_IP_SW_1_OUT = 48,
MHI_CLIENT_IP_SW_1_IN = 49,
MHI_CLIENT_RESERVED_3_LOWER = 50,
MHI_CLIENT_RESERVED_3_UPPER = 59,
MHI_CLIENT_TEST_0_OUT = 60,
MHI_CLIENT_TEST_0_IN = 61,
MHI_CLIENT_TEST_1_OUT = 62,
MHI_CLIENT_TEST_1_IN = 63,
MHI_CLIENT_TEST_2_OUT = 64,
MHI_CLIENT_TEST_2_IN = 65,
MHI_CLIENT_TEST_3_OUT = 66,
MHI_CLIENT_TEST_3_IN = 67,
MHI_CLIENT_RESERVED_4_LOWER = 68,
MHI_CLIENT_RESERVED_4_UPPER = 91,
MHI_CLIENT_OEM_0_OUT = 92,
MHI_CLIENT_OEM_0_IN = 93,
MHI_CLIENT_OEM_1_OUT = 94,
MHI_CLIENT_OEM_1_IN = 95,
MHI_CLIENT_OEM_2_OUT = 96,
MHI_CLIENT_OEM_2_IN = 97,
MHI_CLIENT_OEM_3_OUT = 98,
MHI_CLIENT_OEM_3_IN = 99,
MHI_CLIENT_IP_HW_0_OUT = 100,
MHI_CLIENT_IP_HW_0_IN = 101,
MHI_CLIENT_ADPL = 102,
MHI_CLIENT_RESERVED_5_LOWER = 103,
MHI_CLIENT_RESERVED_5_UPPER = 127,
MHI_MAX_CHANNELS = 128
}MHI_CLIENT_CHANNEL_TYPE;
/* Event Ring Index */
typedef enum
{
SW_EVT_RING = 0,
PRIMARY_EVENT_RING = SW_EVT_RING,
#ifdef ENABLE_IP_SW0
SW_0_OUT_EVT_RING,
SW_0_IN_EVT_RING,
#endif
IPA_OUT_EVENT_RING,
IPA_IN_EVENT_RING,
ADPL_EVT_RING,
MAX_EVT_RING_IDX
}MHI_EVT_RING_IDX;
#define MHI_VERSION 0x01000000
#define MHIREGLEN_VALUE 0x100 /* **** WRONG VALUE *** */
#define MHI_MSI_INDEX 1
#define MAX_NUM_MHI_DEVICES 1
#define NUM_MHI_XFER_RINGS 128
#define NUM_MHI_EVT_RINGS MAX_EVT_RING_IDX
#define NUM_MHI_HW_EVT_RINGS 3
#define NUM_MHI_XFER_RING_ELEMENTS 16
#define NUM_MHI_EVT_RING_ELEMENTS (NUM_MHI_IPA_IN_RING_ELEMENTS*2) // must be *2: a full event ring will make the X55 dump
#define NUM_MHI_IPA_IN_RING_ELEMENTS 512
#define NUM_MHI_IPA_OUT_RING_ELEMENTS 512 // UL aggregation is not used, so this is increased
#define NUM_MHI_DIAG_IN_RING_ELEMENTS 128
#define NUM_MHI_SW_IP_RING_ELEMENTS 512
/*
 * If the interrupt moderation time is set to 1ms and more than
 * NUM_MHI_CHAN_RING_ELEMENTS transfers are sent to the modem within that
 * 1ms (e.g. during a firehose upgrade), the modem will not trigger an IRQ
 * for those transfers.
 */
#define NUM_MHI_CHAN_RING_ELEMENTS 32 //8
#define MHI_EVT_CMD_QUEUE_SIZE 160
#define MHI_EVT_STATE_QUEUE_SIZE 128
#define MHI_EVT_XFER_QUEUE_SIZE 1024
#define CHAN_INBOUND(_x) ((_x)%2)
#define CHAN_SBL(_x) (((_x) == MHI_CLIENT_SAHARA_OUT) || \
((_x) == MHI_CLIENT_SAHARA_IN) || \
((_x) == MHI_CLIENT_BOOT_LOG_IN))
#define CHAN_EDL(_x) (((_x) == MHI_CLIENT_EDL_OUT) || \
((_x) == MHI_CLIENT_EDL_IN))
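/*
 * Illustrative note: MHI channel numbering pairs even OUT (host->device)
 * channels with odd IN (device->host) channels, so for example
 * CHAN_INBOUND(MHI_CLIENT_QMI_IN) evaluates to 1 while
 * CHAN_INBOUND(MHI_CLIENT_QMI_OUT) evaluates to 0.
 */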
struct mhi_chan;
struct mhi_event;
struct mhi_ctxt;
struct mhi_cmd;
struct image_info;
struct bhi_vec_entry;
struct mhi_timesync;
struct mhi_buf_info;
/**
* enum MHI_CB - MHI callback
* @MHI_CB_IDLE: MHI entered idle state
* @MHI_CB_PENDING_DATA: New data available for client to process
* @MHI_CB_LPM_ENTER: MHI host entered low power mode
* @MHI_CB_LPM_EXIT: MHI host about to exit low power mode
* @MHI_CB_EE_RDDM: MHI device entered RDDM execution environment
* @MHI_CB_EE_MISSION_MODE: MHI device entered Mission Mode exec env
* @MHI_CB_SYS_ERROR: MHI device entered error state (may recover)
* @MHI_CB_FATAL_ERROR: MHI device entered fatal error
*/
enum MHI_CB {
MHI_CB_IDLE,
MHI_CB_PENDING_DATA,
MHI_CB_LPM_ENTER,
MHI_CB_LPM_EXIT,
MHI_CB_EE_RDDM,
MHI_CB_EE_MISSION_MODE,
MHI_CB_SYS_ERROR,
MHI_CB_FATAL_ERROR,
};
/**
* enum MHI_DEBUG_LEVEL - various debugging levels
*/
enum MHI_DEBUG_LEVEL {
MHI_MSG_LVL_VERBOSE,
MHI_MSG_LVL_INFO,
MHI_MSG_LVL_ERROR,
MHI_MSG_LVL_CRITICAL,
MHI_MSG_LVL_MASK_ALL,
};
/*
GSI_XFER_FLAG_BEI: Block event interrupt
1: Event generated by this ring element must not assert an interrupt to the host
0: Event generated by this ring element must assert an interrupt to the host
GSI_XFER_FLAG_EOT: Interrupt on end of transfer
1: If an EOT condition is encountered when processing this ring element, an event is generated by the device with its completion code set to EOT.
0: If an EOT condition is encountered for this ring element, a completion event is not generated by the device unless IEOB is 1
GSI_XFER_FLAG_EOB: Interrupt on end of block
1: Device notifies host after processing this ring element by sending a completion event
0: Completion event is not required after processing this ring element
GSI_XFER_FLAG_CHAIN: Chain bit that identifies the ring elements in a TD
*/
/**
* enum MHI_FLAGS - Transfer flags
* @MHI_EOB: End of buffer for bulk transfer
* @MHI_EOT: End of transfer
* @MHI_CHAIN: Linked transfer
*/
enum MHI_FLAGS {
MHI_EOB,
MHI_EOT,
MHI_CHAIN,
};
/**
* enum mhi_device_type - Device types
* @MHI_XFER_TYPE: Handles data transfer
* @MHI_TIMESYNC_TYPE: Use for timesync feature
* @MHI_CONTROLLER_TYPE: Control device
*/
enum mhi_device_type {
MHI_XFER_TYPE,
MHI_TIMESYNC_TYPE,
MHI_CONTROLLER_TYPE,
};
/**
* enum mhi_ee - device current execution environment
* @MHI_EE_PBL - device in PBL
* @MHI_EE_SBL - device in SBL
* @MHI_EE_AMSS - device in mission mode (firmware fully loaded)
* @MHI_EE_RDDM - device in ram dump collection mode
* @MHI_EE_WFW - device in WLAN firmware mode
* @MHI_EE_PTHRU - device in PBL but configured in pass thru mode
* @MHI_EE_EDL - device in emergency download mode
*/
enum mhi_ee {
MHI_EE_PBL = 0x0,
MHI_EE_SBL = 0x1,
MHI_EE_AMSS = 0x2,
MHI_EE_RDDM = 0x3,
MHI_EE_WFW = 0x4,
MHI_EE_PTHRU = 0x5,
MHI_EE_EDL = 0x6,
MHI_EE_FP = 0x7, /* FlashProg, Flash Programmer Environment */
MHI_EE_MAX_SUPPORTED = MHI_EE_FP,
MHI_EE_DISABLE_TRANSITION, /* local EE, not related to mhi spec */
MHI_EE_MAX,
};
/**
* enum mhi_dev_state - device current MHI state
*/
enum mhi_dev_state {
MHI_STATE_RESET = 0x0,
MHI_STATE_READY = 0x1,
MHI_STATE_M0 = 0x2,
MHI_STATE_M1 = 0x3,
MHI_STATE_M2 = 0x4,
MHI_STATE_M3 = 0x5,
MHI_STATE_BHI = 0x7,
MHI_STATE_SYS_ERR = 0xFF,
MHI_STATE_MAX,
};
extern const char * const mhi_ee_str[MHI_EE_MAX];
#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \
"INVALID_EE" : mhi_ee_str[ee])
/**
* struct image_info - firmware and rddm table
* @mhi_buf - Contain device firmware and rddm table
* @entries - # of entries in table
*/
struct image_info {
struct mhi_buf *mhi_buf;
struct bhi_vec_entry *bhi_vec;
u32 entries;
};
/**
* struct mhi_controller - Master controller structure for external modem
* @dev: Device associated with this controller
* @of_node: DT that has MHI configuration information
* @regs: Points to base of MHI MMIO register space
* @bhi: Points to base of MHI BHI register space
* @bhie: Points to base of MHI BHIe register space
* @wake_db: MHI WAKE doorbell register address
* @dev_id: PCIe device id of the external device
* @domain: PCIe domain the device connected to
* @bus: PCIe bus the device assigned to
* @slot: PCIe slot for the modem
* @iova_start: IOMMU starting address for data
* @iova_stop: IOMMU stop address for data
* @fw_image: Firmware image name for normal booting
* @edl_image: Firmware image name for emergency download mode
* @fbc_download: MHI host needs to do complete image transfer
* @rddm_size: RAM dump size that host should allocate for debugging purpose
* @sbl_size: SBL image size
* @seg_len: BHIe vector size
* @fbc_image: Points to firmware image buffer
* @rddm_image: Points to RAM dump buffer
* @max_chan: Maximum number of channels the controller supports
* @mhi_chan: Points to channel configuration table
* @lpm_chans: List of channels that require LPM notifications
* @total_ev_rings: Total # of event rings allocated
* @hw_ev_rings: Number of hardware event rings
* @sw_ev_rings: Number of software event rings
* @msi_required: Number of msi required to operate
* @msi_allocated: Number of msi allocated by bus master
* @irq: base irq # to request
* @mhi_event: MHI event ring configurations table
* @mhi_cmd: MHI command ring configurations table
* @mhi_ctxt: MHI device context, shared memory between host and device
* @timeout_ms: Timeout in ms for state transitions
* @pm_state: Power management state
* @ee: MHI device execution environment
* @dev_state: MHI STATE
* @status_cb: CB function to notify various power states to bus master
* @link_status: Query link status in case of abnormal value read from device
* @runtime_get: Async runtime resume function
* @runtime_put: Release votes
* @time_get: Return host time in us
* @lpm_disable: Request controller to disable link level low power modes
* @lpm_enable: Controller may enable link level low power modes again
* @priv_data: Points to bus master's private data
*/
struct mhi_controller {
struct list_head node;
struct mhi_device *mhi_dev;
/* device node for iommu ops */
struct device *dev;
struct device_node *of_node;
/* mmio base */
phys_addr_t base_addr;
void __iomem *regs;
void __iomem *bhi;
void __iomem *bhie;
void __iomem *wake_db;
/* device topology */
u32 vendor;
u32 dev_id;
u32 domain;
u32 bus;
u32 slot;
u32 cntrl_idx;
struct device *cntrl_dev;
/* addressing window */
dma_addr_t iova_start;
dma_addr_t iova_stop;
/* fw images */
const char *fw_image;
const char *edl_image;
/* mhi host manages downloading entire fbc images */
bool fbc_download;
size_t rddm_size;
size_t sbl_size;
size_t seg_len;
u32 session_id;
u32 sequence_id;
struct image_info *fbc_image;
struct image_info *rddm_image;
/* physical channel config data */
u32 max_chan;
struct mhi_chan *mhi_chan;
struct list_head lpm_chans; /* these chan require lpm notification */
/* physical event config data */
u32 total_ev_rings;
u32 hw_ev_rings;
u32 sw_ev_rings;
u32 msi_required;
u32 msi_allocated;
u32 msi_irq_base;
int *irq; /* interrupt table */
struct mhi_event *mhi_event;
/* cmd rings */
struct mhi_cmd *mhi_cmd;
/* mhi context (shared with device) */
struct mhi_ctxt *mhi_ctxt;
u32 timeout_ms;
/* caller should grab pm_mutex for suspend/resume operations */
struct mutex pm_mutex;
bool pre_init;
rwlock_t pm_lock;
u32 pm_state;
enum mhi_ee ee;
enum mhi_dev_state dev_state;
bool wake_set;
atomic_t dev_wake;
atomic_t alloc_size;
atomic_t pending_pkts;
struct list_head transition_list;
spinlock_t transition_lock;
spinlock_t wlock;
/* debug counters */
u32 M0, M2, M3;
/* worker for different state transitions */
struct work_struct st_worker;
struct work_struct fw_worker;
struct work_struct syserr_worker;
struct delayed_work ready_worker;
wait_queue_head_t state_event;
/* shadow functions */
void (*status_cb)(struct mhi_controller *mhi_cntrl, void *priv,
enum MHI_CB reason);
int (*link_status)(struct mhi_controller *mhi_cntrl, void *priv);
void (*wake_get)(struct mhi_controller *mhi_cntrl, bool override);
void (*wake_put)(struct mhi_controller *mhi_cntrl, bool override);
int (*runtime_get)(struct mhi_controller *mhi_cntrl, void *priv);
void (*runtime_put)(struct mhi_controller *mhi_cntrl, void *priv);
void (*runtime_mark_last_busy)(struct mhi_controller *mhi_cntrl, void *priv);
u64 (*time_get)(struct mhi_controller *mhi_cntrl, void *priv);
int (*lpm_disable)(struct mhi_controller *mhi_cntrl, void *priv);
int (*lpm_enable)(struct mhi_controller *mhi_cntrl, void *priv);
int (*map_single)(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf);
void (*unmap_single)(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf);
/* channel to control DTR messaging */
struct mhi_device *dtr_dev;
/* bounce buffer settings */
bool bounce_buf;
size_t buffer_len;
/* supports time sync feature */
struct mhi_timesync *mhi_tsync;
struct mhi_device *tsync_dev;
/* kernel log level */
enum MHI_DEBUG_LEVEL klog_lvl;
int klog_slient;
/* private log level controller driver to set */
enum MHI_DEBUG_LEVEL log_lvl;
/* controller specific data */
void *priv_data;
void *log_buf;
struct dentry *dentry;
struct dentry *parent;
struct miscdevice miscdev;
#ifdef ENABLE_MHI_MON
spinlock_t lock;
/* Ref */
int nreaders; /* Under mon_lock AND mbus->lock */
struct list_head r_list; /* Chain of readers (usually one) */
struct kref ref; /* Under mon_lock */
/* Stats */
unsigned int cnt_events;
unsigned int cnt_text_lost;
#endif
};
#ifdef ENABLE_MHI_MON
struct mhi_tre;
struct mon_reader {
struct list_head r_link;
struct mhi_controller *m_bus;
void *r_data; /* Use container_of instead? */
void (*rnf_submit)(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len);
void (*rnf_receive)(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len);
void (*rnf_complete)(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre);
};
#endif
/**
* struct mhi_device - mhi device structure associated bind to channel
* @dev: Device associated with the channels
* @mtu: Maximum # of bytes the controller supports
* @ul_chan_id: MHI channel id for UL transfer
* @dl_chan_id: MHI channel id for DL transfer
* @tiocm: Device current terminal settings
* @priv: Driver private data
*/
struct mhi_device {
struct device dev;
u32 vendor;
u32 dev_id;
u32 domain;
u32 bus;
u32 slot;
size_t mtu;
int ul_chan_id;
int dl_chan_id;
int ul_event_id;
int dl_event_id;
u32 tiocm;
const struct mhi_device_id *id;
const char *chan_name;
struct mhi_controller *mhi_cntrl;
struct mhi_chan *ul_chan;
struct mhi_chan *dl_chan;
atomic_t dev_wake;
enum mhi_device_type dev_type;
void *priv_data;
int (*ul_xfer)(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan,
void *buf, size_t len, enum MHI_FLAGS flags);
int (*dl_xfer)(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan,
void *buf, size_t size, enum MHI_FLAGS flags);
void (*status_cb)(struct mhi_device *mhi_dev, enum MHI_CB reason);
};
/**
* struct mhi_result - Completed buffer information
* @buf_addr: Address of data buffer
* @dir: Channel direction
* @bytes_xferd: # of bytes transferred
* @transaction_status: Status of the last transfer
*/
struct mhi_result {
void *buf_addr;
enum dma_data_direction dir;
size_t bytes_xferd;
int transaction_status;
};
/**
* struct mhi_buf - Describes the buffer
* @page: buffer as a page
* @buf: cpu address for the buffer
* @phys_addr: physical address of the buffer
* @dma_addr: iommu address for the buffer
* @skb: skb of ip packet
* @len: # of bytes
* @name: Buffer label, for offload channel configurations name must be:
* ECA - Event context array data
* CCA - Channel context array data
*/
struct mhi_buf {
struct list_head node;
struct page *page;
void *buf;
phys_addr_t phys_addr;
dma_addr_t dma_addr;
struct sk_buff *skb;
size_t len;
const char *name; /* ECA, CCA */
};
/**
* struct mhi_driver - mhi driver information
* @id_table: NULL terminated channel ID names
* @ul_xfer_cb: UL data transfer callback
* @dl_xfer_cb: DL data transfer callback
* @status_cb: Asynchronous status callback
*/
struct mhi_driver {
const struct mhi_device_id *id_table;
int (*probe)(struct mhi_device *mhi_dev,
const struct mhi_device_id *id);
void (*remove)(struct mhi_device *mhi_dev);
void (*ul_xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *res);
void (*dl_xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *res);
void (*status_cb)(struct mhi_device *mhi_dev, enum MHI_CB mhi_cb);
struct device_driver driver;
};
#define to_mhi_driver(drv) container_of(drv, struct mhi_driver, driver)
#define to_mhi_device(dev) container_of(dev, struct mhi_device, dev)
static inline void mhi_device_set_devdata(struct mhi_device *mhi_dev,
void *priv)
{
mhi_dev->priv_data = priv;
}
static inline void *mhi_device_get_devdata(struct mhi_device *mhi_dev)
{
return mhi_dev->priv_data;
}
/**
* mhi_queue_transfer - Queue a buffer to hardware
* All transfers are asynchronous
* @mhi_dev: Device associated with the channels
* @dir: Data direction
* @buf: Data buffer (skb for hardware channels)
* @len: Size in bytes
* @mflags: Interrupt flags for the device
*/
static inline int mhi_queue_transfer(struct mhi_device *mhi_dev,
enum dma_data_direction dir,
void *buf,
size_t len,
enum MHI_FLAGS mflags)
{
if (dir == DMA_TO_DEVICE)
return mhi_dev->ul_xfer(mhi_dev, mhi_dev->ul_chan, buf, len,
mflags);
else
return mhi_dev->dl_xfer(mhi_dev, mhi_dev->dl_chan, buf, len,
mflags);
}
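/*
 * Illustrative sketch (not part of this driver): sending one uplink buffer
 * on a prepared channel. The helper name and buffer are hypothetical; the
 * buffer must stay valid until ul_xfer_cb reports completion.
 */
#if 0
static int example_send(struct mhi_device *mhi_dev, void *buf, size_t len)
{
	/* MHI_EOT requests a completion event at end of transfer */
	return mhi_queue_transfer(mhi_dev, DMA_TO_DEVICE, buf, len, MHI_EOT);
}
#endif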
static inline void *mhi_controller_get_devdata(struct mhi_controller *mhi_cntrl)
{
return mhi_cntrl->priv_data;
}
static inline void mhi_free_controller(struct mhi_controller *mhi_cntrl)
{
kfree(mhi_cntrl);
}
/**
* mhi_driver_register - Register driver with MHI framework
* @mhi_drv: mhi_driver structure
*/
int mhi_driver_register(struct mhi_driver *mhi_drv);
/**
* mhi_driver_unregister - Unregister a driver for mhi_devices
* @mhi_drv: mhi_driver structure
*/
void mhi_driver_unregister(struct mhi_driver *mhi_drv);
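/*
 * Illustrative sketch (not part of this driver): the minimal shape of an
 * MHI client driver, modeled on the MHI_DTR driver later in this commit.
 * The channel name "LOOPBACK" and all example_* symbols are assumptions
 * made for illustration.
 */
#if 0
static int example_probe(struct mhi_device *mhi_dev,
			 const struct mhi_device_id *id)
{
	/* move both UL and DL channels from RESET to START */
	return mhi_prepare_for_transfer(mhi_dev);
}

static void example_remove(struct mhi_device *mhi_dev)
{
	mhi_unprepare_from_transfer(mhi_dev);
}

static void example_dl_xfer_cb(struct mhi_device *mhi_dev,
			       struct mhi_result *res)
{
	/* res->buf_addr holds res->bytes_xferd bytes of downlink data */
}

static const struct mhi_device_id example_id_table[] = {
	{ .chan = "LOOPBACK" },
	{},
};

static struct mhi_driver example_driver = {
	.id_table = example_id_table,
	.probe = example_probe,
	.remove = example_remove,
	.dl_xfer_cb = example_dl_xfer_cb,
	.driver = { .name = "MHI_EXAMPLE" },
};
/* registered via mhi_driver_register(&example_driver) from module init */
#endif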
/**
* mhi_device_configure - configure ECA or CCA context
* For offload channels that client manage, call this
* function to configure channel context or event context
* array associated with the channel
* @mhi_div: Device associated with the channels
* @dir: Direction of the channel
* @mhi_buf: Configuration data
* @elements: # of configuration elements
*/
int mhi_device_configure(struct mhi_device *mhi_div,
enum dma_data_direction dir,
struct mhi_buf *mhi_buf,
int elements);
/**
* mhi_device_get - disable all low power modes
* Only disables lpm; it does not immediately exit low power mode
* if the controller is already in a low power mode
* @mhi_dev: Device associated with the channels
*/
void mhi_device_get(struct mhi_device *mhi_dev);
/**
* mhi_device_get_sync - disable all low power modes
* Synchronously disable all low power modes and exit low power mode
* if the controller is already in a low power state
* @mhi_dev: Device associated with the channels
*/
int mhi_device_get_sync(struct mhi_device *mhi_dev);
/**
* mhi_device_put - re-enable low power modes
* @mhi_dev: Device associated with the channels
*/
void mhi_device_put(struct mhi_device *mhi_dev);
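/*
 * Illustrative sketch (not part of this driver): pairing a wake vote with
 * its release around a burst of transfers so the device stays in M0.
 */
#if 0
static void example_burst(struct mhi_device *mhi_dev)
{
	if (mhi_device_get_sync(mhi_dev))	/* exit low power modes now */
		return;
	/* ... queue transfers here ... */
	mhi_device_put(mhi_dev);		/* allow low power modes again */
}
#endif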
/**
* mhi_prepare_for_transfer - setup channel for data transfer
* Moves both UL and DL channel from RESET to START state
* @mhi_dev: Device associated with the channels
*/
int mhi_prepare_for_transfer(struct mhi_device *mhi_dev);
/**
* mhi_unprepare_from_transfer - unprepare the channels
* Moves both UL and DL channels to RESET state
* @mhi_dev: Device associated with the channels
*/
void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev);
/**
* mhi_get_no_free_descriptors - Get transfer ring length
* Get # of TD available to queue buffers
* @mhi_dev: Device associated with the channels
* @dir: Direction of the channel
*/
int mhi_get_no_free_descriptors(struct mhi_device *mhi_dev,
enum dma_data_direction dir);
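/*
 * Illustrative sketch (not part of this driver): pre-queueing one downlink
 * buffer per free transfer descriptor, the same pattern used by
 * mhi_dtr_queue_inbound() later in this commit.
 */
#if 0
static int example_queue_inbound(struct mhi_device *mhi_dev)
{
	int i, ret = 0;
	int nr_trbs = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);

	for (i = 0; i < nr_trbs; i++) {
		void *buf = kmalloc(mhi_dev->mtu, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;
		ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, buf,
					 mhi_dev->mtu, MHI_EOT);
		if (ret) {
			kfree(buf);
			break;
		}
	}
	return ret;
}
#endif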
/**
* mhi_poll - poll for any available data to consume
* This is only applicable for DL direction
* @mhi_dev: Device associated with the channels
* @budget: Max number of descriptors to service before returning
*/
int mhi_poll(struct mhi_device *mhi_dev, u32 budget);
/**
* mhi_ioctl - user space IOCTL support for MHI channels
* Native support for setting TIOCM
* @mhi_dev: Device associated with the channels
* @cmd: IOCTL cmd
* @arg: Optional parameter, ioctl cmd specific
*/
long mhi_ioctl(struct mhi_device *mhi_dev, unsigned int cmd, unsigned long arg);
/**
* mhi_alloc_controller - Allocate mhi_controller structure
* Allocate controller structure and additional data for controller
* private data. You may get the private data pointer by calling
* mhi_controller_get_devdata
* @size: # of additional bytes to allocate
*/
struct mhi_controller *mhi_alloc_controller(size_t size);
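/*
 * Illustrative sketch (not part of this driver): allocating a controller
 * with trailing private data, as a PCIe glue driver would. struct
 * example_priv is hypothetical; per the comment above, the extra bytes
 * are reachable through mhi_controller_get_devdata().
 */
#if 0
struct example_priv {
	int irq;
};

static struct mhi_controller *example_alloc(void)
{
	struct mhi_controller *mhi_cntrl;
	struct example_priv *priv;

	mhi_cntrl = mhi_alloc_controller(sizeof(*priv));
	if (!mhi_cntrl)
		return NULL;
	priv = mhi_controller_get_devdata(mhi_cntrl);
	priv->irq = -1;
	return mhi_cntrl;
}
#endif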
/**
* of_register_mhi_controller - Register MHI controller
* Registers MHI controller with MHI bus framework. DT must be supported
* @mhi_cntrl: MHI controller to register
*/
int of_register_mhi_controller(struct mhi_controller *mhi_cntrl);
void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl);
/**
* mhi_bdf_to_controller - Look up a registered controller
* Search for controller based on device identification
* @domain: RC domain of the device
* @bus: Bus device connected to
* @slot: Slot device assigned to
* @dev_id: Device Identification
*/
struct mhi_controller *mhi_bdf_to_controller(u32 domain, u32 bus, u32 slot,
u32 dev_id);
/**
* mhi_prepare_for_power_up - Do pre-initialization before power up
* This is optional; call it before power up if the controller does not
* want the bus framework to automatically free any allocated memory
* during the shutdown process.
* @mhi_cntrl: MHI controller
*/
int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl);
/**
* mhi_async_power_up - Starts MHI power up sequence
* @mhi_cntrl: MHI controller
*/
int mhi_async_power_up(struct mhi_controller *mhi_cntrl);
int mhi_sync_power_up(struct mhi_controller *mhi_cntrl);
/**
* mhi_power_down - Start MHI power down sequence
* @mhi_cntrl: MHI controller
* @graceful: if the link is still accessible, do a graceful shutdown;
* otherwise we shut down the host w/o putting the device into RESET state
*/
void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful);
/**
* mhi_unprepare_after_power_down - free any memory allocated for power up
* @mhi_cntrl: MHI controller
*/
void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl);
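/*
 * Illustrative sketch (not part of this driver): the typical bring-up /
 * tear-down sequence implied by the declarations above.
 */
#if 0
static int example_power_cycle(struct mhi_controller *mhi_cntrl)
{
	int ret;

	ret = mhi_prepare_for_power_up(mhi_cntrl);	/* optional pre-init */
	if (ret)
		return ret;
	ret = mhi_sync_power_up(mhi_cntrl);		/* boot the device */
	if (ret)
		goto unprepare;
	/* ... channels are usable here ... */
	mhi_power_down(mhi_cntrl, true);		/* graceful shutdown */
unprepare:
	mhi_unprepare_after_power_down(mhi_cntrl);	/* free pre-init memory */
	return ret;
}
#endif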
/**
* mhi_pm_suspend - Move MHI into a suspended state
* Transition to MHI M3 state from M0/M1/M2 state
* @mhi_cntrl: MHI controller
*/
int mhi_pm_suspend(struct mhi_controller *mhi_cntrl);
/**
* mhi_pm_resume - Resume MHI from suspended state
* Transition to MHI M0 state from M3 state
* @mhi_cntrl: MHI controller
*/
int mhi_pm_resume(struct mhi_controller *mhi_cntrl);
/**
* mhi_download_rddm_img - Download ramdump image from device for
* debugging purpose.
* @mhi_cntrl: MHI controller
* @in_panic: If we are trying to capture the image during a kernel panic
*/
int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic);
/**
* mhi_force_rddm_mode - Force external device into rddm mode
* to collect device ramdump. This is useful if the host driver asserts
* and we need to see the device state as well.
* @mhi_cntrl: MHI controller
*/
int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl);
/**
* mhi_get_remote_time_sync - Get external soc time relative to local soc time
* using MMIO method.
* @mhi_dev: Device associated with the channels
* @t_host: Pointer to output local soc time
* @t_dev: Pointer to output remote soc time
*/
int mhi_get_remote_time_sync(struct mhi_device *mhi_dev,
u64 *t_host,
u64 *t_dev);
/**
* mhi_get_mhi_state - Return MHI state of device
* @mhi_cntrl: MHI controller
*/
enum mhi_dev_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl);
/**
* mhi_set_mhi_state - Set device state
* @mhi_cntrl: MHI controller
* @state: state to set
*/
void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl,
enum mhi_dev_state state);
/**
* mhi_is_active - helper function to determine if MHI in active state
* @mhi_dev: client device
*/
static inline bool mhi_is_active(struct mhi_device *mhi_dev)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
return (mhi_cntrl->dev_state >= MHI_STATE_M0 &&
mhi_cntrl->dev_state <= MHI_STATE_M3);
}
/**
* mhi_debug_reg_dump - dump MHI registers for debug purpose
* @mhi_cntrl: MHI controller
*/
void mhi_debug_reg_dump(struct mhi_controller *mhi_cntrl);
#ifdef CONFIG_MHI_DEBUG
#define MHI_VERB(fmt, ...) do { \
if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_VERBOSE) \
pr_debug("[D][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__);\
} while (0)
#else
#define MHI_VERB(fmt, ...)
#endif
#define MHI_LOG(fmt, ...) do { \
if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_INFO) \
pr_info("[I][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__);\
else if (!mhi_cntrl->klog_slient) \
printk(KERN_DEBUG "[I][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__);\
} while (0)
#define MHI_ERR(fmt, ...) do { \
if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_ERROR) \
pr_err("[E][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__); \
} while (0)
#define MHI_CRITICAL(fmt, ...) do { \
if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_CRITICAL) \
pr_alert("[C][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__); \
} while (0)
int mhi_register_mhi_controller(struct mhi_controller *mhi_cntrl);
void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl);
#ifndef MHI_NAME_SIZE
#define MHI_NAME_SIZE 32
/**
* struct mhi_device_id - MHI device identification
* @chan: MHI channel name
* @driver_data: driver data
*/
struct mhi_device_id {
const char chan[MHI_NAME_SIZE];
unsigned long driver_data;
};
#endif
#endif /* _MHI_H_ */

View File

@ -0,0 +1,846 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include "mhi.h"
#include "mhi_internal.h"
/* Software defines */
/* BHI Version */
#define BHI_MAJOR_VERSION 0x1
#define BHI_MINOR_VERSION 0x1
#define MSMHWID_NUMDWORDS 6 /* Number of dwords that make the MSMHWID */
#define OEMPKHASH_NUMDWORDS 48 /* Number of dwords that make the OEM PK HASH */
#define IsPBLExecEnv(ExecEnv) ((ExecEnv == MHI_EE_PBL) || (ExecEnv == MHI_EE_EDL) )
typedef u32 ULONG;
typedef struct _bhi_info_type
{
ULONG bhi_ver_minor;
ULONG bhi_ver_major;
ULONG bhi_image_address_low;
ULONG bhi_image_address_high;
ULONG bhi_image_size;
ULONG bhi_rsvd1;
ULONG bhi_imgtxdb;
ULONG bhi_rsvd2;
ULONG bhi_msivec;
ULONG bhi_rsvd3;
ULONG bhi_ee;
ULONG bhi_status;
ULONG bhi_errorcode;
ULONG bhi_errdbg1;
ULONG bhi_errdbg2;
ULONG bhi_errdbg3;
ULONG bhi_sernum;
ULONG bhi_sblantirollbackver;
ULONG bhi_numsegs;
ULONG bhi_msmhwid[6];
ULONG bhi_oempkhash[48];
ULONG bhi_rsvd5;
}BHI_INFO_TYPE, *PBHI_INFO_TYPE;
static void PrintBhiInfo(struct mhi_controller *mhi_cntrl, BHI_INFO_TYPE *bhi_info)
{
ULONG index;
char str[128];
MHI_LOG("BHI Device Info...\n");
MHI_LOG("BHI Version = { Major = 0x%X Minor = 0x%X}\n", bhi_info->bhi_ver_major, bhi_info->bhi_ver_minor);
MHI_LOG("BHI Execution Environment = 0x%X\n", bhi_info->bhi_ee);
MHI_LOG("BHI Status = 0x%X\n", bhi_info->bhi_status);
MHI_LOG("BHI Error code = 0x%X { Dbg1 = 0x%X Dbg2 = 0x%X Dbg3 = 0x%X }\n", bhi_info->bhi_errorcode, bhi_info->bhi_errdbg1, bhi_info->bhi_errdbg2, bhi_info->bhi_errdbg3);
MHI_LOG("BHI Serial Number = 0x%X\n", bhi_info->bhi_sernum);
MHI_LOG("BHI SBL Anti-Rollback Ver = 0x%X\n", bhi_info->bhi_sblantirollbackver);
MHI_LOG("BHI Number of Segments = 0x%X\n", bhi_info->bhi_numsegs);
for (index = 0; index < 6; index++)
{
snprintf(str+3*index, sizeof(str)-3*index, "%02x ", bhi_info->bhi_msmhwid[index]);
}
MHI_LOG("BHI MSM HW-Id = %s\n", str);
for (index = 0; index < 24; index++)
{
snprintf(str+3*index, sizeof(str)-3*index, "%02x ", bhi_info->bhi_oempkhash[index]);
}
MHI_LOG("BHI OEM PK Hash = %s\n", str);
}
static u32 bhi_read_reg(struct mhi_controller *mhi_cntrl, u32 offset)
{
u32 out = 0;
/* honor the requested offset (was hardcoded to BHI_EXECENV) */
int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, offset, &out);
return (ret) ? 0 : out;
}
static int BhiRead(struct mhi_controller *mhi_cntrl, BHI_INFO_TYPE *bhi_info)
{
ULONG index;
memset(bhi_info, 0x00, sizeof(BHI_INFO_TYPE));
/* bhi_ver */
bhi_info->bhi_ver_minor = bhi_read_reg(mhi_cntrl, BHI_BHIVERSION_MINOR);
bhi_info->bhi_ver_major = bhi_read_reg(mhi_cntrl, BHI_BHIVERSION_MAJOR); /* was mistakenly reading the MINOR register */
bhi_info->bhi_image_address_low = bhi_read_reg(mhi_cntrl, BHI_IMGADDR_LOW);
bhi_info->bhi_image_address_high = bhi_read_reg(mhi_cntrl, BHI_IMGADDR_HIGH);
bhi_info->bhi_image_size = bhi_read_reg(mhi_cntrl, BHI_IMGSIZE);
bhi_info->bhi_rsvd1 = bhi_read_reg(mhi_cntrl, BHI_RSVD1);
bhi_info->bhi_imgtxdb = bhi_read_reg(mhi_cntrl, BHI_IMGTXDB);
bhi_info->bhi_rsvd2 = bhi_read_reg(mhi_cntrl, BHI_RSVD2);
bhi_info->bhi_msivec = bhi_read_reg(mhi_cntrl, BHI_INTVEC);
bhi_info->bhi_rsvd3 = bhi_read_reg(mhi_cntrl, BHI_RSVD3);
bhi_info->bhi_ee = bhi_read_reg(mhi_cntrl, BHI_EXECENV);
bhi_info->bhi_status = bhi_read_reg(mhi_cntrl, BHI_STATUS);
bhi_info->bhi_errorcode = bhi_read_reg(mhi_cntrl, BHI_ERRCODE);
bhi_info->bhi_errdbg1 = bhi_read_reg(mhi_cntrl, BHI_ERRDBG1);
bhi_info->bhi_errdbg2 = bhi_read_reg(mhi_cntrl, BHI_ERRDBG2);
bhi_info->bhi_errdbg3 = bhi_read_reg(mhi_cntrl, BHI_ERRDBG3);
bhi_info->bhi_sernum = bhi_read_reg(mhi_cntrl, BHI_SERIALNU);
bhi_info->bhi_sblantirollbackver = bhi_read_reg(mhi_cntrl, BHI_SBLANTIROLLVER);
bhi_info->bhi_numsegs = bhi_read_reg(mhi_cntrl, BHI_NUMSEG);
for (index = 0; index < MSMHWID_NUMDWORDS; index++)
{
bhi_info->bhi_msmhwid[index] = bhi_read_reg(mhi_cntrl, BHI_MSMHWID(index));
}
for (index = 0; index < OEMPKHASH_NUMDWORDS; index++)
{
bhi_info->bhi_oempkhash[index] = bhi_read_reg(mhi_cntrl, BHI_OEMPKHASH(index));
}
bhi_info->bhi_rsvd5 = bhi_read_reg(mhi_cntrl, BHI_RSVD5);
PrintBhiInfo(mhi_cntrl, bhi_info);
/* Check the Execution Environment */
if (!IsPBLExecEnv(bhi_info->bhi_ee))
{
MHI_LOG("E - EE: 0x%X Expected PBL/EDL\n", bhi_info->bhi_ee);
}
/* success; registers that failed to read are left as 0 */
return 0;
}
/* setup rddm vector table for rddm transfer */
static void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
struct image_info *img_info)
{
struct mhi_buf *mhi_buf = img_info->mhi_buf;
struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
int i = 0;
for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
MHI_VERB("Setting vector:%pad size:%zu\n",
&mhi_buf->dma_addr, mhi_buf->len);
bhi_vec->dma_addr = mhi_buf->dma_addr;
bhi_vec->size = mhi_buf->len;
}
}
/* collect rddm during kernel panic */
static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
{
int ret;
struct mhi_buf *mhi_buf;
u32 sequence_id;
u32 rx_status;
enum mhi_ee ee;
struct image_info *rddm_image = mhi_cntrl->rddm_image;
const u32 delayus = 2000;
u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
const u32 rddm_timeout_us = 200000;
int rddm_retry = rddm_timeout_us / delayus; /* time to enter rddm */
void __iomem *base = mhi_cntrl->bhie;
MHI_LOG("Entered with pm_state:%s dev_state:%s ee:%s\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state),
TO_MHI_STATE_STR(mhi_cntrl->dev_state),
TO_MHI_EXEC_STR(mhi_cntrl->ee));
/*
* This should only be executing during a kernel panic, we expect all
* other cores to shutdown while we're collecting rddm buffer. After
* returning from this function, we expect device to reset.
*
* Normally, we would read/write pm_state only after grabbing
* pm_lock; since we're in a panic, we skip it. Also there is no
* guarantee this state change will take effect since
* we're setting it w/o grabbing pm_lock; it's best effort
*/
mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
/* update should take the effect immediately */
smp_wmb();
/* setup the RX vector table */
mhi_rddm_prepare(mhi_cntrl, rddm_image);
mhi_buf = &rddm_image->mhi_buf[rddm_image->entries - 1];
MHI_LOG("Starting BHIe programming for RDDM\n");
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
upper_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
lower_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
if (unlikely(!sequence_id))
sequence_id = 1;
mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
sequence_id);
MHI_LOG("Trigger device into RDDM mode\n");
mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
MHI_LOG("Waiting for device to enter RDDM\n");
while (rddm_retry--) {
ee = mhi_get_exec_env(mhi_cntrl);
if (ee == MHI_EE_RDDM)
break;
udelay(delayus);
}
if (rddm_retry <= 0) {
/* This is a hardware reset, will force device to enter rddm */
MHI_LOG(
"Did not enter RDDM triggering host req. reset to force rddm\n");
mhi_write_reg(mhi_cntrl, mhi_cntrl->regs,
MHI_SOC_RESET_REQ_OFFSET, MHI_SOC_RESET_REQ);
udelay(delayus);
}
ee = mhi_get_exec_env(mhi_cntrl);
MHI_LOG("Waiting for image download completion, current EE:%s\n",
TO_MHI_EXEC_STR(ee));
while (retry--) {
ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS,
BHIE_RXVECSTATUS_STATUS_BMSK,
BHIE_RXVECSTATUS_STATUS_SHFT,
&rx_status);
if (ret)
return -EIO;
if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) {
MHI_LOG("RDDM successfully collected\n");
return 0;
}
udelay(delayus);
}
ee = mhi_get_exec_env(mhi_cntrl);
ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status);
MHI_ERR("Did not complete RDDM transfer\n");
MHI_ERR("Current EE:%s\n", TO_MHI_EXEC_STR(ee));
MHI_ERR("RXVEC_STATUS:0x%x, ret:%d\n", rx_status, ret);
return -EIO;
}
/* download ramdump image from device */
int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
{
void __iomem *base = mhi_cntrl->bhie;
rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
struct image_info *rddm_image = mhi_cntrl->rddm_image;
struct mhi_buf *mhi_buf;
int ret;
u32 rx_status;
u32 sequence_id;
if (!rddm_image)
return -ENOMEM;
if (in_panic)
return __mhi_download_rddm_in_panic(mhi_cntrl);
MHI_LOG("Waiting for device to enter RDDM state from EE:%s\n",
TO_MHI_EXEC_STR(mhi_cntrl->ee));
ret = wait_event_timeout(mhi_cntrl->state_event,
mhi_cntrl->ee == MHI_EE_RDDM ||
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
MHI_ERR("MHI is not in valid state, pm_state:%s ee:%s\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state),
TO_MHI_EXEC_STR(mhi_cntrl->ee));
return -EIO;
}
mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
/* vector table is the last entry */
mhi_buf = &rddm_image->mhi_buf[rddm_image->entries - 1];
read_lock_bh(pm_lock);
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
read_unlock_bh(pm_lock);
return -EIO;
}
MHI_LOG("Starting BHIe Programming for RDDM\n");
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
upper_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
lower_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
sequence_id);
read_unlock_bh(pm_lock);
MHI_LOG("Upper:0x%x Lower:0x%x len:0x%zx sequence:%u\n",
upper_32_bits(mhi_buf->dma_addr),
lower_32_bits(mhi_buf->dma_addr),
mhi_buf->len, sequence_id);
MHI_LOG("Waiting for image download completion\n");
/* waiting for image download completion */
wait_event_timeout(mhi_cntrl->state_event,
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
mhi_read_reg_field(mhi_cntrl, base,
BHIE_RXVECSTATUS_OFFS,
BHIE_RXVECSTATUS_STATUS_BMSK,
BHIE_RXVECSTATUS_STATUS_SHFT,
&rx_status) || rx_status,
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
return -EIO;
return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
}
EXPORT_SYMBOL(mhi_download_rddm_img);
static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
const struct mhi_buf *mhi_buf)
{
void __iomem *base = mhi_cntrl->bhie;
rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
u32 tx_status;
read_lock_bh(pm_lock);
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
read_unlock_bh(pm_lock);
return -EIO;
}
MHI_LOG("Starting BHIe Programming\n");
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
upper_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS,
lower_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);
mhi_cntrl->sequence_id = prandom_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK;
mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT,
mhi_cntrl->sequence_id);
read_unlock_bh(pm_lock);
MHI_LOG("Upper:0x%x Lower:0x%x len:0x%zx sequence:%u\n",
upper_32_bits(mhi_buf->dma_addr),
lower_32_bits(mhi_buf->dma_addr),
mhi_buf->len, mhi_cntrl->sequence_id);
MHI_LOG("Waiting for image transfer completion\n");
/* waiting for image download completion */
wait_event_timeout(mhi_cntrl->state_event,
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
mhi_read_reg_field(mhi_cntrl, base,
BHIE_TXVECSTATUS_OFFS,
BHIE_TXVECSTATUS_STATUS_BMSK,
BHIE_TXVECSTATUS_STATUS_SHFT,
&tx_status) || tx_status,
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
return -EIO;
return (tx_status == BHIE_TXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
}
static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
dma_addr_t dma_addr,
size_t size)
{
u32 tx_status, val;
u32 ImgTxDb = 0x1;
int i, ret;
void __iomem *base = mhi_cntrl->bhi;
rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
struct {
char *name;
u32 offset;
} error_reg[] = {
{ "ERROR_CODE", BHI_ERRCODE },
{ "ERROR_DBG1", BHI_ERRDBG1 },
{ "ERROR_DBG2", BHI_ERRDBG2 },
{ "ERROR_DBG3", BHI_ERRDBG3 },
{ NULL },
};
MHI_LOG("Starting BHI programming\n");
/* program start sbl download via bhi protocol */
read_lock_bh(pm_lock);
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
read_unlock_bh(pm_lock);
goto invalid_pm_state;
}
mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
upper_32_bits(dma_addr));
mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
lower_32_bits(dma_addr));
mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT, NUM_MHI_EVT_RINGS);
mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT, NUM_MHI_HW_EVT_RINGS);
mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, mhi_cntrl->msi_irq_base);
mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, ImgTxDb);
read_unlock_bh(pm_lock);
MHI_LOG("Waiting for image transfer completion\n");
/* waiting for image download completion */
ret = wait_event_timeout(mhi_cntrl->state_event,
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS,
BHI_STATUS_MASK, BHI_STATUS_SHIFT,
&tx_status) || tx_status,
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
goto invalid_pm_state;
if (tx_status == BHI_STATUS_ERROR) {
MHI_ERR("Image transfer failed\n");
read_lock_bh(pm_lock);
if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
for (i = 0; error_reg[i].name; i++) {
ret = mhi_read_reg(mhi_cntrl, base,
error_reg[i].offset, &val);
if (ret)
break;
MHI_ERR("reg:%s value:0x%x\n",
error_reg[i].name, val);
}
}
read_unlock_bh(pm_lock);
goto invalid_pm_state;
}
return (tx_status == BHI_STATUS_SUCCESS) ? 0 : -ETIMEDOUT;
invalid_pm_state:
return -EIO;
}
void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
struct image_info *image_info)
{
int i;
struct mhi_buf *mhi_buf = image_info->mhi_buf;
for (i = 0; i < image_info->entries; i++, mhi_buf++)
mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
mhi_buf->dma_addr);
kfree(image_info->mhi_buf);
kfree(image_info);
}
int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
struct image_info **image_info,
size_t alloc_size)
{
size_t seg_size = mhi_cntrl->seg_len;
/* require an additional entry for the vector table */
int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1;
int i;
struct image_info *img_info;
struct mhi_buf *mhi_buf;
MHI_LOG("Allocating bytes:%zu seg_size:%zu total_seg:%u\n",
alloc_size, seg_size, segments);
img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
if (!img_info)
return -ENOMEM;
/* allocate memory for entries */
img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf),
GFP_KERNEL);
if (!img_info->mhi_buf)
goto error_alloc_mhi_buf;
/* allocate and populate vector table */
mhi_buf = img_info->mhi_buf;
for (i = 0; i < segments; i++, mhi_buf++) {
size_t vec_size = seg_size;
/* last entry is for vector table */
if (i == segments - 1)
vec_size = sizeof(struct bhi_vec_entry) * i;
mhi_buf->len = vec_size;
mhi_buf->buf = mhi_alloc_coherent(mhi_cntrl, vec_size,
&mhi_buf->dma_addr, GFP_KERNEL);
if (!mhi_buf->buf)
goto error_alloc_segment;
MHI_LOG("Entry:%d Address:0x%llx size:%zu\n", i,
(unsigned long long)mhi_buf->dma_addr,
mhi_buf->len);
}
img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf;
img_info->entries = segments;
*image_info = img_info;
MHI_LOG("Successfully allocated bhi vec table\n");
return 0;
error_alloc_segment:
for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
mhi_buf->dma_addr);
error_alloc_mhi_buf:
kfree(img_info);
return -ENOMEM;
}
static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
const struct firmware *firmware,
struct image_info *img_info)
{
size_t remainder = firmware->size;
size_t to_cpy;
const u8 *buf = firmware->data;
int i = 0;
struct mhi_buf *mhi_buf = img_info->mhi_buf;
struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
while (remainder) {
MHI_ASSERT(i >= img_info->entries, "malformed vector table");
to_cpy = min(remainder, mhi_buf->len);
memcpy(mhi_buf->buf, buf, to_cpy);
bhi_vec->dma_addr = mhi_buf->dma_addr;
bhi_vec->size = to_cpy;
MHI_VERB("Setting Vector:0x%llx size: %llu\n",
bhi_vec->dma_addr, bhi_vec->size);
buf += to_cpy;
remainder -= to_cpy;
i++;
bhi_vec++;
mhi_buf++;
}
}
void mhi_fw_load_worker(struct work_struct *work)
{
int ret;
struct mhi_controller *mhi_cntrl;
const char *fw_name;
const struct firmware *firmware;
struct image_info *image_info;
void *buf;
dma_addr_t dma_addr;
size_t size;
mhi_cntrl = container_of(work, struct mhi_controller, fw_worker);
MHI_LOG("Waiting for device to enter PBL from EE:%s\n",
TO_MHI_EXEC_STR(mhi_cntrl->ee));
ret = wait_event_timeout(mhi_cntrl->state_event,
MHI_IN_PBL(mhi_cntrl->ee) ||
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
MHI_ERR("MHI is not in valid state\n");
return;
}
MHI_LOG("Device current EE:%s\n", TO_MHI_EXEC_STR(mhi_cntrl->ee));
/* if device in pthru, we do not have to load firmware */
if (mhi_cntrl->ee == MHI_EE_PTHRU)
return;
fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ?
mhi_cntrl->edl_image : mhi_cntrl->fw_image;
if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size ||
!mhi_cntrl->seg_len))) {
MHI_ERR("No firmware image defined or !sbl_size || !seg_len\n");
return;
}
ret = request_firmware(&firmware, fw_name, mhi_cntrl->dev);
if (ret) {
MHI_ERR("Error loading firmware, ret:%d\n", ret);
return;
}
size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size;
/* the sbl size provided is maximum size, not necessarily image size */
if (size > firmware->size)
size = firmware->size;
buf = mhi_alloc_coherent(mhi_cntrl, size, &dma_addr, GFP_KERNEL);
if (!buf) {
MHI_ERR("Could not allocate memory for image\n");
release_firmware(firmware);
return;
}
/* load sbl image */
memcpy(buf, firmware->data, size);
ret = mhi_fw_load_sbl(mhi_cntrl, dma_addr, size);
mhi_free_coherent(mhi_cntrl, size, buf, dma_addr);
if (!mhi_cntrl->fbc_download || ret || mhi_cntrl->ee == MHI_EE_EDL)
release_firmware(firmware);
/* error or in edl, we're done */
if (ret || mhi_cntrl->ee == MHI_EE_EDL)
return;
write_lock_irq(&mhi_cntrl->pm_lock);
mhi_cntrl->dev_state = MHI_STATE_RESET;
write_unlock_irq(&mhi_cntrl->pm_lock);
/*
* if we're doing fbc, populate vector tables while
* device transitioning into MHI READY state
*/
if (mhi_cntrl->fbc_download) {
ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image,
firmware->size);
if (ret) {
MHI_ERR("Error alloc size of %zu\n", firmware->size);
goto error_alloc_fw_table;
}
MHI_LOG("Copying firmware image into vector table\n");
/* load the firmware into BHIE vec table */
mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image);
}
/* transitioning into MHI RESET->READY state */
ret = mhi_ready_state_transition(mhi_cntrl);
MHI_LOG("To Reset->Ready PM_STATE:%s MHI_STATE:%s EE:%s, ret:%d\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state),
TO_MHI_STATE_STR(mhi_cntrl->dev_state),
TO_MHI_EXEC_STR(mhi_cntrl->ee), ret);
if (!mhi_cntrl->fbc_download)
return;
if (ret) {
MHI_ERR("Did not transition to READY state\n");
goto error_read;
}
/* wait for SBL event */
ret = wait_event_timeout(mhi_cntrl->state_event,
mhi_cntrl->ee == MHI_EE_SBL ||
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
MHI_ERR("MHI did not enter BHIE\n");
goto error_read;
}
/* start full firmware image download */
image_info = mhi_cntrl->fbc_image;
ret = mhi_fw_load_amss(mhi_cntrl,
/* last entry is vec table */
&image_info->mhi_buf[image_info->entries - 1]);
MHI_LOG("amss fw_load, ret:%d\n", ret);
release_firmware(firmware);
return;
error_read:
mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
mhi_cntrl->fbc_image = NULL;
error_alloc_fw_table:
release_firmware(firmware);
}
int BhiWrite(struct mhi_controller *mhi_cntrl, void __user *ubuf, size_t size)
{
int ret;
dma_addr_t dma_addr;
void *dma_buf;
MHI_LOG("Device current EE:%s, M:%s, PM:%s\n",
TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl)),
TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)),
to_mhi_pm_state_str(mhi_cntrl->pm_state));
#if 0
if (mhi_get_exec_env(mhi_cntrl) == MHI_EE_EDL && mhi_cntrl->ee != MHI_EE_EDL) {
mhi_cntrl->ee = MHI_EE_EDL;
wait_event_timeout(mhi_cntrl->state_event,
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms + 500));
}
#endif
#if 0
if (!MHI_IN_PBL(mhi_cntrl->ee) || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
MHI_ERR("MHI is not in valid BHI state\n");
return -EINVAL;
}
#endif
if (mhi_cntrl->ee != MHI_EE_EDL) {
MHI_ERR("MHI is not in EDL state\n");
return -EINVAL;
}
dma_buf = mhi_alloc_coherent(mhi_cntrl, size, &dma_addr, GFP_KERNEL);
if (!dma_buf) {
MHI_ERR("Could not allocate memory for image\n");
return -ENOMEM;
}
ret = copy_from_user(dma_buf, ubuf, size);
if (ret) {
MHI_ERR("IOCTL_BHI_WRITEIMAGE copy buf error, ret = %d\n", ret);
mhi_free_coherent(mhi_cntrl, size, dma_buf, dma_addr);
return ret;
}
ret = mhi_fw_load_sbl(mhi_cntrl, dma_addr, size);
mhi_free_coherent(mhi_cntrl, size, dma_buf, dma_addr);
if (ret) {
MHI_ERR("ret = %d, ee=%d\n", ret, mhi_cntrl->ee);
goto error_state;
}
write_lock_irq(&mhi_cntrl->pm_lock);
mhi_cntrl->dev_state = MHI_STATE_RESET;
write_unlock_irq(&mhi_cntrl->pm_lock);
/* transitioning into MHI RESET->READY state */
ret = mhi_ready_state_transition(mhi_cntrl);
if (ret) {
MHI_ERR("Did not transition to READY state\n");
goto error_state;
}
MHI_LOG("To Reset->Ready PM_STATE:%s MHI_STATE:%s EE:%s, ret:%d\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state),
TO_MHI_STATE_STR(mhi_cntrl->dev_state),
TO_MHI_EXEC_STR(mhi_cntrl->ee), ret);
/* wait for the Flash Programmer Environment event */
ret = wait_event_timeout(mhi_cntrl->state_event,
mhi_cntrl->ee == MHI_EE_FP ||
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
MHI_ERR("MHI did not enter Flash Programmer Environment\n");
goto error_state;
}
MHI_LOG("MHI enter Flash Programmer Environment\n");
return 0;
error_state:
MHI_LOG("Device current EE:%s, M:%s\n",
TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl)),
TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)));
return ret;
}
long bhi_get_dev_info(struct mhi_controller *mhi_cntrl, void __user *ubuf)
{
long ret = -EINVAL;
BHI_INFO_TYPE bhi_info;
ret = BhiRead(mhi_cntrl, &bhi_info);
if (ret) {
MHI_ERR("IOCTL_BHI_GETDEVINFO BhiRead error, ret = %ld\n", ret);
return ret;
}
ret = copy_to_user(ubuf, &bhi_info, sizeof(bhi_info));
if (ret) {
MHI_ERR("IOCTL_BHI_GETDEVINFO copy error, ret = %ld\n", ret);
}
return ret;
}
long bhi_write_image(struct mhi_controller *mhi_cntrl, void __user *ubuf)
{
long ret = -EINVAL;
size_t size;
ret = copy_from_user(&size, ubuf, sizeof(size));
if (ret) {
MHI_ERR("IOCTL_BHI_WRITEIMAGE copy size error, ret = %ld\n", ret);
return ret;
}
ret = BhiWrite(mhi_cntrl, ubuf+sizeof(size), size);
if (ret) {
MHI_ERR("IOCTL_BHI_WRITEIMAGE BhiWrite error, ret = %ld\n", ret);
}
return ret;
}

View File

@ -0,0 +1,274 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.*/
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/termios.h>
#include <linux/wait.h>
#include "mhi.h"
#include "mhi_internal.h"
struct __packed dtr_ctrl_msg {
u32 preamble;
u32 msg_id;
u32 dest_id;
u32 size;
u32 msg;
};
#define CTRL_MAGIC (0x4C525443)
#define CTRL_MSG_DTR BIT(0)
#define CTRL_MSG_RTS BIT(1)
#define CTRL_MSG_DCD BIT(0)
#define CTRL_MSG_DSR BIT(1)
#define CTRL_MSG_RI BIT(3)
#define CTRL_HOST_STATE (0x10)
#define CTRL_DEVICE_STATE (0x11)
#define CTRL_GET_CHID(dtr) (dtr->dest_id & 0xFF)
static int mhi_dtr_tiocmset(struct mhi_controller *mhi_cntrl,
struct mhi_device *mhi_dev,
u32 tiocm)
{
struct dtr_ctrl_msg *dtr_msg = NULL;
struct mhi_chan *dtr_chan = mhi_cntrl->dtr_dev->ul_chan;
spinlock_t *res_lock = &mhi_dev->dev.devres_lock;
u32 cur_tiocm;
int ret = 0;
cur_tiocm = mhi_dev->tiocm & ~(TIOCM_CD | TIOCM_DSR | TIOCM_RI);
tiocm &= (TIOCM_DTR | TIOCM_RTS);
/* state did not change */
if (cur_tiocm == tiocm)
return 0;
mutex_lock(&dtr_chan->mutex);
dtr_msg = kzalloc(sizeof(*dtr_msg), GFP_KERNEL);
if (!dtr_msg) {
ret = -ENOMEM;
goto tiocm_exit;
}
dtr_msg->preamble = CTRL_MAGIC;
dtr_msg->msg_id = CTRL_HOST_STATE;
dtr_msg->dest_id = mhi_dev->ul_chan_id;
dtr_msg->size = sizeof(u32);
if (tiocm & TIOCM_DTR)
dtr_msg->msg |= CTRL_MSG_DTR;
if (tiocm & TIOCM_RTS)
dtr_msg->msg |= CTRL_MSG_RTS;
/*
* 'minicom -D /dev/mhi_DUN' sends RTS:1 on open and RTS:0 on exit.
* RTS:0 prevents the modem from outputting AT responses.
* But 'busybox microcom' does not send any RTS to the modem.
* [75094.969783] mhi_uci_q 0306_00.03.00_DUN: mhi_dtr_tiocmset DTR:0 RTS:1
* [75100.210994] mhi_uci_q 0306_00.03.00_DUN: mhi_dtr_tiocmset DTR:0 RTS:0
*/
dev_dbg(&mhi_dev->dev, "%s DTR:%d RTS:%d\n", __func__,
!!(tiocm & TIOCM_DTR), !!(tiocm & TIOCM_RTS));
reinit_completion(&dtr_chan->completion);
ret = mhi_queue_transfer(mhi_cntrl->dtr_dev, DMA_TO_DEVICE, dtr_msg,
sizeof(*dtr_msg), MHI_EOT);
if (ret)
goto tiocm_exit;
ret = wait_for_completion_timeout(&dtr_chan->completion,
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret) {
MHI_ERR("Failed to receive transfer callback\n");
ret = -EIO;
goto tiocm_exit;
}
ret = 0;
spin_lock_irq(res_lock);
mhi_dev->tiocm &= ~(TIOCM_DTR | TIOCM_RTS);
mhi_dev->tiocm |= tiocm;
spin_unlock_irq(res_lock);
tiocm_exit:
kfree(dtr_msg);
mutex_unlock(&dtr_chan->mutex);
return ret;
}
long mhi_ioctl(struct mhi_device *mhi_dev, unsigned int cmd, unsigned long arg)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
int ret;
/* ioctl not supported by this controller */
if (!mhi_cntrl->dtr_dev)
return -EIO;
switch (cmd) {
case TIOCMGET:
return mhi_dev->tiocm;
case TIOCMSET:
{
u32 tiocm;
ret = get_user(tiocm, (u32 *)arg);
if (ret)
return ret;
return mhi_dtr_tiocmset(mhi_cntrl, mhi_dev, tiocm);
}
default:
break;
}
return -EINVAL;
}
EXPORT_SYMBOL(mhi_ioctl);
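/*
 * Illustrative sketch (not part of this driver): how userspace reaches
 * mhi_dtr_tiocmset() through TIOCMSET on a uci device node. The node name
 * /dev/mhi_DUN follows the comment above and is an assumption.
 */
#if 0
#include <fcntl.h>
#include <sys/ioctl.h>
#include <termios.h>

static int example_assert_dtr(const char *node)
{
	unsigned int bits = TIOCM_DTR | TIOCM_RTS;
	int fd = open(node, O_RDWR);

	if (fd < 0)
		return -1;
	/* routed to mhi_ioctl() -> mhi_dtr_tiocmset() by the uci driver */
	ioctl(fd, TIOCMSET, &bits);
	return fd;
}
#endif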
static int mhi_dtr_queue_inbound(struct mhi_controller *mhi_cntrl)
{
struct mhi_device *mhi_dev = mhi_cntrl->dtr_dev;
int nr_trbs = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
size_t mtu = mhi_dev->mtu;
void *buf;
int ret = -EIO, i;
for (i = 0; i < nr_trbs; i++) {
buf = kmalloc(mtu, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, buf, mtu,
MHI_EOT);
if (ret) {
kfree(buf);
return ret;
}
}
return ret;
}
static void mhi_dtr_dl_xfer_cb(struct mhi_device *mhi_dev,
struct mhi_result *mhi_result)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct dtr_ctrl_msg *dtr_msg = mhi_result->buf_addr;
u32 chan;
spinlock_t *res_lock;
if (mhi_result->transaction_status == -ENOTCONN) {
kfree(mhi_result->buf_addr);
return;
}
if (mhi_result->bytes_xferd != sizeof(*dtr_msg)) {
MHI_ERR("Unexpected length %zu received\n",
mhi_result->bytes_xferd);
return;
}
MHI_LOG("preamble:0x%x msg_id:%u dest_id:%u msg:0x%x\n",
dtr_msg->preamble, dtr_msg->msg_id, dtr_msg->dest_id,
dtr_msg->msg);
chan = CTRL_GET_CHID(dtr_msg);
if (chan >= mhi_cntrl->max_chan)
goto auto_queue;
mhi_dev = mhi_cntrl->mhi_chan[chan].mhi_dev;
if (!mhi_dev)
goto auto_queue;
res_lock = &mhi_dev->dev.devres_lock;
spin_lock_irq(res_lock);
mhi_dev->tiocm &= ~(TIOCM_CD | TIOCM_DSR | TIOCM_RI);
if (dtr_msg->msg & CTRL_MSG_DCD)
mhi_dev->tiocm |= TIOCM_CD;
if (dtr_msg->msg & CTRL_MSG_DSR)
mhi_dev->tiocm |= TIOCM_DSR;
if (dtr_msg->msg & CTRL_MSG_RI)
mhi_dev->tiocm |= TIOCM_RI;
spin_unlock_irq(res_lock);
auto_queue:
mhi_queue_transfer(mhi_cntrl->dtr_dev, DMA_FROM_DEVICE, mhi_result->buf_addr,
mhi_cntrl->dtr_dev->mtu, MHI_EOT);
}
static void mhi_dtr_ul_xfer_cb(struct mhi_device *mhi_dev,
struct mhi_result *mhi_result)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_chan *dtr_chan = mhi_cntrl->dtr_dev->ul_chan;
MHI_VERB("Received with status:%d\n", mhi_result->transaction_status);
if (!mhi_result->transaction_status)
complete(&dtr_chan->completion);
}
static void mhi_dtr_remove(struct mhi_device *mhi_dev)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
mhi_cntrl->dtr_dev = NULL;
}
static int mhi_dtr_probe(struct mhi_device *mhi_dev,
const struct mhi_device_id *id)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
int ret;
MHI_LOG("Enter for DTR control channel\n");
mhi_dev->mtu = min_t(size_t, id->driver_data, mhi_dev->mtu);
ret = mhi_prepare_for_transfer(mhi_dev);
if (!ret)
mhi_cntrl->dtr_dev = mhi_dev;
if (!ret)
ret = mhi_dtr_queue_inbound(mhi_cntrl);
MHI_LOG("Exit with ret:%d\n", ret);
return ret;
}
static const struct mhi_device_id mhi_dtr_table[] = {
{ .chan = "IP_CTRL", .driver_data = sizeof(struct dtr_ctrl_msg) },
{},
};
static struct mhi_driver mhi_dtr_driver = {
.id_table = mhi_dtr_table,
.remove = mhi_dtr_remove,
.probe = mhi_dtr_probe,
.ul_xfer_cb = mhi_dtr_ul_xfer_cb,
.dl_xfer_cb = mhi_dtr_dl_xfer_cb,
.driver = {
.name = "MHI_DTR",
.owner = THIS_MODULE,
}
};
int __init mhi_dtr_init(void)
{
return mhi_driver_register(&mhi_dtr_driver);
}
void mhi_dtr_exit(void)
{
mhi_driver_unregister(&mhi_dtr_driver);
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,362 @@
#ifndef __SDX20_MHI_H
#define __SDX20_MHI_H
#include <linux/types.h>
/* MHI control data structures allotted by the host, including
* channel context array, event context array, command context and rings */
/* Channel context state */
enum mhi_dev_ch_ctx_state {
MHI_DEV_CH_STATE_DISABLED,
MHI_DEV_CH_STATE_ENABLED,
MHI_DEV_CH_STATE_RUNNING,
MHI_DEV_CH_STATE_SUSPENDED,
MHI_DEV_CH_STATE_STOP,
MHI_DEV_CH_STATE_ERROR,
MHI_DEV_CH_STATE_RESERVED,
MHI_DEV_CH_STATE_32BIT = 0x7FFFFFFF
};
/* Channel type */
enum mhi_dev_ch_ctx_type {
MHI_DEV_CH_TYPE_NONE,
MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL,
MHI_DEV_CH_TYPE_INBOUND_CHANNEL,
MHI_DEV_CH_RESERVED
};
/* Channel context type */
struct mhi_dev_ch_ctx {
enum mhi_dev_ch_ctx_state ch_state;
enum mhi_dev_ch_ctx_type ch_type;
uint32_t err_indx;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
enum mhi_dev_ring_element_type_id {
MHI_DEV_RING_EL_INVALID = 0,
MHI_DEV_RING_EL_NOOP = 1,
MHI_DEV_RING_EL_TRANSFER = 2,
MHI_DEV_RING_EL_RESET = 16,
MHI_DEV_RING_EL_STOP = 17,
MHI_DEV_RING_EL_START = 18,
MHI_DEV_RING_EL_MHI_STATE_CHG = 32,
MHI_DEV_RING_EL_CMD_COMPLETION_EVT = 33,
MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT = 34,
MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY = 64,
MHI_DEV_RING_EL_UNDEF
};
enum mhi_dev_ring_state {
RING_STATE_UINT = 0,
RING_STATE_IDLE,
RING_STATE_PENDING,
};
enum mhi_dev_ring_type {
RING_TYPE_CMD = 0,
RING_TYPE_ER,
RING_TYPE_CH,
RING_TYPE_INVAL
};
/* Event context interrupt moderation */
enum mhi_dev_evt_ctx_int_mod_timer {
MHI_DEV_EVT_INT_MODERATION_DISABLED
};
/* Event ring type */
enum mhi_dev_evt_ctx_event_ring_type {
MHI_DEV_EVT_TYPE_DEFAULT,
MHI_DEV_EVT_TYPE_VALID,
MHI_DEV_EVT_RESERVED
};
/* Event ring context type */
struct mhi_dev_ev_ctx {
uint32_t res1:16;
enum mhi_dev_evt_ctx_int_mod_timer intmodt:16;
enum mhi_dev_evt_ctx_event_ring_type ertype;
uint32_t msivec;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
/* Command context */
struct mhi_dev_cmd_ctx {
uint32_t res1;
uint32_t res2;
uint32_t res3;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
/* generic context */
struct mhi_dev_gen_ctx {
uint32_t res1;
uint32_t res2;
uint32_t res3;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
/* Transfer ring element */
struct mhi_dev_transfer_ring_element {
uint64_t data_buf_ptr;
uint32_t len:16;
uint32_t res1:16;
uint32_t chain:1;
uint32_t res2:7;
uint32_t ieob:1;
uint32_t ieot:1;
uint32_t bei:1;
uint32_t res3:5;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res4:8;
} __packed;
/* Command ring element */
/* Command ring No op command */
struct mhi_dev_cmd_ring_op {
uint64_t res1;
uint32_t res2;
uint32_t res3:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command ring reset channel command */
struct mhi_dev_cmd_ring_reset_channel_cmd {
uint64_t res1;
uint32_t res2;
uint32_t res3:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command ring stop channel command */
struct mhi_dev_cmd_ring_stop_channel_cmd {
uint64_t res1;
uint32_t res2;
uint32_t res3:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command ring start channel command */
struct mhi_dev_cmd_ring_start_channel_cmd {
uint64_t res1;
uint32_t seqnum;
uint32_t reliable:1;
uint32_t res2:15;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
enum mhi_dev_cmd_completion_code {
MHI_CMD_COMPL_CODE_INVALID = 0,
MHI_CMD_COMPL_CODE_SUCCESS = 1,
MHI_CMD_COMPL_CODE_EOT = 2,
MHI_CMD_COMPL_CODE_OVERFLOW = 3,
MHI_CMD_COMPL_CODE_EOB = 4,
MHI_CMD_COMPL_CODE_UNDEFINED = 16,
MHI_CMD_COMPL_CODE_RING_EL = 17,
MHI_CMD_COMPL_CODE_RES
};
/* Event ring elements */
/* Transfer completion event */
struct mhi_dev_event_ring_transfer_completion {
uint64_t ptr;
uint32_t len:16;
uint32_t res1:8;
enum mhi_dev_cmd_completion_code code:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command completion event */
struct mhi_dev_event_ring_cmd_completion {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_cmd_completion_code code:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res3:8;
} __packed;
enum mhi_dev_state {
MHI_DEV_RESET_STATE = 0,
MHI_DEV_READY_STATE,
MHI_DEV_M0_STATE,
MHI_DEV_M1_STATE,
MHI_DEV_M2_STATE,
MHI_DEV_M3_STATE,
MHI_DEV_MAX_STATE,
MHI_DEV_SYSERR_STATE = 0xff
};
/* MHI state change event */
struct mhi_dev_event_ring_state_change {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_state mhistate:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res3:8;
} __packed;
enum mhi_dev_execenv {
MHI_DEV_SBL_EE = 1,
MHI_DEV_AMSS_EE = 2,
MHI_DEV_UNRESERVED
};
/* EE state change event */
struct mhi_dev_event_ring_ee_state_change {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_execenv execenv:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res3:8;
} __packed;
/* Generic cmd to parse common details like type and channel id */
struct mhi_dev_ring_generic {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_state mhistate:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
struct mhi_config {
uint32_t mhi_reg_len;
uint32_t version;
uint32_t event_rings;
uint32_t channels;
uint32_t chdb_offset;
uint32_t erdb_offset;
};
#define NUM_CHANNELS 128
#define HW_CHANNEL_BASE 100
#define HW_CHANNEL_END 107
#define MHI_ENV_VALUE 2
#define MHI_MASK_ROWS_CH_EV_DB 4
#define TRB_MAX_DATA_SIZE 8192
#define MHI_CTRL_STATE 25
#define IPA_DMA_SYNC 1
#define IPA_DMA_ASYNC 0
/* maximum transfer completion events buffer */
#define MAX_TR_EVENTS 50
/* maximum event requests */
#define MHI_MAX_EVT_REQ 50
/* Possible ring element types */
union mhi_dev_ring_element_type {
struct mhi_dev_cmd_ring_op cmd_no_op;
struct mhi_dev_cmd_ring_reset_channel_cmd cmd_reset;
struct mhi_dev_cmd_ring_stop_channel_cmd cmd_stop;
struct mhi_dev_cmd_ring_start_channel_cmd cmd_start;
struct mhi_dev_transfer_ring_element cmd_transfer;
struct mhi_dev_event_ring_transfer_completion evt_tr_comp;
struct mhi_dev_event_ring_cmd_completion evt_cmd_comp;
struct mhi_dev_event_ring_state_change evt_state_change;
struct mhi_dev_event_ring_ee_state_change evt_ee_state;
struct mhi_dev_ring_generic generic;
};
/* Transfer ring element type */
union mhi_dev_ring_ctx {
struct mhi_dev_cmd_ctx cmd;
struct mhi_dev_ev_ctx ev;
struct mhi_dev_ch_ctx ch;
struct mhi_dev_gen_ctx generic;
};
/* MHI host Control and data address region */
struct mhi_host_addr {
uint32_t ctrl_base_lsb;
uint32_t ctrl_base_msb;
uint32_t ctrl_limit_lsb;
uint32_t ctrl_limit_msb;
uint32_t data_base_lsb;
uint32_t data_base_msb;
uint32_t data_limit_lsb;
uint32_t data_limit_msb;
};
/* MHI physical and virtual address region */
struct mhi_meminfo {
struct device *dev;
uintptr_t pa_aligned;
uintptr_t pa_unaligned;
uintptr_t va_aligned;
uintptr_t va_unaligned;
uintptr_t size;
};
struct mhi_addr {
uint64_t host_pa;
uintptr_t device_pa;
uintptr_t device_va;
size_t size;
dma_addr_t phy_addr;
void *virt_addr;
bool use_ipa_dma;
};
struct mhi_interrupt_state {
uint32_t mask;
uint32_t status;
};
enum mhi_dev_channel_state {
MHI_DEV_CH_UNINT,
MHI_DEV_CH_STARTED,
MHI_DEV_CH_PENDING_START,
MHI_DEV_CH_PENDING_STOP,
MHI_DEV_CH_STOPPED,
MHI_DEV_CH_CLOSED,
};
enum mhi_dev_ch_operation {
MHI_DEV_OPEN_CH,
MHI_DEV_CLOSE_CH,
MHI_DEV_READ_CH,
MHI_DEV_READ_WR,
MHI_DEV_POLL,
};
enum mhi_ctrl_info {
MHI_STATE_CONFIGURED = 0,
MHI_STATE_CONNECTED = 1,
MHI_STATE_DISCONNECTED = 2,
MHI_STATE_INVAL,
};
enum mhi_dev_tr_compl_evt_type {
SEND_EVENT_BUFFER,
SEND_EVENT_RD_OFFSET,
};
enum mhi_dev_transfer_type {
MHI_DEV_DMA_SYNC,
MHI_DEV_DMA_ASYNC,
};
#endif /* _SDX20_MHI_H_ */

View File

@ -0,0 +1,426 @@
#ifndef __SDX20_MHI_H
#define __SDX20_MHI_H
#include <linux/types.h>
/* MHI control data structures allotted by the host, including
* channel context array, event context array, command context and rings */
/* Channel context state */
enum mhi_dev_ch_ctx_state {
MHI_DEV_CH_STATE_DISABLED,
MHI_DEV_CH_STATE_ENABLED,
MHI_DEV_CH_STATE_RUNNING,
MHI_DEV_CH_STATE_SUSPENDED,
MHI_DEV_CH_STATE_STOP,
MHI_DEV_CH_STATE_ERROR,
MHI_DEV_CH_STATE_RESERVED,
MHI_DEV_CH_STATE_32BIT = 0x7FFFFFFF
};
/* Channel type */
enum mhi_dev_ch_ctx_type {
MHI_DEV_CH_TYPE_NONE,
MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL,
MHI_DEV_CH_TYPE_INBOUND_CHANNEL,
MHI_DEV_CH_RESERVED
};
/* Channel context type */
struct mhi_dev_ch_ctx {
enum mhi_dev_ch_ctx_state ch_state;
enum mhi_dev_ch_ctx_type ch_type;
uint32_t err_indx;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
enum mhi_dev_ring_element_type_id {
MHI_DEV_RING_EL_INVALID = 0,
MHI_DEV_RING_EL_NOOP = 1,
MHI_DEV_RING_EL_TRANSFER = 2,
MHI_DEV_RING_EL_RESET = 16,
MHI_DEV_RING_EL_STOP = 17,
MHI_DEV_RING_EL_START = 18,
MHI_DEV_RING_EL_MHI_STATE_CHG = 32,
MHI_DEV_RING_EL_CMD_COMPLETION_EVT = 33,
MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT = 34,
MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY = 64,
MHI_DEV_RING_EL_UNDEF
};
enum mhi_dev_ring_state {
RING_STATE_UINT = 0,
RING_STATE_IDLE,
RING_STATE_PENDING,
};
enum mhi_dev_ring_type {
RING_TYPE_CMD = 0,
RING_TYPE_ER,
RING_TYPE_CH,
RING_TYPE_INVAL
};
/* Event context interrupt moderation */
enum mhi_dev_evt_ctx_int_mod_timer {
MHI_DEV_EVT_INT_MODERATION_DISABLED
};
/* Event ring type */
enum mhi_dev_evt_ctx_event_ring_type {
MHI_DEV_EVT_TYPE_DEFAULT,
MHI_DEV_EVT_TYPE_VALID,
MHI_DEV_EVT_RESERVED
};
/* Event ring context type */
struct mhi_dev_ev_ctx {
uint32_t res1:16;
enum mhi_dev_evt_ctx_int_mod_timer intmodt:16;
enum mhi_dev_evt_ctx_event_ring_type ertype;
uint32_t msivec;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
/* Command context */
struct mhi_dev_cmd_ctx {
uint32_t res1;
uint32_t res2;
uint32_t res3;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
/* generic context */
struct mhi_dev_gen_ctx {
uint32_t res1;
uint32_t res2;
uint32_t res3;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
/* Transfer ring element */
struct mhi_dev_transfer_ring_element {
uint64_t data_buf_ptr;
uint32_t len:16;
uint32_t res1:16;
uint32_t chain:1;
uint32_t res2:7;
uint32_t ieob:1;
uint32_t ieot:1;
uint32_t bei:1;
uint32_t res3:5;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res4:8;
} __packed;
/* Command ring element */
/* Command ring No op command */
struct mhi_dev_cmd_ring_op {
uint64_t res1;
uint32_t res2;
uint32_t res3:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command ring reset channel command */
struct mhi_dev_cmd_ring_reset_channel_cmd {
uint64_t res1;
uint32_t res2;
uint32_t res3:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command ring stop channel command */
struct mhi_dev_cmd_ring_stop_channel_cmd {
uint64_t res1;
uint32_t res2;
uint32_t res3:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command ring start channel command */
struct mhi_dev_cmd_ring_start_channel_cmd {
uint64_t res1;
uint32_t seqnum;
uint32_t reliable:1;
uint32_t res2:15;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
enum mhi_dev_cmd_completion_code {
MHI_CMD_COMPL_CODE_INVALID = 0,
MHI_CMD_COMPL_CODE_SUCCESS = 1,
MHI_CMD_COMPL_CODE_EOT = 2,
MHI_CMD_COMPL_CODE_OVERFLOW = 3,
MHI_CMD_COMPL_CODE_EOB = 4,
MHI_CMD_COMPL_CODE_UNDEFINED = 16,
MHI_CMD_COMPL_CODE_RING_EL = 17,
MHI_CMD_COMPL_CODE_RES
};
/* Event ring elements */
/* Transfer completion event */
struct mhi_dev_event_ring_transfer_completion {
uint64_t ptr;
uint32_t len:16;
uint32_t res1:8;
enum mhi_dev_cmd_completion_code code:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command completion event */
struct mhi_dev_event_ring_cmd_completion {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_cmd_completion_code code:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res3:8;
} __packed;
enum mhi_dev_state {
MHI_DEV_RESET_STATE = 0,
MHI_DEV_READY_STATE,
MHI_DEV_M0_STATE,
MHI_DEV_M1_STATE,
MHI_DEV_M2_STATE,
MHI_DEV_M3_STATE,
MHI_DEV_MAX_STATE,
MHI_DEV_SYSERR_STATE = 0xff
};
/* MHI state change event */
struct mhi_dev_event_ring_state_change {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_state mhistate:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res3:8;
} __packed;
enum mhi_dev_execenv {
MHI_DEV_SBL_EE = 1,
MHI_DEV_AMSS_EE = 2,
MHI_DEV_UNRESERVED
};
/* EE state change event */
struct mhi_dev_event_ring_ee_state_change {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_execenv execenv:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res3:8;
} __packed;
/* Generic cmd to parse common details like type and channel id */
struct mhi_dev_ring_generic {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_state mhistate:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
struct mhi_config {
uint32_t mhi_reg_len;
uint32_t version;
uint32_t event_rings;
uint32_t channels;
uint32_t chdb_offset;
uint32_t erdb_offset;
};
#define NUM_CHANNELS 128
#define HW_CHANNEL_BASE 100
#define HW_CHANNEL_END 107
#define MHI_ENV_VALUE 2
#define MHI_MASK_ROWS_CH_EV_DB 4
#define TRB_MAX_DATA_SIZE 8192
#define MHI_CTRL_STATE 25
#define IPA_DMA_SYNC 1
#define IPA_DMA_ASYNC 0
/* maximum transfer completion events buffer */
#define MAX_TR_EVENTS 50
/* maximum event requests */
#define MHI_MAX_EVT_REQ 50
/* Possible ring element types */
union mhi_dev_ring_element_type {
struct mhi_dev_cmd_ring_op cmd_no_op;
struct mhi_dev_cmd_ring_reset_channel_cmd cmd_reset;
struct mhi_dev_cmd_ring_stop_channel_cmd cmd_stop;
struct mhi_dev_cmd_ring_start_channel_cmd cmd_start;
struct mhi_dev_transfer_ring_element tre;
struct mhi_dev_event_ring_transfer_completion evt_tr_comp;
struct mhi_dev_event_ring_cmd_completion evt_cmd_comp;
struct mhi_dev_event_ring_state_change evt_state_change;
struct mhi_dev_event_ring_ee_state_change evt_ee_state;
struct mhi_dev_ring_generic generic;
};
/* Transfer ring element type */
union mhi_dev_ring_ctx {
struct mhi_dev_cmd_ctx cmd;
struct mhi_dev_ev_ctx ev;
struct mhi_dev_ch_ctx ch;
struct mhi_dev_gen_ctx generic;
};
/* MHI host Control and data address region */
struct mhi_host_addr {
uint32_t ctrl_base_lsb;
uint32_t ctrl_base_msb;
uint32_t ctrl_limit_lsb;
uint32_t ctrl_limit_msb;
uint32_t data_base_lsb;
uint32_t data_base_msb;
uint32_t data_limit_lsb;
uint32_t data_limit_msb;
};
/* MHI physical and virtual address region */
struct mhi_meminfo {
struct device *dev;
uintptr_t pa_aligned;
uintptr_t pa_unaligned;
uintptr_t va_aligned;
uintptr_t va_unaligned;
uintptr_t size;
};
struct mhi_addr {
uint64_t host_pa;
uintptr_t device_pa;
uintptr_t device_va;
size_t size;
dma_addr_t phy_addr;
void *virt_addr;
bool use_ipa_dma;
};
struct mhi_interrupt_state {
uint32_t mask;
uint32_t status;
};
enum mhi_dev_channel_state {
MHI_DEV_CH_UNINT,
MHI_DEV_CH_STARTED,
MHI_DEV_CH_PENDING_START,
MHI_DEV_CH_PENDING_STOP,
MHI_DEV_CH_STOPPED,
MHI_DEV_CH_CLOSED,
};
enum mhi_dev_ch_operation {
MHI_DEV_OPEN_CH,
MHI_DEV_CLOSE_CH,
MHI_DEV_READ_CH,
MHI_DEV_READ_WR,
MHI_DEV_POLL,
};
enum mhi_ctrl_info {
MHI_STATE_CONFIGURED = 0,
MHI_STATE_CONNECTED = 1,
MHI_STATE_DISCONNECTED = 2,
MHI_STATE_INVAL,
};
enum mhi_dev_tr_compl_evt_type {
SEND_EVENT_BUFFER,
SEND_EVENT_RD_OFFSET,
};
enum mhi_dev_transfer_type {
MHI_DEV_DMA_SYNC,
MHI_DEV_DMA_ASYNC,
};
#if 0
/* SW channel client list */
enum mhi_client_channel {
MHI_CLIENT_LOOPBACK_OUT = 0,
MHI_CLIENT_LOOPBACK_IN = 1,
MHI_CLIENT_SAHARA_OUT = 2,
MHI_CLIENT_SAHARA_IN = 3,
MHI_CLIENT_DIAG_OUT = 4,
MHI_CLIENT_DIAG_IN = 5,
MHI_CLIENT_SSR_OUT = 6,
MHI_CLIENT_SSR_IN = 7,
MHI_CLIENT_QDSS_OUT = 8,
MHI_CLIENT_QDSS_IN = 9,
MHI_CLIENT_EFS_OUT = 10,
MHI_CLIENT_EFS_IN = 11,
MHI_CLIENT_MBIM_OUT = 12,
MHI_CLIENT_MBIM_IN = 13,
MHI_CLIENT_QMI_OUT = 14,
MHI_CLIENT_QMI_IN = 15,
MHI_CLIENT_IP_CTRL_0_OUT = 16,
MHI_CLIENT_IP_CTRL_0_IN = 17,
MHI_CLIENT_IP_CTRL_1_OUT = 18,
MHI_CLIENT_IP_CTRL_1_IN = 19,
MHI_CLIENT_DCI_OUT = 20,
MHI_CLIENT_DCI_IN = 21,
MHI_CLIENT_IP_CTRL_3_OUT = 22,
MHI_CLIENT_IP_CTRL_3_IN = 23,
MHI_CLIENT_IP_CTRL_4_OUT = 24,
MHI_CLIENT_IP_CTRL_4_IN = 25,
MHI_CLIENT_IP_CTRL_5_OUT = 26,
MHI_CLIENT_IP_CTRL_5_IN = 27,
MHI_CLIENT_IP_CTRL_6_OUT = 28,
MHI_CLIENT_IP_CTRL_6_IN = 29,
MHI_CLIENT_IP_CTRL_7_OUT = 30,
MHI_CLIENT_IP_CTRL_7_IN = 31,
MHI_CLIENT_DUN_OUT = 32,
MHI_CLIENT_DUN_IN = 33,
MHI_CLIENT_IP_SW_0_OUT = 34,
MHI_CLIENT_IP_SW_0_IN = 35,
MHI_CLIENT_IP_SW_1_OUT = 36,
MHI_CLIENT_IP_SW_1_IN = 37,
MHI_CLIENT_IP_SW_2_OUT = 38,
MHI_CLIENT_IP_SW_2_IN = 39,
MHI_CLIENT_IP_SW_3_OUT = 40,
MHI_CLIENT_IP_SW_3_IN = 41,
MHI_CLIENT_CSVT_OUT = 42,
MHI_CLIENT_CSVT_IN = 43,
MHI_CLIENT_SMCT_OUT = 44,
MHI_CLIENT_SMCT_IN = 45,
MHI_CLIENT_IP_SW_4_OUT = 46,
MHI_CLIENT_IP_SW_4_IN = 47,
MHI_MAX_SOFTWARE_CHANNELS = 48,
MHI_CLIENT_TEST_OUT = 60,
MHI_CLIENT_TEST_IN = 61,
MHI_CLIENT_RESERVED_1_LOWER = 62,
MHI_CLIENT_RESERVED_1_UPPER = 99,
MHI_CLIENT_IP_HW_0_OUT = 100,
MHI_CLIENT_IP_HW_0_IN = 101,
MHI_CLIENT_RESERVED_2_LOWER = 102,
MHI_CLIENT_RESERVED_2_UPPER = 127,
MHI_MAX_CHANNELS = 102,
};
#endif
#endif /* __SDX20_MHI_H */

View File

@ -0,0 +1,33 @@
menu "MHI device support"
config MHI_NETDEV
tristate "MHI NETDEV"
depends on MHI_BUS
help
MHI-based net device driver for transferring IP traffic
between the host and modem. By enabling this driver, clients
can transfer data over a standard network interface; over-the-air
traffic goes through the mhi netdev interface.
config MHI_UCI
tristate "MHI UCI"
depends on MHI_BUS
help
MHI-based UCI driver for transferring data between the host and
modem using standard file operations from user space. Open, read,
write, ioctl, and close operations are supported by this driver.
Please check mhi_uci_match_table for all supported channels that
are exposed to userspace.
config MHI_SATELLITE
tristate "MHI SATELLITE"
depends on MHI_BUS
help
MHI proxy satellite device driver enables non-HLOS MHI satellite
drivers to communicate with the device over the PCIe link without
host involvement. The host facilitates propagation of events from
the device to non-HLOS MHI satellite drivers, channel states, and
power management over IPC communication, which helps with HLOS
power savings.
endmenu
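
As a quick illustration of the MHI_UCI help text above: once the driver creates a character node (for example /dev/mhi_DUN for the DUN channel listed in mhi_uci_match_table further down; the exact node name is an assumption and varies with channel and controller index), user space can drive it with ordinary file operations. A minimal sketch, not part of this commit:

/* uci_at.c - hypothetical user-space client for an MHI UCI node */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[256];
	struct pollfd pfd = { .events = POLLIN };
	ssize_t n;

	/* node name assumed; see device_create() in mhi_uci_probe() */
	pfd.fd = open("/dev/mhi_DUN", O_RDWR | O_NONBLOCK);
	if (pfd.fd < 0)
		return 1;
	(void)write(pfd.fd, "AT\r\n", 4);	/* queued as an MTU-bounded transfer */
	if (poll(&pfd, 1, 3000) > 0 && (pfd.revents & POLLIN)) {
		n = read(pfd.fd, buf, sizeof(buf) - 1);	/* one received buffer */
		if (n > 0) {
			buf[n] = '\0';
			printf("%s", buf);
		}
	}
	close(pfd.fd);
	return 0;
}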

View File

@ -0,0 +1,3 @@
obj-$(CONFIG_MHI_NETDEV) +=mhi_netdev.o
obj-$(CONFIG_MHI_UCI) +=mhi_uci.o
obj-$(CONFIG_MHI_SATELLITE) +=mhi_satellite.o

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,922 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.*/
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/delay.h>
#if 1 /* local no-op stubs: the IPC logging subsystem is not used in this build */
static inline void *ipc_log_context_create(int max_num_pages,
const char *modname, uint16_t user_version)
{ return NULL; }
static inline int ipc_log_string(void *ilctxt, const char *fmt, ...)
{ return -EINVAL; }
#endif
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/uaccess.h>
#include <linux/tty.h>
#include "../core/mhi.h"
#define DEVICE_NAME "mhi"
#define MHI_UCI_DRIVER_NAME "mhi_uci_q"
struct uci_chan {
wait_queue_head_t wq;
spinlock_t lock;
struct list_head pending; /* user space waiting to read */
struct uci_buf *cur_buf; /* current buffer user space reading */
size_t rx_size;
};
struct uci_buf {
struct page *page;
void *data;
size_t len;
unsigned nr_trb;
struct list_head node;
};
struct uci_dev {
struct list_head node;
dev_t devt;
struct device *dev;
struct mhi_device *mhi_dev;
const char *chan;
struct mutex mutex; /* sync open and close */
struct mutex r_mutex;
struct mutex w_mutex;
struct uci_chan ul_chan;
struct uci_chan dl_chan;
size_t mtu;
int ref_count;
bool enabled;
unsigned rx_error;
unsigned nr_trb;
unsigned nr_trbs;
struct uci_buf *uci_buf;
struct ktermios termios;
size_t bytes_xferd;
};
struct mhi_uci_drv {
struct list_head head;
struct mutex lock;
struct class *class;
int major;
dev_t dev_t;
};
static int uci_msg_lvl = MHI_MSG_LVL_ERROR;
module_param(uci_msg_lvl, int, S_IRUGO | S_IWUSR);
#define MSG_VERB(fmt, ...) do { \
if (uci_msg_lvl <= MHI_MSG_LVL_VERBOSE) \
pr_err("[D][%s] " fmt, __func__, ##__VA_ARGS__); \
} while (0)
#define MSG_LOG(fmt, ...) do { \
if (uci_msg_lvl <= MHI_MSG_LVL_INFO) \
pr_err("[I][%s] " fmt, __func__, ##__VA_ARGS__); \
} while (0)
#define MSG_ERR(fmt, ...) do { \
if (uci_msg_lvl <= MHI_MSG_LVL_ERROR) \
pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \
} while (0)
#define MAX_UCI_DEVICES (64)
#define QUEC_MHI_UCI_ALWAYS_OPEN /* for now the SDX20 cannot handle a "start-reset-start" sequence, so the simple solution is to keep the channel in the started state */
static DECLARE_BITMAP(uci_minors, MAX_UCI_DEVICES);
static struct mhi_uci_drv mhi_uci_drv;
static int mhi_queue_inbound(struct uci_dev *uci_dev)
{
struct mhi_device *mhi_dev = uci_dev->mhi_dev;
int nr_trbs = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
size_t mtu = uci_dev->mtu;
void *buf;
struct uci_buf *uci_buf;
int ret = -EIO, i;
if (uci_dev->uci_buf == NULL) {
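/* allocate the RX buffer pool only once: one page-sized buffer per
* free TRB plus one spare; the same pool is reused on later queuing */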
uci_dev->nr_trb = 0;
uci_dev->nr_trbs = (nr_trbs + 1);
uci_dev->uci_buf = kmalloc_array(uci_dev->nr_trbs, sizeof(*uci_buf), GFP_KERNEL);
if (!uci_dev->uci_buf)
return -ENOMEM;
uci_buf = uci_dev->uci_buf;
for (i = 0; i < uci_dev->nr_trbs; i++, uci_buf++) {
uci_buf->page = alloc_pages(GFP_KERNEL, get_order(mtu));
if (!uci_buf->page)
return -ENOMEM;
uci_buf->data = page_address(uci_buf->page);
uci_buf->len = 0;
uci_buf->nr_trb = i;
if (mhi_dev->dl_chan_id == MHI_CLIENT_DUN_IN) {
//MSG_ERR("[%d] = %p\n", i, uci_buf->data);
}
}
}
for (i = 0; i < nr_trbs; i++) {
#if 0
buf = kmalloc(mtu + sizeof(*uci_buf), GFP_KERNEL);
if (!buf)
return -ENOMEM;
uci_buf = buf + mtu;
uci_buf->data = buf;
#else
uci_buf = &uci_dev->uci_buf[i];
buf = uci_buf->data;
#endif
MSG_VERB("Allocated buf %d of %d size %zu\n", i, nr_trbs, mtu);
ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, buf, mtu,
MHI_EOT);
if (ret) {
#if 0
kfree(buf);
#endif
MSG_ERR("Failed to queue buffer %d\n", i);
return ret;
}
}
return ret;
}
static long mhi_uci_ioctl(struct file *file,
unsigned int cmd,
unsigned long arg)
{
struct uci_dev *uci_dev = file->private_data;
struct mhi_device *mhi_dev = uci_dev->mhi_dev;
long ret = -ERESTARTSYS;
mutex_lock(&uci_dev->mutex);
if (uci_dev->enabled)
ret = mhi_ioctl(mhi_dev, cmd, arg);
if (uci_dev->enabled) {
switch (cmd) {
case TCGETS:
#ifndef TCGETS2
ret = kernel_termios_to_user_termios((struct termios __user *)arg, &uci_dev->termios);
#else
ret = kernel_termios_to_user_termios_1((struct termios __user *)arg, &uci_dev->termios);
#endif
break;
case TCSETSF:
case TCSETS:
#ifndef TCGETS2
ret = user_termios_to_kernel_termios(&uci_dev->termios, (struct termios __user *)arg);
#else
ret = user_termios_to_kernel_termios_1(&uci_dev->termios, (struct termios __user *)arg);
#endif
break;
case TCFLSH:
ret = 0;
break;
default:
break;
}
}
mutex_unlock(&uci_dev->mutex);
return ret;
}
static int mhi_uci_release(struct inode *inode, struct file *file)
{
struct uci_dev *uci_dev = file->private_data;
mutex_lock(&uci_dev->mutex);
uci_dev->ref_count--;
if (!uci_dev->ref_count) {
struct uci_chan *uci_chan;
MSG_LOG("Last client left, closing node\n");
if (uci_dev->enabled)
mhi_unprepare_from_transfer(uci_dev->mhi_dev);
/* clean inbound channel */
uci_chan = &uci_dev->dl_chan;
if (uci_dev->uci_buf) {
unsigned nr_trb = 0;
for (nr_trb = 0; nr_trb < uci_dev->nr_trbs; nr_trb++) {
if (uci_dev->uci_buf[nr_trb].page)
__free_pages(uci_dev->uci_buf[nr_trb].page, get_order(uci_dev->mtu));
}
kfree(uci_dev->uci_buf);
uci_dev->uci_buf = NULL;
}
uci_chan->cur_buf = NULL;
if (!uci_dev->enabled) {
MSG_LOG("Node is deleted, freeing dev node\n");
mutex_unlock(&uci_dev->mutex);
mutex_destroy(&uci_dev->mutex);
clear_bit(MINOR(uci_dev->devt), uci_minors);
kfree(uci_dev);
return 0;
}
}
MSG_LOG("exit: ref_count:%d\n", uci_dev->ref_count);
mutex_unlock(&uci_dev->mutex);
return 0;
}
static unsigned int mhi_uci_poll(struct file *file, poll_table *wait)
{
struct uci_dev *uci_dev = file->private_data;
struct mhi_device *mhi_dev = uci_dev->mhi_dev;
struct uci_chan *uci_chan;
unsigned int mask = 0;
poll_wait(file, &uci_dev->dl_chan.wq, wait);
poll_wait(file, &uci_dev->ul_chan.wq, wait);
uci_chan = &uci_dev->dl_chan;
spin_lock_bh(&uci_chan->lock);
if (!uci_dev->enabled) {
mask = POLLERR;
} else if (!list_empty(&uci_chan->pending) || uci_chan->cur_buf) {
MSG_VERB("Client can read from node\n");
mask |= POLLIN | POLLRDNORM;
}
spin_unlock_bh(&uci_chan->lock);
uci_chan = &uci_dev->ul_chan;
spin_lock_bh(&uci_chan->lock);
if (!uci_dev->enabled) {
mask |= POLLERR;
} else if (mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE) > 0) {
MSG_VERB("Client can write to node\n");
mask |= POLLOUT | POLLWRNORM;
}
if (!uci_dev->enabled)
mask |= POLLHUP;
if (uci_dev->rx_error)
mask |= POLLERR;
spin_unlock_bh(&uci_chan->lock);
MSG_LOG("Client attempted to poll, returning mask 0x%x\n", mask);
return mask;
}
static ssize_t mhi_uci_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *offp)
{
struct uci_dev *uci_dev = file->private_data;
struct mhi_device *mhi_dev = uci_dev->mhi_dev;
struct uci_chan *uci_chan = &uci_dev->ul_chan;
size_t bytes_xfered = 0;
int ret, nr_avail;
if (!buf || !count || uci_dev->rx_error)
return -EINVAL;
/* confirm channel is active */
spin_lock_bh(&uci_chan->lock);
if (!uci_dev->enabled) {
spin_unlock_bh(&uci_chan->lock);
return -ERESTARTSYS;
}
MSG_VERB("Enter: to xfer:%zu bytes\n", count);
while (count) {
size_t xfer_size;
void *kbuf;
enum MHI_FLAGS flags;
spin_unlock_bh(&uci_chan->lock);
nr_avail = mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE);
if ((nr_avail == 0) && (file->f_flags & O_NONBLOCK))
return -EAGAIN;
/* wait for free descriptors */
ret = wait_event_interruptible(uci_chan->wq,
(!uci_dev->enabled) ||
(nr_avail = mhi_get_no_free_descriptors(mhi_dev,
DMA_TO_DEVICE)) > 0);
if (ret == -ERESTARTSYS || !uci_dev->enabled) {
MSG_LOG("Exit signal caught for node or not enabled\n");
return -ERESTARTSYS;
}
xfer_size = min_t(size_t, count, uci_dev->mtu);
kbuf = kmalloc(xfer_size, GFP_KERNEL);
if (!kbuf) {
MSG_ERR("Failed to allocate memory %zu\n", xfer_size);
return -ENOMEM;
}
ret = copy_from_user(kbuf, buf, xfer_size);
if (unlikely(ret)) {
kfree(kbuf);
return ret;
}
spin_lock_bh(&uci_chan->lock);
/* if ring is full after this force EOT */
if (nr_avail > 1 && (count - xfer_size))
flags = MHI_CHAIN;
else
flags = MHI_EOT;
if (uci_dev->enabled)
ret = mhi_queue_transfer(mhi_dev, DMA_TO_DEVICE, kbuf,
xfer_size, flags);
else
ret = -ERESTARTSYS;
if (ret) {
kfree(kbuf);
goto sys_interrupt;
}
bytes_xfered += xfer_size;
count -= xfer_size;
buf += xfer_size;
}
spin_unlock_bh(&uci_chan->lock);
MSG_VERB("Exit: Number of bytes xferred:%zu\n", bytes_xfered);
return bytes_xfered;
sys_interrupt:
spin_unlock_bh(&uci_chan->lock);
return ret;
}
static ssize_t mhi_uci_read(struct file *file,
char __user *buf,
size_t count,
loff_t *ppos)
{
struct uci_dev *uci_dev = file->private_data;
struct mhi_device *mhi_dev = uci_dev->mhi_dev;
struct uci_chan *uci_chan = &uci_dev->dl_chan;
struct uci_buf *uci_buf;
char *ptr;
size_t to_copy;
int ret = 0;
if (!buf || uci_dev->rx_error)
return -EINVAL;
MSG_VERB("Client provided buf len:%zu\n", count);
/* confirm channel is active */
spin_lock_bh(&uci_chan->lock);
if (!uci_dev->enabled) {
spin_unlock_bh(&uci_chan->lock);
return -ERESTARTSYS;
}
/* No data available to read, wait */
if (!uci_chan->cur_buf && list_empty(&uci_chan->pending)) {
MSG_VERB("No data available to read waiting\n");
spin_unlock_bh(&uci_chan->lock);
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
ret = wait_event_interruptible(uci_chan->wq,
(!uci_dev->enabled ||
!list_empty(&uci_chan->pending)));
if (ret == -ERESTARTSYS) {
MSG_LOG("Exit signal caught for node\n");
return -ERESTARTSYS;
}
spin_lock_bh(&uci_chan->lock);
if (!uci_dev->enabled) {
MSG_LOG("node is disabled\n");
ret = -ERESTARTSYS;
goto read_error;
}
}
/* new read, get the next descriptor from the list */
if (!uci_chan->cur_buf) {
uci_buf = list_first_entry_or_null(&uci_chan->pending,
struct uci_buf, node);
if (unlikely(!uci_buf)) {
ret = -EIO;
goto read_error;
}
if (uci_buf->node.next == LIST_POISON1 || uci_buf->node.prev == LIST_POISON1) {
dump_stack();
ret = -EIO;
MSG_ERR("chan[%d] data=%p, len=%zd, nr_trb=%d\n",
mhi_dev->dl_chan_id, uci_buf->data, uci_buf->len, uci_buf->nr_trb);
goto read_error;
}
list_del(&uci_buf->node);
uci_chan->cur_buf = uci_buf;
uci_chan->rx_size = uci_buf->len;
MSG_VERB("Got pkt of size:%zu\n", uci_chan->rx_size);
}
uci_buf = uci_chan->cur_buf;
spin_unlock_bh(&uci_chan->lock);
/* Copy the buffer to user space */
to_copy = min_t(size_t, count, uci_chan->rx_size);
ptr = uci_buf->data + (uci_buf->len - uci_chan->rx_size);
ret = copy_to_user(buf, ptr, to_copy);
if (ret)
return ret;
MSG_VERB("Copied %zu of %zu bytes\n", to_copy, uci_chan->rx_size);
uci_chan->rx_size -= to_copy;
/* we finished with this buffer, queue it back to hardware */
if (!uci_chan->rx_size) {
spin_lock_bh(&uci_chan->lock);
uci_chan->cur_buf = NULL;
if (uci_dev->enabled)
#if 1 /* recycle in place so the buffer addresses in the ring do not change */
{
if (uci_buf->page) {
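/* requeue the slot just behind this buffer's TRB index so the ring
* keeps cycling through the same fixed set of page buffers */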
unsigned nr_trb = uci_buf->nr_trb ? (uci_buf->nr_trb - 1) : (uci_dev->nr_trbs - 1);
uci_buf = &uci_dev->uci_buf[nr_trb];
ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE,
uci_buf->data, uci_dev->mtu,
MHI_EOT);
} else {
kfree(uci_buf);
ret = 0;
}
}
#endif
else
ret = -ERESTARTSYS;
if (ret) {
MSG_ERR("Failed to recycle element for chan:%d , ret=%d\n", mhi_dev->ul_chan_id, ret);
#if 0
kfree(uci_buf->data);
#endif
goto read_error;
}
spin_unlock_bh(&uci_chan->lock);
}
MSG_VERB("Returning %zu bytes\n", to_copy);
return to_copy;
read_error:
spin_unlock_bh(&uci_chan->lock);
return ret;
}
static ssize_t mhi_uci_write_mutex(struct file *file,
const char __user *buf,
size_t count,
loff_t *offp)
{
struct uci_dev *uci_dev = file->private_data;
int ret;
ret = mutex_lock_interruptible(&uci_dev->w_mutex); /* serialize concurrent writes */
if (ret < 0)
return -ERESTARTSYS;
ret = mhi_uci_write(file, buf, count, offp);
mutex_unlock(&uci_dev->w_mutex);
return ret;
}
static ssize_t mhi_uci_read_mutex(struct file *file,
char __user *buf,
size_t count,
loff_t *ppos)
{
struct uci_dev *uci_dev = file->private_data;
int ret;
ret = mutex_lock_interruptible(&uci_dev->r_mutex); /* serialize concurrent reads */
if (ret < 0)
return -ERESTARTSYS;
ret = mhi_uci_read(file, buf, count, ppos);
mutex_unlock(&uci_dev->r_mutex);
return ret;
}
static int mhi_uci_open(struct inode *inode, struct file *filp)
{
struct uci_dev *uci_dev = NULL, *tmp_dev;
int ret = -EIO;
struct uci_chan *dl_chan;
mutex_lock(&mhi_uci_drv.lock);
list_for_each_entry(tmp_dev, &mhi_uci_drv.head, node) {
if (tmp_dev->devt == inode->i_rdev) {
uci_dev = tmp_dev;
break;
}
}
/* could not find a minor node */
if (!uci_dev)
goto error_exit;
mutex_lock(&uci_dev->mutex);
if (!uci_dev->enabled) {
MSG_ERR("Node exist, but not in active state!\n");
goto error_open_chan;
}
uci_dev->ref_count++;
MSG_LOG("Node open, ref counts %u\n", uci_dev->ref_count);
if (uci_dev->ref_count == 1) {
MSG_LOG("Starting channel\n");
ret = mhi_prepare_for_transfer(uci_dev->mhi_dev);
if (ret) {
MSG_ERR("Error starting transfer channels\n");
uci_dev->ref_count--;
goto error_open_chan;
}
ret = mhi_queue_inbound(uci_dev);
if (ret)
goto error_rx_queue;
#ifdef QUEC_MHI_UCI_ALWAYS_OPEN
uci_dev->ref_count++;
#endif
}
filp->private_data = uci_dev;
mutex_unlock(&uci_dev->mutex);
mutex_unlock(&mhi_uci_drv.lock);
return 0;
error_rx_queue:
dl_chan = &uci_dev->dl_chan;
mhi_unprepare_from_transfer(uci_dev->mhi_dev);
if (uci_dev->uci_buf) {
unsigned nr_trb = 0;
for (nr_trb = 0; nr_trb < uci_dev->nr_trbs; nr_trb++) {
if (uci_dev->uci_buf[nr_trb].page)
__free_pages(uci_dev->uci_buf[nr_trb].page, get_order(uci_dev->mtu));
}
kfree(uci_dev->uci_buf);
uci_dev->uci_buf = NULL;
}
error_open_chan:
mutex_unlock(&uci_dev->mutex);
error_exit:
mutex_unlock(&mhi_uci_drv.lock);
return ret;
}
static const struct file_operations mhidev_fops = {
.open = mhi_uci_open,
.release = mhi_uci_release,
.read = mhi_uci_read_mutex,
.write = mhi_uci_write_mutex,
.poll = mhi_uci_poll,
.unlocked_ioctl = mhi_uci_ioctl,
};
static void mhi_uci_remove(struct mhi_device *mhi_dev)
{
struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev);
MSG_LOG("Enter\n");
mutex_lock(&mhi_uci_drv.lock);
mutex_lock(&uci_dev->mutex);
/* disable the node */
spin_lock_irq(&uci_dev->dl_chan.lock);
spin_lock_irq(&uci_dev->ul_chan.lock);
uci_dev->enabled = false;
spin_unlock_irq(&uci_dev->ul_chan.lock);
spin_unlock_irq(&uci_dev->dl_chan.lock);
wake_up(&uci_dev->dl_chan.wq);
wake_up(&uci_dev->ul_chan.wq);
/* delete the node to prevent new opens */
device_destroy(mhi_uci_drv.class, uci_dev->devt);
uci_dev->dev = NULL;
list_del(&uci_dev->node);
#ifdef QUEC_MHI_UCI_ALWAYS_OPEN
if (uci_dev->ref_count > 0)
uci_dev->ref_count--;
#endif
/* safe to free memory only if all file nodes are closed */
if (!uci_dev->ref_count) {
mutex_unlock(&uci_dev->mutex);
mutex_destroy(&uci_dev->mutex);
clear_bit(MINOR(uci_dev->devt), uci_minors);
kfree(uci_dev);
mutex_unlock(&mhi_uci_drv.lock);
return;
}
MSG_LOG("Exit\n");
mutex_unlock(&uci_dev->mutex);
mutex_unlock(&mhi_uci_drv.lock);
}
static int mhi_uci_probe(struct mhi_device *mhi_dev,
const struct mhi_device_id *id)
{
struct uci_dev *uci_dev;
int minor;
char node_name[32];
int dir;
uci_dev = kzalloc(sizeof(*uci_dev), GFP_KERNEL);
if (!uci_dev)
return -ENOMEM;
mutex_init(&uci_dev->mutex);
mutex_init(&uci_dev->r_mutex);
mutex_init(&uci_dev->w_mutex);
uci_dev->mhi_dev = mhi_dev;
minor = find_first_zero_bit(uci_minors, MAX_UCI_DEVICES);
if (minor >= MAX_UCI_DEVICES) {
kfree(uci_dev);
return -ENOSPC;
}
mutex_lock(&uci_dev->mutex);
mutex_lock(&mhi_uci_drv.lock);
uci_dev->devt = MKDEV(mhi_uci_drv.major, minor);
#if 1
if (mhi_dev->mhi_cntrl->cntrl_idx)
uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev,
uci_dev->devt, uci_dev,
DEVICE_NAME "_%s%d",
mhi_dev->chan_name, mhi_dev->mhi_cntrl->cntrl_idx);
else
uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev,
uci_dev->devt, uci_dev,
DEVICE_NAME "_%s",
mhi_dev->chan_name);
#else
uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev,
uci_dev->devt, uci_dev,
DEVICE_NAME "_%04x_%02u.%02u.%02u%s%d",
mhi_dev->dev_id, mhi_dev->domain,
mhi_dev->bus, mhi_dev->slot, "_pipe_",
mhi_dev->ul_chan_id);
#endif
set_bit(minor, uci_minors);
/* create debugging buffer */
snprintf(node_name, sizeof(node_name), "mhi_uci_%04x_%02u.%02u.%02u_%d",
mhi_dev->dev_id, mhi_dev->domain, mhi_dev->bus, mhi_dev->slot,
mhi_dev->ul_chan_id);
for (dir = 0; dir < 2; dir++) {
struct uci_chan *uci_chan = (dir) ?
&uci_dev->ul_chan : &uci_dev->dl_chan;
spin_lock_init(&uci_chan->lock);
init_waitqueue_head(&uci_chan->wq);
INIT_LIST_HEAD(&uci_chan->pending);
}
uci_dev->termios = tty_std_termios;
uci_dev->mtu = min_t(size_t, id->driver_data, mhi_dev->mtu);
mhi_device_set_devdata(mhi_dev, uci_dev);
uci_dev->enabled = true;
list_add(&uci_dev->node, &mhi_uci_drv.head);
mutex_unlock(&mhi_uci_drv.lock);
mutex_unlock(&uci_dev->mutex);
MSG_LOG("channel:%s successfully probed\n", mhi_dev->chan_name);
return 0;
}
static void mhi_ul_xfer_cb(struct mhi_device *mhi_dev,
struct mhi_result *mhi_result)
{
struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev);
struct uci_chan *uci_chan = &uci_dev->ul_chan;
MSG_VERB("status:%d xfer_len:%zu\n", mhi_result->transaction_status,
mhi_result->bytes_xferd);
kfree(mhi_result->buf_addr);
if (!mhi_result->transaction_status)
wake_up(&uci_chan->wq);
}
static void mhi_dl_xfer_cb(struct mhi_device *mhi_dev,
struct mhi_result *mhi_result)
{
struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev);
struct uci_chan *uci_chan = &uci_dev->dl_chan;
unsigned long flags;
struct uci_buf *buf;
unsigned nr_trb = uci_dev->nr_trb;
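/* completions are expected in ring order: the buffer mhi_result points
* at must match the slot we are waiting on, otherwise count rx_error */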
buf = &uci_dev->uci_buf[nr_trb];
if (buf->nr_trb != nr_trb || buf->data != mhi_result->buf_addr)
{
uci_dev->rx_error++;
MSG_ERR("chan[%d]: uci_buf[%u] = %p , mhi_result[%u] = %p\n",
mhi_dev->dl_chan_id, buf->nr_trb, buf->data, nr_trb, mhi_result->buf_addr);
return;
}
uci_dev->nr_trb++;
if (uci_dev->nr_trb == uci_dev->nr_trbs)
uci_dev->nr_trb = 0;
if (mhi_result->transaction_status == -ENOTCONN) {
return;
}
if (mhi_result->bytes_xferd > uci_dev->mtu || mhi_result->bytes_xferd <= 0)
{
MSG_ERR("chan[%d]: bytes_xferd = %zd , mtu = %zd\n",
mhi_dev->dl_chan_id, mhi_result->bytes_xferd, uci_dev->mtu);
return;
}
if (mhi_result->bytes_xferd > uci_dev->bytes_xferd)
{
uci_dev->bytes_xferd = mhi_result->bytes_xferd;
//MSG_ERR("chan[%d]: bytes_xferd = %zd , mtu = %zd\n",
// mhi_dev->dl_chan_id, mhi_result->bytes_xferd, uci_dev->mtu);
}
MSG_VERB("status:%d receive_len:%zu\n", mhi_result->transaction_status,
mhi_result->bytes_xferd);
spin_lock_irqsave(&uci_chan->lock, flags);
#if 0
buf = mhi_result->buf_addr + uci_dev->mtu;
buf->data = mhi_result->buf_addr;
#endif
buf->len = mhi_result->bytes_xferd;
if (mhi_dev->dl_chan_id == MHI_CLIENT_DUN_IN
|| mhi_dev->dl_chan_id == MHI_CLIENT_QMI_IN
|| mhi_dev->dl_chan_id == MHI_CLIENT_MBIM_IN)
{
struct uci_buf *tmp_buf = NULL;
int skip_buf = 0;
#ifdef QUEC_MHI_UCI_ALWAYS_OPEN
if (uci_dev->ref_count == 1)
skip_buf++;
#endif
if (!skip_buf)
tmp_buf = (struct uci_buf *)kmalloc(buf->len + sizeof(struct uci_buf), GFP_ATOMIC);
if (tmp_buf) {
tmp_buf->page = NULL;
tmp_buf->data = ((void *)tmp_buf) + sizeof(struct uci_buf);
tmp_buf->len = buf->len;
memcpy(tmp_buf->data, buf->data, buf->len);
}
if (buf) {
struct uci_buf *uci_buf = buf;
unsigned nr_trb = uci_buf->nr_trb ? (uci_buf->nr_trb - 1) : (uci_dev->nr_trbs - 1);
uci_buf = &uci_dev->uci_buf[nr_trb];
mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, uci_buf->data, uci_dev->mtu, MHI_EOT);
}
buf = tmp_buf;
}
if (buf)
list_add_tail(&buf->node, &uci_chan->pending);
spin_unlock_irqrestore(&uci_chan->lock, flags);
#ifdef CONFIG_PM_SLEEP
if (mhi_dev->dev.power.wakeup)
__pm_wakeup_event(mhi_dev->dev.power.wakeup, 0);
#endif
wake_up(&uci_chan->wq);
}
#define DIAG_MAX_PCIE_PKT_SZ 2048 /* defined by the module */
/* .driver_data stores max mtu */
static const struct mhi_device_id mhi_uci_match_table[] = {
{ .chan = "LOOPBACK", .driver_data = 0x1000 },
{ .chan = "SAHARA", .driver_data = 0x4000 },
{ .chan = "EDL", .driver_data = 0x4000 },
{ .chan = "DIAG", .driver_data = DIAG_MAX_PCIE_PKT_SZ },
{ .chan = "MBIM", .driver_data = 0x1000 },
{ .chan = "QMI0", .driver_data = 0x1000 },
{ .chan = "QMI1", .driver_data = 0x1000 },
{ .chan = "DUN", .driver_data = 0x1000 },
{},
};
static struct mhi_driver mhi_uci_driver = {
.id_table = mhi_uci_match_table,
.remove = mhi_uci_remove,
.probe = mhi_uci_probe,
.ul_xfer_cb = mhi_ul_xfer_cb,
.dl_xfer_cb = mhi_dl_xfer_cb,
.driver = {
.name = MHI_UCI_DRIVER_NAME,
.owner = THIS_MODULE,
},
};
int mhi_device_uci_init(void)
{
int ret;
ret = register_chrdev(0, MHI_UCI_DRIVER_NAME, &mhidev_fops);
if (ret < 0)
return ret;
mhi_uci_drv.major = ret;
mhi_uci_drv.class = class_create(THIS_MODULE, MHI_UCI_DRIVER_NAME);
if (IS_ERR(mhi_uci_drv.class)) {
unregister_chrdev(mhi_uci_drv.major, MHI_UCI_DRIVER_NAME);
return -ENODEV;
}
mutex_init(&mhi_uci_drv.lock);
INIT_LIST_HEAD(&mhi_uci_drv.head);
ret = mhi_driver_register(&mhi_uci_driver);
if (ret) {
class_destroy(mhi_uci_drv.class);
unregister_chrdev(mhi_uci_drv.major, MHI_UCI_DRIVER_NAME);
}
return ret;
}
void mhi_device_uci_exit(void)
{
mhi_driver_unregister(&mhi_uci_driver);
class_destroy(mhi_uci_drv.class);
unregister_chrdev(mhi_uci_drv.major, MHI_UCI_DRIVER_NAME);
}

View File

@ -0,0 +1,13 @@
#
# RMNET MAP driver
#
menuconfig RMNET
tristate "RmNet MAP driver"
default n
select GRO_CELLS
---help---
If you select this, you will enable the RMNET module which is used
for handling data in the multiplexing and aggregation protocol (MAP)
format in the embedded data path. RMNET devices can be attached to
any IP mode physical device.

View File

@ -0,0 +1,11 @@
#
# Makefile for the RMNET module
#
rmnet-y := rmnet_config.o
rmnet-y += rmnet_vnd.o
rmnet-y += rmnet_handlers.o
rmnet-y += rmnet_map_data.o
rmnet-y += rmnet_map_command.o
rmnet-y += rmnet_descriptor.o
obj-$(CONFIG_RMNET) += rmnet.o

View File

@ -0,0 +1,141 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* RMNET configuration engine
*
*/
#include <net/sock.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netdevice.h>
#include <linux/hashtable.h>
#include "rmnet_config.h"
#include "rmnet_handlers.h"
#include "rmnet_vnd.h"
#include "rmnet_private.h"
#include "rmnet_map.h"
#include "rmnet_descriptor.h"
/* Locking scheme -
* The shared resource which needs to be protected is real_dev->rx_handler_data.
* For the writer path, this is using rtnl_lock(). The writer paths are
* rmnet_newlink(), rmnet_dellink() and rmnet_force_unassociate_device(). These
* paths are already called with rtnl_lock() acquired. There is also an
* ASSERT_RTNL() to ensure that we are calling with rtnl acquired. For
* dereference here, we will need to use rtnl_dereference(). Dev list writing
* needs to happen with rtnl_lock() acquired for netdev_master_upper_dev_link().
* For the reader path, the real_dev->rx_handler_data is accessed in the TX / RX
* path. We only need rcu_read_lock() for these scenarios. In these cases,
* the rcu_read_lock() is held in __dev_queue_xmit() and
* netif_receive_skb_internal(), so readers need to use rcu_dereference_rtnl()
* to get the relevant information. For dev list reading, we again acquire
* rcu_read_lock() in rmnet_dellink() for netdev_master_upper_dev_get_rcu().
* We also use unregister_netdevice_many() to free all rmnet devices in
* rmnet_force_unassociate_device() so we don't lose the rtnl_lock() and free in
* the same context.
*/
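/* For example (an illustrative sketch of the scheme above, not driver code):
*
*	rtnl_lock();	writer path, e.g. rmnet_newlink()
*	port = rtnl_dereference(real_dev->rx_handler_data);
*	...modify port...
*	rtnl_unlock();
*
*	rcu_read_lock();	reader path, TX / RX handlers
*	port = rcu_dereference_rtnl(real_dev->rx_handler_data);
*	...read port...
*	rcu_read_unlock();
*/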
/* Local Definitions and Declarations */
static int rmnet_is_real_dev_registered(const struct net_device *real_dev)
{
return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler;
}
/* Needs rtnl lock */
static struct rmnet_port*
rmnet_get_port_rtnl(const struct net_device *real_dev)
{
return rtnl_dereference(real_dev->rx_handler_data);
}
static int rmnet_unregister_real_device(struct net_device *real_dev,
struct rmnet_port *port)
{
if (port->nr_rmnet_devs)
return -EINVAL;
rmnet_map_cmd_exit(port);
rmnet_map_tx_aggregate_exit(port);
rmnet_descriptor_deinit(port);
kfree(port);
netdev_rx_handler_unregister(real_dev);
/* release reference on real_dev */
dev_put(real_dev);
netdev_dbg(real_dev, "Removed from rmnet\n");
return 0;
}
static int rmnet_register_real_device(struct net_device *real_dev)
{
struct rmnet_port *port;
int rc, entry;
ASSERT_RTNL();
if (rmnet_is_real_dev_registered(real_dev))
return 0;
port = kzalloc(sizeof(*port), GFP_ATOMIC);
if (!port)
return -ENOMEM;
port->dev = real_dev;
rc = netdev_rx_handler_register(real_dev, rmnet_rx_handler, port);
if (rc) {
kfree(port);
return -EBUSY;
}
/* hold on to real dev for MAP data */
dev_hold(real_dev);
for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++)
INIT_HLIST_HEAD(&port->muxed_ep[entry]);
rc = rmnet_descriptor_init(port);
if (rc) {
rmnet_descriptor_deinit(port);
return rc;
}
rmnet_map_tx_aggregate_init(port);
rmnet_map_cmd_init(port);
netdev_dbg(real_dev, "registered with rmnet\n");
return 0;
}
/* Needs either rcu_read_lock() or rtnl lock */
static struct rmnet_port *rmnet_get_port(struct net_device *real_dev)
{
if (rmnet_is_real_dev_registered(real_dev))
return rcu_dereference_rtnl(real_dev->rx_handler_data);
else
return NULL;
}
static struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id)
{
struct rmnet_endpoint *ep;
hlist_for_each_entry_rcu(ep, &port->muxed_ep[mux_id], hlnode) {
if (ep->mux_id == mux_id)
return ep;
}
return NULL;
}

View File

@ -0,0 +1,174 @@
/* Copyright (c) 2013-2017, 2019 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* RMNET Data configuration engine
*
*/
#include <linux/skbuff.h>
#include <net/gro_cells.h>
#ifndef _RMNET_CONFIG_H_
#define _RMNET_CONFIG_H_
#define RMNET_MAX_LOGICAL_EP 255
#define RMNET_MAX_VEID 4
struct rmnet_endpoint {
u8 mux_id;
struct net_device *egress_dev;
struct hlist_node hlnode;
};
struct rmnet_port_priv_stats {
u64 dl_hdr_last_qmap_vers;
u64 dl_hdr_last_ep_id;
u64 dl_hdr_last_trans_id;
u64 dl_hdr_last_seq;
u64 dl_hdr_last_bytes;
u64 dl_hdr_last_pkts;
u64 dl_hdr_last_flows;
u64 dl_hdr_count;
u64 dl_hdr_total_bytes;
u64 dl_hdr_total_pkts;
u64 dl_trl_last_seq;
u64 dl_trl_count;
};
struct rmnet_egress_agg_params {
u16 agg_size;
u16 agg_count;
u32 agg_time;
};
/* One instance of this structure is instantiated for each real_dev associated
* with rmnet.
*/
struct rmnet_port {
struct net_device *dev;
u32 data_format;
u8 nr_rmnet_devs;
u8 rmnet_mode;
struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP];
struct net_device *bridge_ep;
void *rmnet_perf;
struct rmnet_egress_agg_params egress_agg_params;
/* Protect aggregation related elements */
spinlock_t agg_lock;
struct sk_buff *agg_skb;
int agg_state;
u8 agg_count;
struct timespec agg_time;
struct timespec agg_last;
struct hrtimer hrtimer;
struct work_struct agg_wq;
/* dl marker elements */
struct list_head dl_list;
struct rmnet_port_priv_stats stats;
int dl_marker_flush;
/* Descriptor pool */
spinlock_t desc_pool_lock;
struct rmnet_frag_descriptor_pool *frag_desc_pool;
struct sk_buff *chain_head;
struct sk_buff *chain_tail;
};
extern struct rtnl_link_ops rmnet_link_ops;
struct rmnet_vnd_stats {
u64 rx_pkts;
u64 rx_bytes;
u64 tx_pkts;
u64 tx_bytes;
u32 tx_drops;
};
struct rmnet_pcpu_stats {
struct rmnet_vnd_stats stats;
struct u64_stats_sync syncp;
};
struct rmnet_coal_close_stats {
u64 non_coal;
u64 ip_miss;
u64 trans_miss;
u64 hw_nl;
u64 hw_pkt;
u64 hw_byte;
u64 hw_time;
u64 hw_evict;
u64 coal;
};
struct rmnet_coal_stats {
u64 coal_rx;
u64 coal_pkts;
u64 coal_hdr_nlo_err;
u64 coal_hdr_pkt_err;
u64 coal_csum_err;
u64 coal_reconstruct;
u64 coal_ip_invalid;
u64 coal_trans_invalid;
struct rmnet_coal_close_stats close;
u64 coal_veid[RMNET_MAX_VEID];
};
struct rmnet_priv_stats {
u64 csum_ok;
u64 csum_valid_unset;
u64 csum_validation_failed;
u64 csum_err_bad_buffer;
u64 csum_err_invalid_ip_version;
u64 csum_err_invalid_transport;
u64 csum_fragmented_pkt;
u64 csum_skipped;
u64 csum_sw;
u64 csum_hw;
struct rmnet_coal_stats coal;
};
struct rmnet_priv {
u8 mux_id;
struct net_device *real_dev;
struct rmnet_pcpu_stats __percpu *pcpu_stats;
struct gro_cells gro_cells;
struct rmnet_priv_stats stats;
};
enum rmnet_dl_marker_prio {
RMNET_PERF,
RMNET_SHS,
};
enum rmnet_trace_func {
RMNET_MODULE,
NW_STACK_MODULE,
};
enum rmnet_trace_evt {
RMNET_DLVR_SKB,
RMNET_RCV_FROM_PND,
RMNET_TX_UL_PKT,
NW_STACK_DEV_Q_XMIT,
NW_STACK_NAPI_GRO_FLUSH,
NW_STACK_RX,
NW_STACK_TX,
};
static int rmnet_is_real_dev_registered(const struct net_device *real_dev);
static struct rmnet_port *rmnet_get_port(struct net_device *real_dev);
static struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id);
#endif /* _RMNET_CONFIG_H_ */

File diff suppressed because it is too large

View File

@ -0,0 +1,661 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* RMNET Packet Descriptor Framework
*
*/
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include "rmnet_config.h"
#include "rmnet_descriptor.h"
#include "rmnet_handlers.h"
#include "rmnet_private.h"
#include "rmnet_vnd.h"
#define RMNET_FRAG_DESCRIPTOR_POOL_SIZE 64
#define RMNET_DL_IND_HDR_SIZE (sizeof(struct rmnet_map_dl_ind_hdr) + \
sizeof(struct rmnet_map_header) + \
sizeof(struct rmnet_map_control_command_header))
#define RMNET_DL_IND_TRL_SIZE (sizeof(struct rmnet_map_dl_ind_trl) + \
sizeof(struct rmnet_map_header) + \
sizeof(struct rmnet_map_control_command_header))
typedef void (*rmnet_perf_desc_hook_t)(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port);
typedef void (*rmnet_perf_chain_hook_t)(void);
static struct rmnet_frag_descriptor *
rmnet_get_frag_descriptor(struct rmnet_port *port)
{
struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
struct rmnet_frag_descriptor *frag_desc;
spin_lock(&port->desc_pool_lock);
if (!list_empty(&pool->free_list)) {
frag_desc = list_first_entry(&pool->free_list,
struct rmnet_frag_descriptor,
list);
list_del_init(&frag_desc->list);
} else {
frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC);
if (!frag_desc)
goto out;
INIT_LIST_HEAD(&frag_desc->list);
INIT_LIST_HEAD(&frag_desc->sub_frags);
pool->pool_size++;
}
out:
spin_unlock(&port->desc_pool_lock);
return frag_desc;
}
static void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port)
{
struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
struct page *page = skb_frag_page(&frag_desc->frag);
list_del(&frag_desc->list);
if (page)
put_page(page);
memset(frag_desc, 0, sizeof(*frag_desc));
INIT_LIST_HEAD(&frag_desc->list);
INIT_LIST_HEAD(&frag_desc->sub_frags);
spin_lock(&port->desc_pool_lock);
list_add_tail(&frag_desc->list, &pool->free_list);
spin_unlock(&port->desc_pool_lock);
}
static void rmnet_descriptor_add_frag(struct rmnet_port *port, struct list_head *list,
struct page *p, u32 page_offset, u32 len)
{
struct rmnet_frag_descriptor *frag_desc;
frag_desc = rmnet_get_frag_descriptor(port);
if (!frag_desc)
return;
rmnet_frag_fill(frag_desc, p, page_offset, len);
list_add_tail(&frag_desc->list, list);
}
static u8 rmnet_frag_do_flow_control(struct rmnet_map_header *qmap,
struct rmnet_port *port,
int enable)
{
struct rmnet_map_control_command *cmd;
struct rmnet_endpoint *ep;
struct net_device *vnd;
u16 ip_family;
u16 fc_seq;
u32 qos_id;
u8 mux_id;
int r;
mux_id = qmap->mux_id;
cmd = (struct rmnet_map_control_command *)
((char *)qmap + sizeof(*qmap));
if (mux_id >= RMNET_MAX_LOGICAL_EP)
return RX_HANDLER_CONSUMED;
ep = rmnet_get_endpoint(port, mux_id);
if (!ep)
return RX_HANDLER_CONSUMED;
vnd = ep->egress_dev;
ip_family = cmd->flow_control.ip_family;
fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
qos_id = ntohl(cmd->flow_control.qos_id);
/* Ignore the ip family and pass the sequence number for both v4 and v6
* sequence. User space does not support creating dedicated flows for
* the 2 protocols
*/
r = rmnet_vnd_do_flow_control(vnd, enable);
if (r)
return RMNET_MAP_COMMAND_UNSUPPORTED;
else
return RMNET_MAP_COMMAND_ACK;
}
static void rmnet_frag_send_ack(struct rmnet_map_header *qmap,
unsigned char type,
struct rmnet_port *port)
{
struct rmnet_map_control_command *cmd;
struct net_device *dev = port->dev;
struct sk_buff *skb;
u16 alloc_len = ntohs(qmap->pkt_len) + sizeof(*qmap);
skb = alloc_skb(alloc_len, GFP_ATOMIC);
if (!skb)
return;
skb->protocol = htons(ETH_P_MAP);
skb->dev = dev;
cmd = rmnet_map_get_cmd_start(skb);
cmd->cmd_type = type & 0x03;
netif_tx_lock(dev);
dev->netdev_ops->ndo_start_xmit(skb, dev);
netif_tx_unlock(dev);
}
/* Process MAP command frame and send N/ACK message as appropriate. Message cmd
* name is decoded here and appropriate handler is called.
*/
static void rmnet_frag_command(struct rmnet_map_header *qmap, struct rmnet_port *port)
{
struct rmnet_map_control_command *cmd;
unsigned char command_name;
unsigned char rc = 0;
cmd = (struct rmnet_map_control_command *)
((char *)qmap + sizeof(*qmap));
command_name = cmd->command_name;
switch (command_name) {
case RMNET_MAP_COMMAND_FLOW_ENABLE:
rc = rmnet_frag_do_flow_control(qmap, port, 1);
break;
case RMNET_MAP_COMMAND_FLOW_DISABLE:
rc = rmnet_frag_do_flow_control(qmap, port, 0);
break;
default:
rc = RMNET_MAP_COMMAND_UNSUPPORTED;
break;
}
if (rc == RMNET_MAP_COMMAND_ACK)
rmnet_frag_send_ack(qmap, rc, port);
}
static void rmnet_frag_deaggregate(skb_frag_t *frag, struct rmnet_port *port,
struct list_head *list)
{
struct rmnet_map_header *maph;
u8 *data = skb_frag_address(frag);
u32 offset = 0;
u32 packet_len;
while (offset < skb_frag_size(frag)) {
maph = (struct rmnet_map_header *)data;
packet_len = ntohs(maph->pkt_len);
/* Some hardware can send us empty frames. Catch them */
if (packet_len == 0)
return;
packet_len += sizeof(*maph);
if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
WARN_ON(1);
} else if (port->data_format &
(RMNET_FLAGS_INGRESS_MAP_CKSUMV5 |
RMNET_FLAGS_INGRESS_COALESCE) && !maph->cd_bit) {
u32 hsize = 0;
u8 type;
type = ((struct rmnet_map_v5_coal_header *)
(data + sizeof(*maph)))->header_type;
switch (type) {
case RMNET_MAP_HEADER_TYPE_COALESCING:
hsize = sizeof(struct rmnet_map_v5_coal_header);
break;
case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
hsize = sizeof(struct rmnet_map_v5_csum_header);
break;
}
packet_len += hsize;
}
else {
//qmap_hex_dump(__func__, data, 64);
WARN_ON(1);
}
if ((int)skb_frag_size(frag) - (int)packet_len < 0)
return;
rmnet_descriptor_add_frag(port, list, skb_frag_page(frag),
frag->page_offset + offset,
packet_len);
offset += packet_len;
data += packet_len;
}
}
/* Allocate and populate an skb to contain the packet represented by the
* frag descriptor.
*/
static struct sk_buff *rmnet_alloc_skb(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port)
{
struct sk_buff *head_skb, *current_skb, *skb;
struct skb_shared_info *shinfo;
struct rmnet_frag_descriptor *sub_frag, *tmp;
/* Use the exact sizes if we know them (i.e. RSB/RSC, rmnet_perf) */
if (frag_desc->hdrs_valid) {
u16 hdr_len = frag_desc->ip_len + frag_desc->trans_len;
head_skb = alloc_skb(hdr_len + RMNET_MAP_DESC_HEADROOM,
GFP_ATOMIC);
if (!head_skb)
return NULL;
skb_reserve(head_skb, RMNET_MAP_DESC_HEADROOM);
skb_put_data(head_skb, frag_desc->hdr_ptr, hdr_len);
skb_reset_network_header(head_skb);
if (frag_desc->trans_len)
skb_set_transport_header(head_skb, frag_desc->ip_len);
/* Packets that have no data portion don't need any frags */
if (hdr_len == skb_frag_size(&frag_desc->frag))
goto skip_frags;
/* If the headers we added are the start of the page,
* we don't want to add them twice
*/
if (frag_desc->hdr_ptr == rmnet_frag_data_ptr(frag_desc)) {
if (!rmnet_frag_pull(frag_desc, port, hdr_len)) {
kfree_skb(head_skb);
return NULL;
}
}
} else {
/* Allocate enough space to avoid penalties in the stack
* from __pskb_pull_tail()
*/
head_skb = alloc_skb(256 + RMNET_MAP_DESC_HEADROOM,
GFP_ATOMIC);
if (!head_skb)
return NULL;
skb_reserve(head_skb, RMNET_MAP_DESC_HEADROOM);
}
/* Add main fragment */
get_page(skb_frag_page(&frag_desc->frag));
skb_add_rx_frag(head_skb, 0, skb_frag_page(&frag_desc->frag),
frag_desc->frag.page_offset,
skb_frag_size(&frag_desc->frag),
skb_frag_size(&frag_desc->frag));
shinfo = skb_shinfo(head_skb);
current_skb = head_skb;
/* Add in any frags from rmnet_perf */
list_for_each_entry_safe(sub_frag, tmp, &frag_desc->sub_frags, list) {
skb_frag_t *frag;
u32 frag_size;
frag = &sub_frag->frag;
frag_size = skb_frag_size(frag);
add_frag:
if (shinfo->nr_frags < MAX_SKB_FRAGS) {
get_page(skb_frag_page(frag));
skb_add_rx_frag(current_skb, shinfo->nr_frags,
skb_frag_page(frag), frag->page_offset,
frag_size, frag_size);
if (current_skb != head_skb) {
head_skb->len += frag_size;
head_skb->data_len += frag_size;
}
} else {
/* Alloc a new skb and try again */
skb = alloc_skb(0, GFP_ATOMIC);
if (!skb)
break;
if (current_skb == head_skb)
shinfo->frag_list = skb;
else
current_skb->next = skb;
current_skb = skb;
shinfo = skb_shinfo(current_skb);
goto add_frag;
}
rmnet_recycle_frag_descriptor(sub_frag, port);
}
skip_frags:
head_skb->dev = frag_desc->dev;
rmnet_set_skb_proto(head_skb);
/* Handle any header metadata that needs to be updated after RSB/RSC
* segmentation
*/
if (frag_desc->ip_id_set) {
struct iphdr *iph;
iph = (struct iphdr *)rmnet_map_data_ptr(head_skb);
csum_replace2(&iph->check, iph->id, frag_desc->ip_id);
iph->id = frag_desc->ip_id;
}
if (frag_desc->tcp_seq_set) {
struct tcphdr *th;
th = (struct tcphdr *)
(rmnet_map_data_ptr(head_skb) + frag_desc->ip_len);
th->seq = frag_desc->tcp_seq;
}
/* Handle csum offloading */
if (frag_desc->csum_valid && frag_desc->hdrs_valid) {
/* Set the partial checksum information */
//rmnet_frag_partial_csum(head_skb, frag_desc);
WARN_ON(1);
} else if (frag_desc->csum_valid) {
/* Non-RSB/RSC/perf packet. The current checksum is fine */
head_skb->ip_summed = CHECKSUM_UNNECESSARY;
} else if (frag_desc->hdrs_valid &&
(frag_desc->trans_proto == IPPROTO_TCP ||
frag_desc->trans_proto == IPPROTO_UDP)) {
/* Unfortunately, we have to fake a bad checksum here, since
* the original bad value is lost by the hardware. The only
* reliable way to do it is to calculate the actual checksum
* and corrupt it.
*/
__sum16 *check;
__wsum csum;
unsigned int offset = skb_transport_offset(head_skb);
__sum16 pseudo;
WARN_ON(1);
/* Calculate pseudo header and update header fields */
if (frag_desc->ip_proto == 4) {
struct iphdr *iph = ip_hdr(head_skb);
__be16 tot_len = htons(head_skb->len);
csum_replace2(&iph->check, iph->tot_len, tot_len);
iph->tot_len = tot_len;
pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
head_skb->len -
frag_desc->ip_len,
frag_desc->trans_proto, 0);
} else {
struct ipv6hdr *ip6h = ipv6_hdr(head_skb);
ip6h->payload_len = htons(head_skb->len -
sizeof(*ip6h));
pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
head_skb->len -
frag_desc->ip_len,
frag_desc->trans_proto, 0);
}
if (frag_desc->trans_proto == IPPROTO_TCP) {
check = &tcp_hdr(head_skb)->check;
} else {
udp_hdr(head_skb)->len = htons(head_skb->len -
frag_desc->ip_len);
check = &udp_hdr(head_skb)->check;
}
*check = pseudo;
csum = skb_checksum(head_skb, offset, head_skb->len - offset,
0);
/* Add 1 to corrupt. This cannot produce a final value of 0
* since csum_fold() can't return a value of 0xFFFF
*/
*check = csum16_add(csum_fold(csum), htons(1));
head_skb->ip_summed = CHECKSUM_NONE;
}
/* Handle any rmnet_perf metadata */
if (frag_desc->hash) {
head_skb->hash = frag_desc->hash;
head_skb->sw_hash = 1;
}
if (frag_desc->flush_shs)
head_skb->cb[0] = 1;
/* Handle coalesced packets */
//if (frag_desc->gso_segs > 1)
// rmnet_frag_gso_stamp(head_skb, frag_desc);
return head_skb;
}
/* Deliver the packets contained within a frag descriptor */
static void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port)
{
struct sk_buff *skb;
skb = rmnet_alloc_skb(frag_desc, port);
if (skb)
rmnet_deliver_skb(skb, port);
rmnet_recycle_frag_descriptor(frag_desc, port);
}
/* Process a QMAPv5 packet header */
static int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port,
struct list_head *list,
u16 len)
{
int rc = 0;
switch (rmnet_frag_get_next_hdr_type(frag_desc)) {
case RMNET_MAP_HEADER_TYPE_COALESCING:
rc = -1;
WARN_ON(1);
break;
case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
if (rmnet_frag_get_csum_valid(frag_desc)) {
frag_desc->csum_valid = true;
} else {
}
if (!rmnet_frag_pull(frag_desc, port,
sizeof(struct rmnet_map_header) +
sizeof(struct rmnet_map_v5_csum_header))) {
rc = -EINVAL;
break;
}
frag_desc->hdr_ptr = rmnet_frag_data_ptr(frag_desc);
/* Remove padding only for csum offload packets.
* Coalesced packets should never have padding.
*/
if (!rmnet_frag_trim(frag_desc, port, len)) {
rc = -EINVAL;
break;
}
list_del_init(&frag_desc->list);
list_add_tail(&frag_desc->list, list);
break;
default:
//qmap_hex_dump(__func__, rmnet_frag_data_ptr(frag_desc), 64);
rc = -EINVAL;
break;
}
return rc;
}
static void
__rmnet_frag_ingress_handler(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port)
{
struct rmnet_map_header *qmap;
struct rmnet_endpoint *ep;
struct rmnet_frag_descriptor *frag, *tmp;
LIST_HEAD(segs);
u16 len, pad;
u8 mux_id;
qmap = (struct rmnet_map_header *)skb_frag_address(&frag_desc->frag);
mux_id = qmap->mux_id;
pad = qmap->pad_len;
len = ntohs(qmap->pkt_len) - pad;
if (qmap->cd_bit) {
if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
//rmnet_frag_flow_command(qmap, port, len);
goto recycle;
}
if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
rmnet_frag_command(qmap, port);
goto recycle;
}
if (mux_id >= RMNET_MAX_LOGICAL_EP)
goto recycle;
ep = rmnet_get_endpoint(port, mux_id);
if (!ep)
goto recycle;
frag_desc->dev = ep->egress_dev;
/* Handle QMAPv5 packet */
if (qmap->next_hdr &&
(port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
RMNET_FLAGS_INGRESS_MAP_CKSUMV5))) {
if (rmnet_frag_process_next_hdr_packet(frag_desc, port, &segs,
len))
goto recycle;
} else {
/* We only have the main QMAP header to worry about */
if (!rmnet_frag_pull(frag_desc, port, sizeof(*qmap)))
return;
frag_desc->hdr_ptr = rmnet_frag_data_ptr(frag_desc);
if (!rmnet_frag_trim(frag_desc, port, len))
return;
list_add_tail(&frag_desc->list, &segs);
}
list_for_each_entry_safe(frag, tmp, &segs, list) {
list_del_init(&frag->list);
rmnet_frag_deliver(frag, port);
}
return;
recycle:
rmnet_recycle_frag_descriptor(frag_desc, port);
}
static void rmnet_frag_ingress_handler(struct sk_buff *skb,
struct rmnet_port *port)
{
LIST_HEAD(desc_list);
int i = 0;
struct rmnet_nss_cb *nss_cb;
/* Deaggregation and freeing of HW-originated
* buffers is done here
*/
while (skb) {
struct sk_buff *skb_frag;
port->chain_head = NULL;
port->chain_tail = NULL;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
rmnet_frag_deaggregate(&skb_shinfo(skb)->frags[i], port,
&desc_list);
if (!list_empty(&desc_list)) {
struct rmnet_frag_descriptor *frag_desc, *tmp;
list_for_each_entry_safe(frag_desc, tmp,
&desc_list, list) {
list_del_init(&frag_desc->list);
__rmnet_frag_ingress_handler(frag_desc,
port);
}
}
}
nss_cb = rcu_dereference(rmnet_nss_callbacks);
if (nss_cb && port->chain_head) {
port->chain_head->cb[0] = 0;
netif_receive_skb(port->chain_head);
}
skb_frag = skb_shinfo(skb)->frag_list;
skb_shinfo(skb)->frag_list = NULL;
consume_skb(skb);
skb = skb_frag;
}
}
void rmnet_descriptor_deinit(struct rmnet_port *port)
{
struct rmnet_frag_descriptor_pool *pool;
struct rmnet_frag_descriptor *frag_desc, *tmp;
pool = port->frag_desc_pool;
list_for_each_entry_safe(frag_desc, tmp, &pool->free_list, list) {
kfree(frag_desc);
pool->pool_size--;
}
kfree(pool);
}
int rmnet_descriptor_init(struct rmnet_port *port)
{
struct rmnet_frag_descriptor_pool *pool;
int i;
spin_lock_init(&port->desc_pool_lock);
pool = kzalloc(sizeof(*pool), GFP_ATOMIC);
if (!pool)
return -ENOMEM;
INIT_LIST_HEAD(&pool->free_list);
port->frag_desc_pool = pool;
for (i = 0; i < RMNET_FRAG_DESCRIPTOR_POOL_SIZE; i++) {
struct rmnet_frag_descriptor *frag_desc;
frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC);
if (!frag_desc)
return -ENOMEM;
INIT_LIST_HEAD(&frag_desc->list);
INIT_LIST_HEAD(&frag_desc->sub_frags);
list_add_tail(&frag_desc->list, &pool->free_list);
pool->pool_size++;
}
return 0;
}

View File

@ -0,0 +1,146 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* RMNET Packet Descriptor Framework
*
*/
#ifndef _RMNET_DESCRIPTOR_H_
#define _RMNET_DESCRIPTOR_H_
#include <linux/netdevice.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
struct rmnet_frag_descriptor_pool {
struct list_head free_list;
u32 pool_size;
};
struct rmnet_frag_descriptor {
struct list_head list;
struct list_head sub_frags;
skb_frag_t frag;
u8 *hdr_ptr;
struct net_device *dev;
u32 hash;
__be32 tcp_seq;
__be16 ip_id;
u16 data_offset;
u16 gso_size;
u16 gso_segs;
u16 ip_len;
u16 trans_len;
u8 ip_proto;
u8 trans_proto;
u8 pkt_id;
u8 csum_valid:1,
hdrs_valid:1,
ip_id_set:1,
tcp_seq_set:1,
flush_shs:1,
reserved:3;
};
/* Descriptor management */
static struct rmnet_frag_descriptor *
rmnet_get_frag_descriptor(struct rmnet_port *port);
static void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port);
static void rmnet_descriptor_add_frag(struct rmnet_port *port, struct list_head *list,
struct page *p, u32 page_offset, u32 len);
/* QMAP command packets */
/* Ingress data handlers */
static void rmnet_frag_deaggregate(skb_frag_t *frag, struct rmnet_port *port,
struct list_head *list);
static void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port);
static int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port,
struct list_head *list,
u16 len);
static void rmnet_frag_ingress_handler(struct sk_buff *skb,
struct rmnet_port *port);
static int rmnet_descriptor_init(struct rmnet_port *port);
static void rmnet_descriptor_deinit(struct rmnet_port *port);
static inline void *rmnet_frag_data_ptr(struct rmnet_frag_descriptor *frag_desc)
{
return skb_frag_address(&frag_desc->frag);
}
static inline void *rmnet_frag_pull(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port,
unsigned int size)
{
if (size >= skb_frag_size(&frag_desc->frag)) {
pr_info("%s(): Pulling %u bytes from %u byte pkt. Dropping\n",
__func__, size, skb_frag_size(&frag_desc->frag));
rmnet_recycle_frag_descriptor(frag_desc, port);
return NULL;
}
frag_desc->frag.page_offset += size;
skb_frag_size_sub(&frag_desc->frag, size);
return rmnet_frag_data_ptr(frag_desc);
}
static inline void *rmnet_frag_trim(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port,
unsigned int size)
{
if (!size) {
pr_info("%s(): Trimming %u byte pkt to 0. Dropping\n",
__func__, skb_frag_size(&frag_desc->frag));
rmnet_recycle_frag_descriptor(frag_desc, port);
return NULL;
}
if (size < skb_frag_size(&frag_desc->frag))
skb_frag_size_set(&frag_desc->frag, size);
return rmnet_frag_data_ptr(frag_desc);
}
static inline void rmnet_frag_fill(struct rmnet_frag_descriptor *frag_desc,
struct page *p, u32 page_offset, u32 len)
{
get_page(p);
__skb_frag_set_page(&frag_desc->frag, p);
skb_frag_size_set(&frag_desc->frag, len);
frag_desc->frag.page_offset = page_offset;
}
static inline u8
rmnet_frag_get_next_hdr_type(struct rmnet_frag_descriptor *frag_desc)
{
unsigned char *data = rmnet_frag_data_ptr(frag_desc);
data += sizeof(struct rmnet_map_header);
return ((struct rmnet_map_v5_coal_header *)data)->header_type;
}
static inline bool
rmnet_frag_get_csum_valid(struct rmnet_frag_descriptor *frag_desc)
{
unsigned char *data = rmnet_frag_data_ptr(frag_desc);
data += sizeof(struct rmnet_map_header);
return ((struct rmnet_map_v5_csum_header *)data)->csum_valid_required;
}
#endif /* _RMNET_DESCRIPTOR_H_ */

View File

@ -0,0 +1,374 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* RMNET Data ingress/egress handler
*
*/
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/sock.h>
#include <linux/tracepoint.h>
#include "rmnet_private.h"
#include "rmnet_config.h"
#include "rmnet_vnd.h"
#include "rmnet_map.h"
#include "rmnet_handlers.h"
#include "rmnet_descriptor.h"
#define RMNET_IP_VERSION_4 0x40
#define RMNET_IP_VERSION_6 0x60
/* Helper Functions */
static void rmnet_set_skb_proto(struct sk_buff *skb)
{
switch (rmnet_map_data_ptr(skb)[0] & 0xF0) {
case RMNET_IP_VERSION_4:
skb->protocol = htons(ETH_P_IP);
break;
case RMNET_IP_VERSION_6:
skb->protocol = htons(ETH_P_IPV6);
break;
default:
skb->protocol = htons(ETH_P_MAP);
break;
}
}
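/* Informational: the switch above keys on the high nibble of the first
* payload byte. A typical IPv4 header begins with 0x45 (version 4,
* IHL 5), so 0x45 & 0xF0 == 0x40 selects ETH_P_IP; an IPv6 header
* begins with 0x60-0x6F, selecting ETH_P_IPV6. Anything else stays
* ETH_P_MAP.
*/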
/* Generic handler */
static void
rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port)
{
struct rmnet_nss_cb *nss_cb;
rmnet_vnd_rx_fixup(skb->dev, skb->len);
/* Pass off the packet to NSS driver if we can */
nss_cb = rcu_dereference(rmnet_nss_callbacks);
if (nss_cb) {
if (!port->chain_head)
port->chain_head = skb;
else
skb_shinfo(port->chain_tail)->frag_list = skb;
port->chain_tail = skb;
return;
}
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
skb->pkt_type = PACKET_HOST;
skb_set_mac_header(skb, 0);
//if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
//} else {
//if (!rmnet_check_skb_can_gro(skb))
// gro_cells_receive(&priv->gro_cells, skb);
//else
netif_receive_skb(skb);
//}
}
/* Deliver a list of skbs after undoing coalescing */
static void rmnet_deliver_skb_list(struct sk_buff_head *head,
struct rmnet_port *port)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue(head))) {
rmnet_set_skb_proto(skb);
rmnet_deliver_skb(skb, port);
}
}
/* MAP handler */
static void
_rmnet_map_ingress_handler(struct sk_buff *skb,
struct rmnet_port *port)
{
struct rmnet_map_header *qmap;
struct rmnet_endpoint *ep;
struct sk_buff_head list;
u16 len, pad;
u8 mux_id;
/* We don't need the spinlock since only we touch this */
__skb_queue_head_init(&list);
qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb);
if (qmap->cd_bit) {
if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
//if (!rmnet_map_flow_command(skb, port, false))
return;
}
if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
return rmnet_map_command(skb, port);
goto free_skb;
}
mux_id = qmap->mux_id;
pad = qmap->pad_len;
len = ntohs(qmap->pkt_len) - pad;
if (mux_id >= RMNET_MAX_LOGICAL_EP)
goto free_skb;
ep = rmnet_get_endpoint(port, mux_id);
if (!ep)
goto free_skb;
skb->dev = ep->egress_dev;
/* Handle QMAPv5 packet */
if (qmap->next_hdr &&
(port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
RMNET_FLAGS_INGRESS_MAP_CKSUMV5))) {
if (rmnet_map_process_next_hdr_packet(skb, &list, len))
goto free_skb;
} else {
/* We only have the main QMAP header to worry about */
pskb_pull(skb, sizeof(*qmap));
rmnet_set_skb_proto(skb);
if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
//if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
// skb->ip_summed = CHECKSUM_UNNECESSARY;
}
pskb_trim(skb, len);
/* Push the single packet onto the list */
__skb_queue_tail(&list, skb);
}
rmnet_deliver_skb_list(&list, port);
return;
free_skb:
kfree_skb(skb);
}
static void
rmnet_map_ingress_handler(struct sk_buff *skb,
struct rmnet_port *port)
{
struct sk_buff *skbn;
if (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
RMNET_FLAGS_INGRESS_MAP_CKSUMV5)) {
if (skb_is_nonlinear(skb)) {
rmnet_frag_ingress_handler(skb, port);
return;
}
}
/* Deaggregation and freeing of HW-originated
* buffers is done here
*/
while (skb) {
struct sk_buff *skb_frag = skb_shinfo(skb)->frag_list;
skb_shinfo(skb)->frag_list = NULL;
while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL) {
_rmnet_map_ingress_handler(skbn, port);
if (skbn == skb)
goto next_skb;
}
consume_skb(skb);
next_skb:
skb = skb_frag;
}
}
static int rmnet_map_egress_handler(struct sk_buff *skb,
struct rmnet_port *port, u8 mux_id,
struct net_device *orig_dev)
{
int required_headroom, additional_header_len, csum_type;
struct rmnet_map_header *map_header;
additional_header_len = 0;
required_headroom = sizeof(struct rmnet_map_header);
csum_type = 0;
if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) {
additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV4;
} else if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5) {
additional_header_len = sizeof(struct rmnet_map_v5_csum_header);
csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV5;
}
required_headroom += additional_header_len;
if (skb_headroom(skb) < required_headroom) {
if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
return -ENOMEM;
}
if (csum_type)
rmnet_map_checksum_uplink_packet(skb, orig_dev, csum_type);
map_header = rmnet_map_add_map_header(skb, additional_header_len, 0,
port);
if (!map_header)
return -ENOMEM;
map_header->mux_id = mux_id;
if (port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION) {
if (rmnet_map_tx_agg_skip(skb, required_headroom))
goto done;
rmnet_map_tx_aggregate(skb, port);
return -EINPROGRESS;
}
done:
skb->protocol = htons(ETH_P_MAP);
return 0;
}
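/* Informational: return-value contract as consumed by
* rmnet_egress_handler() below:
*   0            - MAP header added; the caller transmits the skb itself
*   -EINPROGRESS - the skb was absorbed into the TX aggregation buffer
*   -ENOMEM      - headroom expansion or MAP header allocation failed;
*                  the caller drops the packet
*/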
static void
rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
{
if (bridge_dev) {
skb->dev = bridge_dev;
dev_queue_xmit(skb);
}
}
/* Ingress / Egress Entry Points */
/* Processes packet as per ingress data format for receiving device. Logical
* endpoint is determined from packet inspection. Packet is then sent to the
* egress device listed in the logical endpoint configuration.
*/
static rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
struct rmnet_port *port;
struct net_device *dev;
if (!skb)
goto done;
if (skb->pkt_type == PACKET_LOOPBACK)
return RX_HANDLER_PASS;
dev = skb->dev;
port = rmnet_get_port(dev);
port->chain_head = NULL;
port->chain_tail = NULL;
switch (port->rmnet_mode) {
case RMNET_EPMODE_VND:
rmnet_map_ingress_handler(skb, port);
break;
case RMNET_EPMODE_BRIDGE:
rmnet_bridge_handler(skb, port->bridge_ep);
break;
}
done:
return RX_HANDLER_CONSUMED;
}
static rx_handler_result_t rmnet_rx_priv_handler(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
struct rmnet_nss_cb *nss_cb;
if (!skb)
return RX_HANDLER_CONSUMED;
if (nss_debug) printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len);
if (skb->pkt_type == PACKET_LOOPBACK)
return RX_HANDLER_PASS;
/* Check this so that we don't loop around netif_receive_skb */
if (skb->cb[0] == 1) {
skb->cb[0] = 0;
skb->dev->stats.rx_packets++;
return RX_HANDLER_PASS;
}
while (skb) {
struct sk_buff *skb_frag = skb_shinfo(skb)->frag_list;
skb_shinfo(skb)->frag_list = NULL;
nss_cb = rcu_dereference(rmnet_nss_callbacks);
if (nss_cb)
nss_cb->nss_tx(skb);
skb = skb_frag;
}
return RX_HANDLER_CONSUMED;
}
/* Modifies packet as per logical endpoint configuration and egress data format
* for egress device configured in logical endpoint. Packet is then transmitted
* on the egress device.
*/
static void rmnet_egress_handler(struct sk_buff *skb)
{
struct net_device *orig_dev;
struct rmnet_port *port;
struct rmnet_priv *priv;
u8 mux_id;
int err;
u32 skb_len;
skb_orphan(skb);
orig_dev = skb->dev;
priv = netdev_priv(orig_dev);
skb->dev = priv->real_dev;
mux_id = priv->mux_id;
port = rmnet_get_port(skb->dev);
if (!port)
goto drop;
skb_len = skb->len;
err = rmnet_map_egress_handler(skb, port, mux_id, orig_dev);
if (err == -ENOMEM)
goto drop;
else if (err == -EINPROGRESS) {
rmnet_vnd_tx_fixup(orig_dev, skb_len);
return;
}
rmnet_vnd_tx_fixup(orig_dev, skb_len);
dev_queue_xmit(skb);
return;
drop:
this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
kfree_skb(skb);
}

View File

@ -0,0 +1,32 @@
/* Copyright (c) 2013, 2016-2017, 2019
* The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* RMNET Data ingress/egress handler
*
*/
#ifndef _RMNET_HANDLERS_H_
#define _RMNET_HANDLERS_H_
#include "rmnet_config.h"
enum rmnet_packet_context {
RMNET_NET_RX_CTX,
RMNET_WQ_CTX,
};
static void rmnet_egress_handler(struct sk_buff *skb);
static void rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port);
static void rmnet_set_skb_proto(struct sk_buff *skb);
static rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb);
static rx_handler_result_t rmnet_rx_priv_handler(struct sk_buff **pskb);
#endif /* _RMNET_HANDLERS_H_ */

View File

@ -0,0 +1,272 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _RMNET_MAP_H_
#define _RMNET_MAP_H_
#include <linux/skbuff.h>
#include "rmnet_config.h"
struct rmnet_map_control_command {
u8 command_name;
u8 cmd_type:2;
u8 reserved:6;
u16 reserved2;
u32 transaction_id;
union {
struct {
u16 ip_family:2;
u16 reserved:14;
__be16 flow_control_seq_num;
__be32 qos_id;
} flow_control;
u8 data[0];
};
} __aligned(1);
enum rmnet_map_commands {
RMNET_MAP_COMMAND_NONE,
RMNET_MAP_COMMAND_FLOW_DISABLE,
RMNET_MAP_COMMAND_FLOW_ENABLE,
RMNET_MAP_COMMAND_FLOW_START = 7,
RMNET_MAP_COMMAND_FLOW_END = 8,
/* These should always be the last 2 elements */
RMNET_MAP_COMMAND_UNKNOWN,
RMNET_MAP_COMMAND_ENUM_LENGTH
};
enum rmnet_map_v5_header_type {
RMNET_MAP_HEADER_TYPE_UNKNOWN,
RMNET_MAP_HEADER_TYPE_COALESCING = 0x1,
RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD = 0x2,
RMNET_MAP_HEADER_TYPE_ENUM_LENGTH
};
enum rmnet_map_v5_close_type {
RMNET_MAP_COAL_CLOSE_NON_COAL,
RMNET_MAP_COAL_CLOSE_IP_MISS,
RMNET_MAP_COAL_CLOSE_TRANS_MISS,
RMNET_MAP_COAL_CLOSE_HW,
RMNET_MAP_COAL_CLOSE_COAL,
};
enum rmnet_map_v5_close_value {
RMNET_MAP_COAL_CLOSE_HW_NL,
RMNET_MAP_COAL_CLOSE_HW_PKT,
RMNET_MAP_COAL_CLOSE_HW_BYTE,
RMNET_MAP_COAL_CLOSE_HW_TIME,
RMNET_MAP_COAL_CLOSE_HW_EVICT,
};
/* Main QMAP header */
struct rmnet_map_header {
u8 pad_len:6;
u8 next_hdr:1;
u8 cd_bit:1;
u8 mux_id;
__be16 pkt_len;
} __aligned(1);
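/* Informational byte-layout sketch, assuming little-endian bitfield
* ordering (the layout this driver targets):
*   byte 0:    bits 0-5 pad_len, bit 6 next_hdr, bit 7 cd_bit
*   byte 1:    mux_id
*   bytes 2-3: pkt_len, big endian (payload length including padding)
*/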
/* QMAP v5 headers */
struct rmnet_map_v5_csum_header {
u8 next_hdr:1;
u8 header_type:7;
u8 hw_reserved:7;
u8 csum_valid_required:1;
__be16 reserved;
} __aligned(1);
struct rmnet_map_v5_nl_pair {
__be16 pkt_len;
u8 csum_error_bitmap;
u8 num_packets;
} __aligned(1);
/* NLO: Number-length object */
#define RMNET_MAP_V5_MAX_NLOS (6)
#define RMNET_MAP_V5_MAX_PACKETS (48)
struct rmnet_map_v5_coal_header {
u8 next_hdr:1;
u8 header_type:7;
u8 reserved1:4;
u8 num_nlos:3;
u8 csum_valid:1;
u8 close_type:4;
u8 close_value:4;
u8 reserved2:4;
u8 virtual_channel_id:4;
struct rmnet_map_v5_nl_pair nl_pairs[RMNET_MAP_V5_MAX_NLOS];
} __aligned(1);
/* QMAP v4 headers */
struct rmnet_map_dl_csum_trailer {
u8 reserved1;
u8 valid:1;
u8 reserved2:7;
u16 csum_start_offset;
u16 csum_length;
__be16 csum_value;
} __aligned(1);
struct rmnet_map_ul_csum_header {
__be16 csum_start_offset;
u16 csum_insert_offset:14;
u16 udp_ind:1;
u16 csum_enabled:1;
} __aligned(1);
struct rmnet_map_control_command_header {
u8 command_name;
u8 cmd_type:2;
u8 reserved:5;
u8 e:1;
u16 source_id:15;
u16 ext:1;
u32 transaction_id;
} __aligned(1);
struct rmnet_map_flow_info_le {
__be32 mux_id;
__be32 flow_id;
__be32 bytes;
__be32 pkts;
} __aligned(1);
struct rmnet_map_flow_info_be {
u32 mux_id;
u32 flow_id;
u32 bytes;
u32 pkts;
} __aligned(1);
struct rmnet_map_dl_ind_hdr {
union {
struct {
u32 seq;
u32 bytes;
u32 pkts;
u32 flows;
struct rmnet_map_flow_info_le flow[0];
} le __aligned(1);
struct {
__be32 seq;
__be32 bytes;
__be32 pkts;
__be32 flows;
struct rmnet_map_flow_info_be flow[0];
} be __aligned(1);
} __aligned(1);
} __aligned(1);
struct rmnet_map_dl_ind_trl {
union {
__be32 seq_be;
u32 seq_le;
} __aligned(1);
} __aligned(1);
struct rmnet_map_dl_ind {
u8 priority;
union {
void (*dl_hdr_handler)(struct rmnet_map_dl_ind_hdr *);
void (*dl_hdr_handler_v2)(struct rmnet_map_dl_ind_hdr *,
struct
rmnet_map_control_command_header *);
} __aligned(1);
union {
void (*dl_trl_handler)(struct rmnet_map_dl_ind_trl *);
void (*dl_trl_handler_v2)(struct rmnet_map_dl_ind_trl *,
struct
rmnet_map_control_command_header *);
} __aligned(1);
struct list_head list;
};
#define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \
(Y)->data)->mux_id)
#define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \
(Y)->data)->cd_bit)
#define RMNET_MAP_GET_PAD(Y) (((struct rmnet_map_header *) \
(Y)->data)->pad_len)
#define RMNET_MAP_GET_CMD_START(Y) ((struct rmnet_map_control_command *) \
((Y)->data + \
sizeof(struct rmnet_map_header)))
#define RMNET_MAP_GET_LENGTH(Y) (ntohs(((struct rmnet_map_header *) \
(Y)->data)->pkt_len))
#define RMNET_MAP_DEAGGR_SPACING 64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
#define RMNET_MAP_DESC_HEADROOM 128
#define RMNET_MAP_COMMAND_REQUEST 0
#define RMNET_MAP_COMMAND_ACK 1
#define RMNET_MAP_COMMAND_UNSUPPORTED 2
#define RMNET_MAP_COMMAND_INVALID 3
#define RMNET_MAP_NO_PAD_BYTES 0
#define RMNET_MAP_ADD_PAD_BYTES 1
static inline unsigned char *rmnet_map_data_ptr(struct sk_buff *skb)
{
/* Nonlinear packets we receive are entirely within frag 0 */
if (skb_is_nonlinear(skb) && skb->len == skb->data_len)
return skb_frag_address(skb_shinfo(skb)->frags);
return skb->data;
}
static inline struct rmnet_map_control_command *
rmnet_map_get_cmd_start(struct sk_buff *skb)
{
unsigned char *data = rmnet_map_data_ptr(skb);
data += sizeof(struct rmnet_map_header);
return (struct rmnet_map_control_command *)data;
}
static inline u8 rmnet_map_get_next_hdr_type(struct sk_buff *skb)
{
unsigned char *data = rmnet_map_data_ptr(skb);
data += sizeof(struct rmnet_map_header);
return ((struct rmnet_map_v5_coal_header *)data)->header_type;
}
static inline bool rmnet_map_get_csum_valid(struct sk_buff *skb)
{
unsigned char *data = rmnet_map_data_ptr(skb);
data += sizeof(struct rmnet_map_header);
return ((struct rmnet_map_v5_csum_header *)data)->csum_valid_required;
}
static struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
struct rmnet_port *port);
static struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
int hdrlen, int pad,
struct rmnet_port *port);
static void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
static void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
struct net_device *orig_dev,
int csum_type);
static int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
struct sk_buff_head *list,
u16 len);
static int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset);
static void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port);
static void rmnet_map_tx_aggregate_init(struct rmnet_port *port);
static void rmnet_map_tx_aggregate_exit(struct rmnet_port *port);
static void rmnet_map_cmd_init(struct rmnet_port *port);
static void rmnet_map_cmd_exit(struct rmnet_port *port);
#endif /* _RMNET_MAP_H_ */

View File

@ -0,0 +1,143 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/netdevice.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"
#include "rmnet_vnd.h"
#define RMNET_DL_IND_HDR_SIZE (sizeof(struct rmnet_map_dl_ind_hdr) + \
sizeof(struct rmnet_map_header) + \
sizeof(struct rmnet_map_control_command_header))
#define RMNET_MAP_CMD_SIZE (sizeof(struct rmnet_map_header) + \
sizeof(struct rmnet_map_control_command_header))
#define RMNET_DL_IND_TRL_SIZE (sizeof(struct rmnet_map_dl_ind_trl) + \
sizeof(struct rmnet_map_header) + \
sizeof(struct rmnet_map_control_command_header))
static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
struct rmnet_port *port,
int enable)
{
struct rmnet_map_header *qmap;
struct rmnet_map_control_command *cmd;
struct rmnet_endpoint *ep;
struct net_device *vnd;
u16 ip_family;
u16 fc_seq;
u32 qos_id;
u8 mux_id;
int r;
qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb);
mux_id = qmap->mux_id;
cmd = rmnet_map_get_cmd_start(skb);
if (mux_id >= RMNET_MAX_LOGICAL_EP) {
kfree_skb(skb);
return RX_HANDLER_CONSUMED;
}
ep = rmnet_get_endpoint(port, mux_id);
if (!ep) {
kfree_skb(skb);
return RX_HANDLER_CONSUMED;
}
vnd = ep->egress_dev;
ip_family = cmd->flow_control.ip_family;
fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
qos_id = ntohl(cmd->flow_control.qos_id);
/* Ignore the IP family and pass the same sequence number for both v4
* and v6 flows. User space does not support creating dedicated flows
* for the two protocols.
*/
r = rmnet_vnd_do_flow_control(vnd, enable);
if (r) {
kfree_skb(skb);
return RMNET_MAP_COMMAND_UNSUPPORTED;
} else {
return RMNET_MAP_COMMAND_ACK;
}
}
static void rmnet_map_send_ack(struct sk_buff *skb,
unsigned char type,
struct rmnet_port *port)
{
struct rmnet_map_control_command *cmd;
struct net_device *dev = skb->dev;
if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
pskb_trim(skb,
skb->len - sizeof(struct rmnet_map_dl_csum_trailer));
skb->protocol = htons(ETH_P_MAP);
cmd = rmnet_map_get_cmd_start(skb);
cmd->cmd_type = type & 0x03;
netif_tx_lock(dev);
dev->netdev_ops->ndo_start_xmit(skb, dev);
netif_tx_unlock(dev);
}
/* Process MAP command frame and send N/ACK message as appropriate. The
* command name is decoded here and the appropriate handler is called.
*/
static void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port)
{
struct rmnet_map_control_command *cmd;
unsigned char command_name;
unsigned char rc = 0;
cmd = rmnet_map_get_cmd_start(skb);
command_name = cmd->command_name;
switch (command_name) {
case RMNET_MAP_COMMAND_FLOW_ENABLE:
rc = rmnet_map_do_flow_control(skb, port, 1);
break;
case RMNET_MAP_COMMAND_FLOW_DISABLE:
rc = rmnet_map_do_flow_control(skb, port, 0);
break;
default:
rc = RMNET_MAP_COMMAND_UNSUPPORTED;
kfree_skb(skb);
break;
}
if (rc == RMNET_MAP_COMMAND_ACK)
rmnet_map_send_ack(skb, rc, port);
}
static void rmnet_map_cmd_exit(struct rmnet_port *port)
{
struct rmnet_map_dl_ind *tmp, *idx;
list_for_each_entry_safe(tmp, idx, &port->dl_list, list)
list_del_rcu(&tmp->list);
}
static void rmnet_map_cmd_init(struct rmnet_port *port)
{
INIT_LIST_HEAD(&port->dl_list);
port->dl_marker_flush = -1;
}

View File

@ -0,0 +1,682 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* RMNET Data MAP protocol
*
*/
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"
#include "rmnet_handlers.h"
#define RMNET_MAP_PKT_COPY_THRESHOLD 64
#define RMNET_MAP_DEAGGR_SPACING 64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
struct rmnet_map_coal_metadata {
void *ip_header;
void *trans_header;
u16 ip_len;
u16 trans_len;
u16 data_offset;
u16 data_len;
u8 ip_proto;
u8 trans_proto;
u8 pkt_id;
u8 pkt_count;
};
static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
const void *txporthdr)
{
__sum16 *check = NULL;
switch (protocol) {
case IPPROTO_TCP:
check = &(((struct tcphdr *)txporthdr)->check);
break;
case IPPROTO_UDP:
check = &(((struct udphdr *)txporthdr)->check);
break;
default:
check = NULL;
break;
}
return check;
}
static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
{
struct iphdr *ip4h = (struct iphdr *)iphdr;
void *txphdr;
u16 *csum;
txphdr = iphdr + ip4h->ihl * 4;
if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
*csum = ~(*csum);
}
}
static void
rmnet_map_ipv4_ul_csum_header(void *iphdr,
struct rmnet_map_ul_csum_header *ul_header,
struct sk_buff *skb)
{
struct iphdr *ip4h = (struct iphdr *)iphdr;
__be16 *hdr = (__be16 *)ul_header, offset;
offset = htons((__force u16)(skb_transport_header(skb) -
(unsigned char *)iphdr));
ul_header->csum_start_offset = offset;
ul_header->csum_insert_offset = skb->csum_offset;
ul_header->csum_enabled = 1;
if (ip4h->protocol == IPPROTO_UDP)
ul_header->udp_ind = 1;
else
ul_header->udp_ind = 0;
/* Changing remaining fields to network order */
hdr++;
*hdr = htons((__force u16)*hdr);
skb->ip_summed = CHECKSUM_NONE;
rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
}
#if IS_ENABLED(CONFIG_IPV6)
static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
{
struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
void *txphdr;
u16 *csum;
txphdr = ip6hdr + sizeof(struct ipv6hdr);
if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
*csum = ~(*csum);
}
}
static void
rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
struct rmnet_map_ul_csum_header *ul_header,
struct sk_buff *skb)
{
struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
__be16 *hdr = (__be16 *)ul_header, offset;
offset = htons((__force u16)(skb_transport_header(skb) -
(unsigned char *)ip6hdr));
ul_header->csum_start_offset = offset;
ul_header->csum_insert_offset = skb->csum_offset;
ul_header->csum_enabled = 1;
if (ip6h->nexthdr == IPPROTO_UDP)
ul_header->udp_ind = 1;
else
ul_header->udp_ind = 0;
/* Changing remaining fields to network order */
hdr++;
*hdr = htons((__force u16)*hdr);
skb->ip_summed = CHECKSUM_NONE;
rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr);
}
#endif
/* Adds MAP header to front of skb->data
* Padding is calculated and set appropriately in MAP header. Mux ID is
* initialized to 0.
*/
static struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
int hdrlen, int pad,
struct rmnet_port *port)
{
struct rmnet_map_header *map_header;
u32 padding, map_datalen;
u8 *padbytes;
map_datalen = skb->len - hdrlen;
map_header = (struct rmnet_map_header *)
skb_push(skb, sizeof(struct rmnet_map_header));
memset(map_header, 0, sizeof(struct rmnet_map_header));
/* Set next_hdr bit for csum offload packets */
if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5)
map_header->next_hdr = 1;
if (pad == RMNET_MAP_NO_PAD_BYTES) {
map_header->pkt_len = htons(map_datalen);
return map_header;
}
padding = ALIGN(map_datalen, 4) - map_datalen;
if (padding == 0)
goto done;
if (skb_tailroom(skb) < padding)
return NULL;
padbytes = (u8 *)skb_put(skb, padding);
memset(padbytes, 0, padding);
done:
map_header->pkt_len = htons(map_datalen + padding);
map_header->pad_len = padding & 0x3F;
return map_header;
}
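/* Worked example (informational): with RMNET_MAP_ADD_PAD_BYTES and a
* 1501-byte payload, ALIGN(1501, 4) == 1504, so three zero bytes are
* appended, pkt_len becomes htons(1504) and pad_len 3. A receiver then
* recovers the payload length as ntohs(pkt_len) - pad_len, as done in
* _rmnet_map_ingress_handler().
*/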
/* Deaggregates a single packet
* A whole new buffer is allocated for each portion of an aggregated frame.
* Caller should keep calling deaggregate() on the source skb until NULL is
* returned, indicating that there are no more packets to deaggregate. Caller
* is responsible for freeing the original skb.
*/
static struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
struct rmnet_port *port)
{
struct rmnet_map_header *maph;
struct sk_buff *skbn;
unsigned char *data = rmnet_map_data_ptr(skb), *next_hdr = NULL;
u32 packet_len;
if (skb->len == 0)
return NULL;
maph = (struct rmnet_map_header *)data;
packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header);
if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
else if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5) {
if (!maph->cd_bit) {
packet_len += sizeof(struct rmnet_map_v5_csum_header);
/* Coalescing headers require MAPv5 */
next_hdr = data + sizeof(*maph);
}
}
if (((int)skb->len - (int)packet_len) < 0)
return NULL;
/* Some hardware can send us empty frames. Catch them */
if (ntohs(maph->pkt_len) == 0)
return NULL;
if (next_hdr &&
((struct rmnet_map_v5_coal_header *)next_hdr)->header_type ==
RMNET_MAP_HEADER_TYPE_COALESCING)
return skb;
if (skb_is_nonlinear(skb)) {
skb_frag_t *frag0 = skb_shinfo(skb)->frags;
struct page *page = skb_frag_page(frag0);
skbn = alloc_skb(RMNET_MAP_DEAGGR_HEADROOM, GFP_ATOMIC);
if (!skbn)
return NULL;
skb_append_pagefrags(skbn, page, frag0->page_offset,
packet_len);
skbn->data_len += packet_len;
skbn->len += packet_len;
} else {
skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING,
GFP_ATOMIC);
if (!skbn)
return NULL;
skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
skb_put(skbn, packet_len);
memcpy(skbn->data, data, packet_len);
}
pskb_pull(skb, packet_len);
return skbn;
}
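/* Usage sketch (informational), mirroring rmnet_map_ingress_handler()
* in rmnet_handlers.c:
*
*   struct sk_buff *skbn;
*
*   while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
*           _rmnet_map_ingress_handler(skbn, port);
*   consume_skb(skb);
*
* Note that for coalescing frames the function returns the source skb
* itself; the real caller checks for skbn == skb before freeing.
*/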
static void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb,
struct net_device *orig_dev)
{
struct rmnet_priv *priv = netdev_priv(orig_dev);
struct rmnet_map_ul_csum_header *ul_header;
void *iphdr;
ul_header = (struct rmnet_map_ul_csum_header *)
skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));
if (unlikely(!(orig_dev->features &
(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
goto sw_csum;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
iphdr = (char *)ul_header +
sizeof(struct rmnet_map_ul_csum_header);
if (skb->protocol == htons(ETH_P_IP)) {
rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
priv->stats.csum_hw++;
return;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
#if IS_ENABLED(CONFIG_IPV6)
rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
priv->stats.csum_hw++;
return;
#else
priv->stats.csum_err_invalid_ip_version++;
goto sw_csum;
#endif
} else {
priv->stats.csum_err_invalid_ip_version++;
}
}
sw_csum:
ul_header->csum_start_offset = 0;
ul_header->csum_insert_offset = 0;
ul_header->csum_enabled = 0;
ul_header->udp_ind = 0;
priv->stats.csum_sw++;
}
static void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb,
struct net_device *orig_dev)
{
struct rmnet_priv *priv = netdev_priv(orig_dev);
struct rmnet_map_v5_csum_header *ul_header;
ul_header = (struct rmnet_map_v5_csum_header *)
skb_push(skb, sizeof(*ul_header));
memset(ul_header, 0, sizeof(*ul_header));
ul_header->header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
void *iph = (char *)ul_header + sizeof(*ul_header);
void *trans;
__sum16 *check;
u8 proto;
if (skb->protocol == htons(ETH_P_IP)) {
u16 ip_len = ((struct iphdr *)iph)->ihl * 4;
proto = ((struct iphdr *)iph)->protocol;
trans = iph + ip_len;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
u16 ip_len = sizeof(struct ipv6hdr);
proto = ((struct ipv6hdr *)iph)->nexthdr;
trans = iph + ip_len;
} else {
priv->stats.csum_err_invalid_ip_version++;
goto sw_csum;
}
check = rmnet_map_get_csum_field(proto, trans);
if (check) {
*check = 0;
skb->ip_summed = CHECKSUM_NONE;
/* Ask for checksum offloading */
ul_header->csum_valid_required = 1;
priv->stats.csum_hw++;
return;
}
}
sw_csum:
priv->stats.csum_sw++;
}
/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
* packets that are supported for UL checksum offload.
*/
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
struct net_device *orig_dev,
int csum_type)
{
switch (csum_type) {
case RMNET_FLAGS_EGRESS_MAP_CKSUMV4:
rmnet_map_v4_checksum_uplink_packet(skb, orig_dev);
break;
case RMNET_FLAGS_EGRESS_MAP_CKSUMV5:
rmnet_map_v5_checksum_uplink_packet(skb, orig_dev);
break;
default:
break;
}
}
static void rmnet_map_move_headers(struct sk_buff *skb)
{
struct iphdr *iph;
int ip_len; /* signed: ipv6_skip_exthdr() can return a negative error */
u16 trans_len = 0;
u8 proto;
/* This only applies to non-linear SKBs */
if (!skb_is_nonlinear(skb))
return;
iph = (struct iphdr *)rmnet_map_data_ptr(skb);
if (iph->version == 4) {
ip_len = iph->ihl * 4;
proto = iph->protocol;
if (iph->frag_off & htons(IP_OFFSET))
/* No transport header information */
goto pull;
} else if (iph->version == 6) {
struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;
__be16 frag_off;
u8 nexthdr = ip6h->nexthdr;
ip_len = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr,
&frag_off);
if (ip_len < 0)
return;
proto = nexthdr;
} else {
return;
}
if (proto == IPPROTO_TCP) {
struct tcphdr *tp = (struct tcphdr *)((u8 *)iph + ip_len);
trans_len = tp->doff * 4;
} else if (proto == IPPROTO_UDP) {
trans_len = sizeof(struct udphdr);
} else if (proto == NEXTHDR_FRAGMENT) {
/* Non-first fragments don't have the fragment length added by
* ipv6_skip_exthdr() and show up as proto NEXTHDR_FRAGMENT, so
* we account for the length here.
*/
ip_len += sizeof(struct frag_hdr);
}
pull:
__pskb_pull_tail(skb, ip_len + trans_len);
skb_reset_network_header(skb);
if (trans_len)
skb_set_transport_header(skb, ip_len);
}
/* Process a QMAPv5 packet header */
static int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
struct sk_buff_head *list,
u16 len)
{
struct rmnet_priv *priv = netdev_priv(skb->dev);
int rc = 0;
switch (rmnet_map_get_next_hdr_type(skb)) {
case RMNET_MAP_HEADER_TYPE_COALESCING:
priv->stats.coal.coal_rx++;
break;
case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
if (rmnet_map_get_csum_valid(skb)) {
priv->stats.csum_ok++;
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else {
priv->stats.csum_valid_unset++;
}
/* Pull unnecessary headers and move the rest to the linear
* section of the skb.
*/
pskb_pull(skb,
(sizeof(struct rmnet_map_header) +
sizeof(struct rmnet_map_v5_csum_header)));
rmnet_map_move_headers(skb);
/* Remove padding only for csum offload packets.
* Coalesced packets should never have padding.
*/
pskb_trim(skb, len);
__skb_queue_tail(list, skb);
break;
default:
rc = -EINVAL;
break;
}
return rc;
}
long rmnet_agg_time_limit __read_mostly = 1000000L;
long rmnet_agg_bypass_time __read_mostly = 10000000L;
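/* Decide whether an egress packet should bypass TX aggregation.
* ICMP and ICMPv6 (including ICMPv6 behind a fragment header) are
* exempted, presumably so that latency-sensitive control traffic is not
* delayed by the aggregation timer.
*/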
static int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset)
{
u8 *packet_start = skb->data + offset;
int is_icmp = 0;
if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *ip4h = (struct iphdr *)(packet_start);
if (ip4h->protocol == IPPROTO_ICMP)
is_icmp = 1;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
struct ipv6hdr *ip6h = (struct ipv6hdr *)(packet_start);
if (ip6h->nexthdr == IPPROTO_ICMPV6) {
is_icmp = 1;
} else if (ip6h->nexthdr == NEXTHDR_FRAGMENT) {
struct frag_hdr *frag;
frag = (struct frag_hdr *)(packet_start
+ sizeof(struct ipv6hdr));
if (frag->nexthdr == IPPROTO_ICMPV6)
is_icmp = 1;
}
}
return is_icmp;
}
static void rmnet_map_flush_tx_packet_work(struct work_struct *work)
{
struct sk_buff *skb = NULL;
struct rmnet_port *port;
unsigned long flags;
port = container_of(work, struct rmnet_port, agg_wq);
spin_lock_irqsave(&port->agg_lock, flags);
if (likely(port->agg_state == -EINPROGRESS)) {
/* Buffer may have already been shipped out */
if (likely(port->agg_skb)) {
skb = port->agg_skb;
port->agg_skb = NULL;
port->agg_count = 0;
memset(&port->agg_time, 0, sizeof(struct timespec));
}
port->agg_state = 0;
}
spin_unlock_irqrestore(&port->agg_lock, flags);
if (skb)
dev_queue_xmit(skb);
}
static enum hrtimer_restart rmnet_map_flush_tx_packet_queue(struct hrtimer *t)
{
struct rmnet_port *port;
port = container_of(t, struct rmnet_port, hrtimer);
schedule_work(&port->agg_wq);
return HRTIMER_NORESTART;
}
static void rmnet_map_linearize_copy(struct sk_buff *dst, struct sk_buff *src)
{
unsigned int linear = src->len - src->data_len, target = src->len;
unsigned char *src_buf;
struct sk_buff *skb;
src_buf = src->data;
skb_put_data(dst, src_buf, linear);
target -= linear;
skb = src;
while (target) {
unsigned int i = 0, non_linear = 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
non_linear = skb_frag_size(&skb_shinfo(skb)->frags[i]);
src_buf = skb_frag_address(&skb_shinfo(skb)->frags[i]);
skb_put_data(dst, src_buf, non_linear);
target -= non_linear;
}
if (skb_shinfo(skb)->frag_list) {
skb = skb_shinfo(skb)->frag_list;
continue;
}
if (skb->next)
skb = skb->next;
}
}
static void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port)
{
struct timespec diff, last;
int size, agg_count = 0;
struct sk_buff *agg_skb;
unsigned long flags;
new_packet:
spin_lock_irqsave(&port->agg_lock, flags);
memcpy(&last, &port->agg_last, sizeof(struct timespec));
getnstimeofday(&port->agg_last);
if (!port->agg_skb) {
/* Check whether we should aggregate at all. If the traffic is very
* sparse, don't aggregate. This threshold will need tuning later.
*/
diff = timespec_sub(port->agg_last, last);
size = port->egress_agg_params.agg_size - skb->len;
if (diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_bypass_time ||
size <= 0) {
spin_unlock_irqrestore(&port->agg_lock, flags);
skb->protocol = htons(ETH_P_MAP);
dev_queue_xmit(skb);
return;
}
port->agg_skb = alloc_skb(port->egress_agg_params.agg_size,
GFP_ATOMIC);
if (!port->agg_skb) {
port->agg_skb = NULL;
port->agg_count = 0;
memset(&port->agg_time, 0, sizeof(struct timespec));
spin_unlock_irqrestore(&port->agg_lock, flags);
skb->protocol = htons(ETH_P_MAP);
dev_queue_xmit(skb);
return;
}
rmnet_map_linearize_copy(port->agg_skb, skb);
port->agg_skb->dev = skb->dev;
port->agg_skb->protocol = htons(ETH_P_MAP);
port->agg_count = 1;
getnstimeofday(&port->agg_time);
dev_kfree_skb_any(skb);
goto schedule;
}
diff = timespec_sub(port->agg_last, port->agg_time);
size = port->egress_agg_params.agg_size - port->agg_skb->len;
if (skb->len > size ||
port->agg_count >= port->egress_agg_params.agg_count ||
diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_time_limit) {
agg_skb = port->agg_skb;
agg_count = port->agg_count;
port->agg_skb = NULL;
port->agg_count = 0;
memset(&port->agg_time, 0, sizeof(struct timespec));
port->agg_state = 0;
spin_unlock_irqrestore(&port->agg_lock, flags);
hrtimer_cancel(&port->hrtimer);
dev_queue_xmit(agg_skb);
goto new_packet;
}
rmnet_map_linearize_copy(port->agg_skb, skb);
port->agg_count++;
dev_kfree_skb_any(skb);
schedule:
if (port->agg_state != -EINPROGRESS) {
port->agg_state = -EINPROGRESS;
hrtimer_start(&port->hrtimer,
ns_to_ktime(port->egress_agg_params.agg_time),
HRTIMER_MODE_REL);
}
spin_unlock_irqrestore(&port->agg_lock, flags);
}
static void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
{
hrtimer_init(&port->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
port->hrtimer.function = rmnet_map_flush_tx_packet_queue;
port->egress_agg_params.agg_size = 8192;
port->egress_agg_params.agg_count = 20;
port->egress_agg_params.agg_time = 3000000;
spin_lock_init(&port->agg_lock);
INIT_WORK(&port->agg_wq, rmnet_map_flush_tx_packet_work);
}
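/* Informational: with the defaults above, rmnet_map_tx_aggregate() ships
* an aggregate when the next packet would push it past 8192 bytes, when
* it already holds 20 packets, or when it has been open longer than
* rmnet_agg_time_limit (1 ms); independently, the 3 ms (3000000 ns)
* hrtimer flushes any aggregate that is still pending.
*/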
static void rmnet_map_tx_aggregate_exit(struct rmnet_port *port)
{
unsigned long flags;
hrtimer_cancel(&port->hrtimer);
cancel_work_sync(&port->agg_wq);
spin_lock_irqsave(&port->agg_lock, flags);
if (port->agg_state == -EINPROGRESS) {
if (port->agg_skb) {
kfree_skb(port->agg_skb);
port->agg_skb = NULL;
port->agg_count = 0;
memset(&port->agg_time, 0, sizeof(struct timespec));
}
port->agg_state = 0;
}
spin_unlock_irqrestore(&port->agg_lock, flags);
}

View File

@ -0,0 +1,34 @@
/* Copyright (c) 2013-2014, 2016-2019 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _RMNET_PRIVATE_H_
#define _RMNET_PRIVATE_H_
#define RMNET_MAX_PACKET_SIZE 16384
#define RMNET_DFLT_PACKET_SIZE 1500
#define RMNET_NEEDED_HEADROOM 16
#define RMNET_TX_QUEUE_LEN 1000
/* Constants */
#define RMNET_EGRESS_FORMAT_AGGREGATION BIT(31)
#define RMNET_INGRESS_FORMAT_DL_MARKER_V1 BIT(30)
#define RMNET_INGRESS_FORMAT_DL_MARKER_V2 BIT(29)
#define RMNET_INGRESS_FORMAT_DL_MARKER (RMNET_INGRESS_FORMAT_DL_MARKER_V1 |\
RMNET_INGRESS_FORMAT_DL_MARKER_V2)
/* Replace skb->dev to a virtual rmnet device and pass up the stack */
#define RMNET_EPMODE_VND (1)
/* Pass the frame directly to another device with dev_queue_xmit() */
#define RMNET_EPMODE_BRIDGE (2)
#endif /* _RMNET_PRIVATE_H_ */

View File

@ -0,0 +1,257 @@
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rmnet
#define TRACE_INCLUDE_FILE rmnet_trace
#if !defined(_RMNET_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _RMNET_TRACE_H_
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/tracepoint.h>
/*****************************************************************************/
/* Trace events for rmnet module */
/*****************************************************************************/
DECLARE_EVENT_CLASS
(rmnet_mod_template,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2),
TP_STRUCT__entry(
__field(u8, func)
__field(u8, evt)
__field(u32, uint1)
__field(u32, uint2)
__field(u64, ulong1)
__field(u64, ulong2)
__field(void *, ptr1)
__field(void *, ptr2)
),
TP_fast_assign(
__entry->func = func;
__entry->evt = evt;
__entry->uint1 = uint1;
__entry->uint2 = uint2;
__entry->ulong1 = ulong1;
__entry->ulong2 = ulong2;
__entry->ptr1 = ptr1;
__entry->ptr2 = ptr2;
),
TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%llu ul2:%llu p1:0x%pK p2:0x%pK",
__entry->func, __entry->evt,
__entry->uint1, __entry->uint2,
__entry->ulong1, __entry->ulong2,
__entry->ptr1, __entry->ptr2)
)
DEFINE_EVENT
(rmnet_mod_template, rmnet_low,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
DEFINE_EVENT
(rmnet_mod_template, rmnet_high,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
DEFINE_EVENT
(rmnet_mod_template, rmnet_err,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
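/* Usage sketch (assumption, standard tracepoint convention): each
* DEFINE_EVENT above generates a trace_<name>() helper, e.g.
*
*   trace_rmnet_err(func_id, evt_id, u32_a, u32_b,
*                   u64_a, u64_b, skb, port);
*
* where all eight slots are free-form debug values chosen by the caller.
*/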
/*****************************************************************************/
/* Trace events for rmnet_perf module */
/*****************************************************************************/
DEFINE_EVENT
(rmnet_mod_template, rmnet_perf_low,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
DEFINE_EVENT
(rmnet_mod_template, rmnet_perf_high,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
DEFINE_EVENT
(rmnet_mod_template, rmnet_perf_err,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
/*****************************************************************************/
/* Trace events for rmnet_shs module */
/*****************************************************************************/
DEFINE_EVENT
(rmnet_mod_template, rmnet_shs_low,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
DEFINE_EVENT
(rmnet_mod_template, rmnet_shs_high,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
DEFINE_EVENT
(rmnet_mod_template, rmnet_shs_err,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
DEFINE_EVENT
(rmnet_mod_template, rmnet_shs_wq_low,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
DEFINE_EVENT
(rmnet_mod_template, rmnet_shs_wq_high,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
DEFINE_EVENT
(rmnet_mod_template, rmnet_shs_wq_err,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
DECLARE_EVENT_CLASS
(rmnet_freq_template,
TP_PROTO(u8 core, u32 newfreq),
TP_ARGS(core, newfreq),
TP_STRUCT__entry(
__field(u8, core)
__field(u32, newfreq)
),
TP_fast_assign(
__entry->core = core;
__entry->newfreq = newfreq;
),
TP_printk("freq policy core:%u freq floor :%u",
__entry->core, __entry->newfreq)
);
DEFINE_EVENT
(rmnet_freq_template, rmnet_freq_boost,
TP_PROTO(u8 core, u32 newfreq),
TP_ARGS(core, newfreq)
);
DEFINE_EVENT
(rmnet_freq_template, rmnet_freq_reset,
TP_PROTO(u8 core, u32 newfreq),
TP_ARGS(core, newfreq)
);
TRACE_EVENT
(rmnet_freq_update,
TP_PROTO(u8 core, u32 lowfreq, u32 highfreq),
TP_ARGS(core, lowfreq, highfreq),
TP_STRUCT__entry(
__field(u8, core)
__field(u32, lowfreq)
__field(u32, highfreq)
),
TP_fast_assign(
__entry->core = core;
__entry->lowfreq = lowfreq;
__entry->highfreq = highfreq;
),
TP_printk("freq policy update core:%u policy freq floor :%u freq ceil :%u",
__entry->core, __entry->lowfreq, __entry->highfreq)
);
#endif /* _RMNET_TRACE_H_ */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../drivers/net/ethernet/qualcomm/rmnet
#include <trace/define_trace.h>

View File

@ -0,0 +1,382 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* RMNET Data virtual network driver
*
*/
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <net/pkt_sched.h>
#include "rmnet_config.h"
#include "rmnet_handlers.h"
#include "rmnet_private.h"
#include "rmnet_map.h"
#include "rmnet_vnd.h"
/* RX/TX Fixup */
static void rmnet_vnd_rx_fixup(struct net_device *dev, u32 skb_len)
{
struct rmnet_priv *priv = netdev_priv(dev);
struct rmnet_pcpu_stats *pcpu_ptr;
pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
u64_stats_update_begin(&pcpu_ptr->syncp);
pcpu_ptr->stats.rx_pkts++;
pcpu_ptr->stats.rx_bytes += skb_len;
u64_stats_update_end(&pcpu_ptr->syncp);
}
static void rmnet_vnd_tx_fixup(struct net_device *dev, u32 skb_len)
{
struct rmnet_priv *priv = netdev_priv(dev);
struct rmnet_pcpu_stats *pcpu_ptr;
pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
u64_stats_update_begin(&pcpu_ptr->syncp);
pcpu_ptr->stats.tx_pkts++;
pcpu_ptr->stats.tx_bytes += skb_len;
u64_stats_update_end(&pcpu_ptr->syncp);
}
/* Network Device Operations */
static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct rmnet_priv *priv;
priv = netdev_priv(dev);
if (priv->real_dev) {
rmnet_egress_handler(skb);
} else {
this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
kfree_skb(skb);
}
return NETDEV_TX_OK;
}
static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
{
if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE)
return -EINVAL;
rmnet_dev->mtu = new_mtu;
return 0;
}
static int rmnet_vnd_get_iflink(const struct net_device *dev)
{
struct rmnet_priv *priv = netdev_priv(dev);
return priv->real_dev->ifindex;
}
static int rmnet_vnd_init(struct net_device *dev)
{
struct rmnet_priv *priv = netdev_priv(dev);
int err;
priv->pcpu_stats = alloc_percpu(struct rmnet_pcpu_stats);
if (!priv->pcpu_stats)
return -ENOMEM;
err = gro_cells_init(&priv->gro_cells, dev);
if (err) {
free_percpu(priv->pcpu_stats);
return err;
}
return 0;
}
static void rmnet_vnd_uninit(struct net_device *dev)
{
struct rmnet_priv *priv = netdev_priv(dev);
gro_cells_destroy(&priv->gro_cells);
free_percpu(priv->pcpu_stats);
}
static struct rtnl_link_stats64* rmnet_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *s)
{
struct rmnet_priv *priv = netdev_priv(dev);
struct rmnet_vnd_stats total_stats;
struct rmnet_pcpu_stats *pcpu_ptr;
unsigned int cpu, start;
memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));
for_each_possible_cpu(cpu) {
pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
do {
start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
} while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));
total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
}
s->rx_packets = total_stats.rx_pkts;
s->rx_bytes = total_stats.rx_bytes;
s->tx_packets = total_stats.tx_pkts;
s->tx_bytes = total_stats.tx_bytes;
s->tx_dropped = total_stats.tx_drops;
return s;
}
static const struct net_device_ops rmnet_vnd_ops = {
.ndo_start_xmit = rmnet_vnd_start_xmit,
.ndo_change_mtu = rmnet_vnd_change_mtu,
.ndo_get_iflink = rmnet_vnd_get_iflink,
//.ndo_add_slave = rmnet_add_bridge,
//.ndo_del_slave = rmnet_del_bridge,
.ndo_init = rmnet_vnd_init,
.ndo_uninit = rmnet_vnd_uninit,
.ndo_get_stats64 = rmnet_get_stats64,
};
static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
"Checksum ok",
"Checksum valid bit not set",
"Checksum validation failed",
"Checksum error bad buffer",
"Checksum error bad ip version",
"Checksum error bad transport",
"Checksum skipped on ip fragment",
"Checksum skipped",
"Checksum computed in software",
"Checksum computed in hardware",
"Coalescing packets received",
"Coalesced packets",
"Coalescing header NLO errors",
"Coalescing header pcount errors",
"Coalescing checksum errors",
"Coalescing packet reconstructs",
"Coalescing IP version invalid",
"Coalescing L4 header invalid",
"Coalescing close Non-coalescable",
"Coalescing close L3 mismatch",
"Coalescing close L4 mismatch",
"Coalescing close HW NLO limit",
"Coalescing close HW packet limit",
"Coalescing close HW byte limit",
"Coalescing close HW time limit",
"Coalescing close HW eviction",
"Coalescing close Coalescable",
"Coalescing packets over VEID0",
"Coalescing packets over VEID1",
"Coalescing packets over VEID2",
"Coalescing packets over VEID3",
};
static const char rmnet_port_gstrings_stats[][ETH_GSTRING_LEN] = {
"MAP Cmd last version",
"MAP Cmd last ep id",
"MAP Cmd last transaction id",
"DL header last seen sequence",
"DL header last seen bytes",
"DL header last seen packets",
"DL header last seen flows",
"DL header pkts received",
"DL header total bytes received",
"DL header total pkts received",
"DL trailer last seen sequence",
"DL trailer pkts received",
};
static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
switch (stringset) {
case ETH_SS_STATS:
memcpy(buf, &rmnet_gstrings_stats,
sizeof(rmnet_gstrings_stats));
memcpy(buf + sizeof(rmnet_gstrings_stats),
&rmnet_port_gstrings_stats,
sizeof(rmnet_port_gstrings_stats));
break;
}
}
static int rmnet_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
return ARRAY_SIZE(rmnet_gstrings_stats) +
ARRAY_SIZE(rmnet_port_gstrings_stats);
default:
return -EOPNOTSUPP;
}
}
static void rmnet_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct rmnet_priv *priv = netdev_priv(dev);
struct rmnet_priv_stats *st = &priv->stats;
struct rmnet_port_priv_stats *stp;
struct rmnet_port *port;
port = rmnet_get_port(priv->real_dev);
if (!data || !port)
return;
stp = &port->stats;
memcpy(data, st, ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64));
memcpy(data + ARRAY_SIZE(rmnet_gstrings_stats), stp,
ARRAY_SIZE(rmnet_port_gstrings_stats) * sizeof(u64));
}
static int rmnet_stats_reset(struct net_device *dev)
{
struct rmnet_priv *priv = netdev_priv(dev);
struct rmnet_port_priv_stats *stp;
struct rmnet_port *port;
port = rmnet_get_port(priv->real_dev);
if (!port)
return -EINVAL;
stp = &port->stats;
memset(stp, 0, sizeof(*stp));
return 0;
}
static const struct ethtool_ops rmnet_ethtool_ops = {
.get_ethtool_stats = rmnet_get_ethtool_stats,
.get_strings = rmnet_get_strings,
.get_sset_count = rmnet_get_sset_count,
.nway_reset = rmnet_stats_reset,
};
/* Called by kernel whenever a new rmnet<n> device is created. Sets MTU,
* flags, ARP type, needed headroom, etc...
*/
void rmnet_vnd_setup(struct net_device *rmnet_dev)
{
rmnet_dev->netdev_ops = &rmnet_vnd_ops;
rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
random_ether_addr(rmnet_dev->dev_addr);
rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;
/* Raw IP mode */
rmnet_dev->header_ops = NULL; /* No header */
rmnet_dev->type = ARPHRD_RAWIP;
rmnet_dev->hard_header_len = 0;
rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
//rmnet_dev->needs_free_netdev = true;
rmnet_dev->hw_features = NETIF_F_RXCSUM;
rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
//rmnet_dev->hw_features |= NETIF_F_SG;
//rmnet_dev->hw_features |= NETIF_F_GRO_HW;
}
/* Exposed API */
static int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
struct rmnet_port *port,
struct net_device *real_dev,
struct rmnet_endpoint *ep)
{
struct rmnet_priv *priv = netdev_priv(rmnet_dev);
struct rmnet_nss_cb *nss_cb;
int rc;
if (ep->egress_dev)
return -EINVAL;
if (rmnet_get_endpoint(port, id))
return -EBUSY;
rmnet_dev->hw_features = NETIF_F_RXCSUM;
rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
rmnet_dev->hw_features |= NETIF_F_SG;
priv->real_dev = real_dev;
rc = register_netdevice(rmnet_dev);
if (!rc) {
ep->egress_dev = rmnet_dev;
ep->mux_id = id;
port->nr_rmnet_devs++;
//rmnet_dev->rtnl_link_ops = &rmnet_link_ops;
priv->mux_id = id;
netdev_dbg(rmnet_dev, "rmnet dev created\n");
}
nss_cb = rcu_dereference(rmnet_nss_callbacks);
if (nss_cb) {
rc = nss_cb->nss_create(rmnet_dev);
if (rc) {
/* Log, but don't fail the device creation */
netdev_err(rmnet_dev, "Device will not use NSS path: %d\n", rc);
rc = 0;
} else {
netdev_dbg(rmnet_dev, "NSS context created\n");
}
}
return rc;
}
static int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
struct rmnet_endpoint *ep)
{
struct rmnet_nss_cb *nss_cb;
if (id >= RMNET_MAX_LOGICAL_EP || !ep->egress_dev)
return -EINVAL;
if (ep->egress_dev) {
nss_cb = rcu_dereference(rmnet_nss_callbacks);
if (nss_cb)
nss_cb->nss_free(ep->egress_dev);
}
ep->egress_dev = NULL;
port->nr_rmnet_devs--;
return 0;
}
static int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
{
netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable);
/* Although we expect a similar number of enable/disable
 * commands, optimize for the disable path, which is more
 * latency-sensitive than enable.
 */
if (unlikely(enable))
netif_wake_queue(rmnet_dev);
else
netif_stop_queue(rmnet_dev);
return 0;
}
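The ethtool hooks above are the only userspace-visible interface to these counters. A minimal sketch of exercising them from an OpenWrt shell, assuming the virtual device is named rmnet_mhi0.1 as in the transcripts further below and that the ethtool package is installed (BusyBox does not ship it):
# dump the counters declared in rmnet_gstrings_stats and rmnet_port_gstrings_stats
ethtool -S rmnet_mhi0.1
# .nway_reset is wired to rmnet_stats_reset, so "restart autonegotiation"
# actually zeroes the per-port counters here
ethtool -r rmnet_mhi0.1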

View File

@ -0,0 +1,29 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* RMNET Data Virtual Network Device APIs
*
*/
#ifndef _RMNET_VND_H_
#define _RMNET_VND_H_
static int rmnet_vnd_do_flow_control(struct net_device *dev, int enable);
static int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
struct rmnet_port *port,
struct net_device *real_dev,
struct rmnet_endpoint *ep);
static int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
struct rmnet_endpoint *ep);
static void rmnet_vnd_rx_fixup(struct net_device *dev, u32 skb_len);
static void rmnet_vnd_tx_fixup(struct net_device *dev, u32 skb_len);
static void rmnet_vnd_setup(struct net_device *dev);
#endif /* _RMNET_VND_H_ */

File diff suppressed because it is too large

View File

@ -0,0 +1,424 @@
/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/hashtable.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <qca-nss-drv/nss_api_if.h>
#include <linux/rmnet_nss.h>
#define RMNET_NSS_HASH_BITS 8
#define hash_add_ptr(table, node, key) \
hlist_add_head(node, &table[hash_ptr(key, HASH_BITS(table))])
static DEFINE_HASHTABLE(rmnet_nss_ctx_hashtable, RMNET_NSS_HASH_BITS);
struct rmnet_nss_ctx {
struct hlist_node hnode;
struct net_device *rmnet_dev;
struct nss_rmnet_rx_handle *nss_ctx;
};
enum __rmnet_nss_stat {
RMNET_NSS_RX_ETH,
RMNET_NSS_RX_FAIL,
RMNET_NSS_RX_NON_ETH,
RMNET_NSS_RX_BUSY,
RMNET_NSS_TX_NO_CTX,
RMNET_NSS_TX_SUCCESS,
RMNET_NSS_TX_FAIL,
RMNET_NSS_TX_NONLINEAR,
RMNET_NSS_TX_BAD_IP,
RMNET_NSS_EXCEPTIONS,
RMNET_NSS_EX_BAD_HDR,
RMNET_NSS_EX_BAD_IP,
RMNET_NSS_EX_SUCCESS,
RMNET_NSS_TX_BAD_FRAGS,
RMNET_NSS_TX_LINEARIZE_FAILS,
RMNET_NSS_TX_NON_ZERO_HEADLEN_FRAGS,
RMNET_NSS_TX_BUSY_LOOP,
RMNET_NSS_NUM_STATS,
};
static unsigned long rmnet_nss_stats[RMNET_NSS_NUM_STATS];
#define RMNET_NSS_STAT(name, counter, desc) \
module_param_named(name, rmnet_nss_stats[counter], ulong, 0444); \
MODULE_PARM_DESC(name, desc)
RMNET_NSS_STAT(rmnet_nss_rx_ethernet, RMNET_NSS_RX_ETH,
"Number of Ethernet headers successfully removed");
RMNET_NSS_STAT(rmnet_nss_rx_fail, RMNET_NSS_RX_FAIL,
"Number of Ethernet headers that could not be removed");
RMNET_NSS_STAT(rmnet_nss_rx_non_ethernet, RMNET_NSS_RX_NON_ETH,
"Number of non-Ethernet packets received");
RMNET_NSS_STAT(rmnet_nss_rx_busy, RMNET_NSS_RX_BUSY,
"Number of packets dropped decause rmnet_data device was busy");
RMNET_NSS_STAT(rmnet_nss_tx_slow, RMNET_NSS_TX_NO_CTX,
"Number of packets sent over non-NSS-accelerated rmnet device");
RMNET_NSS_STAT(rmnet_nss_tx_fast, RMNET_NSS_TX_SUCCESS,
"Number of packets sent over NSS-accelerated rmnet device");
RMNET_NSS_STAT(rmnet_nss_tx_fail, RMNET_NSS_TX_FAIL,
"Number of packets that NSS could not transmit");
RMNET_NSS_STAT(rmnet_nss_tx_nonlinear, RMNET_NSS_TX_NONLINEAR,
"Number of non linear sent over NSS-accelerated rmnet device");
RMNET_NSS_STAT(rmnet_nss_tx_invalid_ip, RMNET_NSS_TX_BAD_IP,
"Number of ingress packets with invalid IP headers");
RMNET_NSS_STAT(rmnet_nss_tx_invalid_frags, RMNET_NSS_TX_BAD_FRAGS,
"Number of ingress packets with invalid frag format");
RMNET_NSS_STAT(rmnet_nss_tx_linearize_fail, RMNET_NSS_TX_LINEARIZE_FAILS,
"Number of ingress packets where linearize in tx fails");
RMNET_NSS_STAT(rmnet_nss_tx_exceptions, RMNET_NSS_EXCEPTIONS,
"Number of times our DL exception handler was invoked");
RMNET_NSS_STAT(rmnet_nss_exception_non_ethernet, RMNET_NSS_EX_BAD_HDR,
"Number of non-Ethernet exception packets");
RMNET_NSS_STAT(rmnet_nss_exception_invalid_ip, RMNET_NSS_EX_BAD_IP,
"Number of exception packets with invalid IP headers");
RMNET_NSS_STAT(rmnet_nss_exception_success, RMNET_NSS_EX_SUCCESS,
"Number of exception packets handled successfully");
RMNET_NSS_STAT(rmnet_nss_tx_non_zero_headlen_frags, RMNET_NSS_TX_NON_ZERO_HEADLEN_FRAGS,
"Number of packets with non zero headlen");
RMNET_NSS_STAT(rmnet_nss_tx_busy_loop, RMNET_NSS_TX_BUSY_LOOP,
"Number of times tx packets busy looped");
static void rmnet_nss_inc_stat(enum __rmnet_nss_stat stat)
{
if (stat >= 0 && stat < RMNET_NSS_NUM_STATS)
rmnet_nss_stats[stat]++;
}
static struct rmnet_nss_ctx *rmnet_nss_find_ctx(struct net_device *dev)
{
struct rmnet_nss_ctx *ctx;
struct hlist_head *bucket;
u32 hash;
hash = hash_ptr(dev, HASH_BITS(rmnet_nss_ctx_hashtable));
bucket = &rmnet_nss_ctx_hashtable[hash];
hlist_for_each_entry(ctx, bucket, hnode) {
if (ctx->rmnet_dev == dev)
return ctx;
}
return NULL;
}
static void rmnet_nss_free_ctx(struct rmnet_nss_ctx *ctx)
{
if (ctx) {
hash_del(&ctx->hnode);
nss_rmnet_rx_xmit_callback_unregister(ctx->nss_ctx);
nss_rmnet_rx_destroy_sync(ctx->nss_ctx);
kfree(ctx);
}
}
/* Pull off an ethernet header, if possible */
static int rmnet_nss_ethhdr_pull(struct sk_buff *skb)
{
if (!skb->protocol || skb->protocol == htons(ETH_P_802_3)) {
void *ret = skb_pull(skb, sizeof(struct ethhdr));
rmnet_nss_inc_stat((ret) ? RMNET_NSS_RX_ETH :
RMNET_NSS_RX_FAIL);
return !ret;
}
rmnet_nss_inc_stat(RMNET_NSS_RX_NON_ETH);
return -1;
}
/* Copy headers to linear section for non linear packets */
static int rmnet_nss_adjust_header(struct sk_buff *skb)
{
struct iphdr *iph;
skb_frag_t *frag;
int bytes = 0;
u8 transport;
if (skb_shinfo(skb)->nr_frags != 1) {
rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_FRAGS);
return -EINVAL;
}
if (skb_headlen(skb)) {
rmnet_nss_inc_stat(RMNET_NSS_TX_NON_ZERO_HEADLEN_FRAGS);
return 0;
}
frag = &skb_shinfo(skb)->frags[0];
iph = (struct iphdr *)(skb_frag_address(frag));
if (iph->version == 4) {
bytes = iph->ihl*4;
transport = iph->protocol;
} else if (iph->version == 6) {
struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;
bytes = sizeof(struct ipv6hdr);
/* Don't have to account for extension headers yet */
transport = ip6h->nexthdr;
} else {
rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_IP);
return -EINVAL;
}
if (transport == IPPROTO_TCP) {
struct tcphdr *th;
th = (struct tcphdr *)((u8 *)iph + bytes);
bytes += th->doff * 4;
} else if (transport == IPPROTO_UDP) {
bytes += sizeof(struct udphdr);
} else {
/* can't do anything else here unfortunately, so linearize */
if (skb_linearize(skb)) {
rmnet_nss_inc_stat(RMNET_NSS_TX_LINEARIZE_FAILS);
return -EINVAL;
} else {
return 0;
}
}
if (bytes > skb_frag_size(frag)) {
rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_FRAGS);
return -EINVAL;
}
skb_push(skb, bytes);
memcpy(skb->data, iph, bytes);
/* subtract to account for skb_push */
skb->len -= bytes;
frag->page_offset += bytes;
skb_frag_size_sub(frag, bytes);
/* subtract to account for skb_frag_size_sub */
skb->data_len -= bytes;
return 0;
}
/* Main downlink handler
 * Looks up the NSS context associated with the device. If the context is
 * found, we add a dummy Ethernet header with the appropriate protocol field
 * set, then pass the packet off to NSS for hardware acceleration.
 */
int rmnet_nss_tx(struct sk_buff *skb)
{
struct ethhdr *eth;
struct rmnet_nss_ctx *ctx;
struct net_device *dev = skb->dev;
nss_tx_status_t rc;
unsigned int len;
u8 version;
if (skb_is_nonlinear(skb)) {
if (rmnet_nss_adjust_header(skb))
goto fail;
else
rmnet_nss_inc_stat(RMNET_NSS_TX_NONLINEAR);
}
version = ((struct iphdr *)skb->data)->version;
ctx = rmnet_nss_find_ctx(dev);
if (!ctx) {
rmnet_nss_inc_stat(RMNET_NSS_TX_NO_CTX);
return -EINVAL;
}
eth = (struct ethhdr *)skb_push(skb, sizeof(*eth));
memset(&eth->h_dest, 0, ETH_ALEN * 2);
if (version == 4) {
eth->h_proto = htons(ETH_P_IP);
} else if (version == 6) {
eth->h_proto = htons(ETH_P_IPV6);
} else {
rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_IP);
goto fail;
}
skb->protocol = htons(ETH_P_802_3);
/* Get length including ethhdr */
len = skb->len;
transmit:
rc = nss_rmnet_rx_tx_buf(ctx->nss_ctx, skb);
if (rc == NSS_TX_SUCCESS) {
/* Increment rmnet_data device stats.
* Don't call rmnet_data_vnd_rx_fixup() to do this, as
* there's no guarantee the skb pointer is still valid.
*/
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
rmnet_nss_inc_stat(RMNET_NSS_TX_SUCCESS);
return 0;
} else if (rc == NSS_TX_FAILURE_QUEUE) {
rmnet_nss_inc_stat(RMNET_NSS_TX_BUSY_LOOP);
goto transmit;
}
fail:
rmnet_nss_inc_stat(RMNET_NSS_TX_FAIL);
kfree_skb(skb);
return 1;
}
/* Called by NSS in the DL exception case.
* Since the packet cannot be sent over the accelerated path, we need to
* handle it. Remove the ethernet header and pass it onward to the stack
* if possible.
*/
void rmnet_nss_receive(struct net_device *dev, struct sk_buff *skb,
struct napi_struct *napi)
{
rmnet_nss_inc_stat(RMNET_NSS_EXCEPTIONS);
if (!skb)
return;
if (rmnet_nss_ethhdr_pull(skb)) {
rmnet_nss_inc_stat(RMNET_NSS_EX_BAD_HDR);
goto drop;
}
/* reset header pointers */
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
skb_reset_mac_header(skb);
/* reset packet type */
skb->pkt_type = PACKET_HOST;
skb->dev = dev;
/* reset protocol type */
switch (skb->data[0] & 0xF0) {
case 0x40:
skb->protocol = htons(ETH_P_IP);
break;
case 0x60:
skb->protocol = htons(ETH_P_IPV6);
break;
default:
rmnet_nss_inc_stat(RMNET_NSS_EX_BAD_IP);
goto drop;
}
rmnet_nss_inc_stat(RMNET_NSS_EX_SUCCESS);
/* Set this so that we don't loop around netif_receive_skb */
skb->cb[0] = 1;
netif_receive_skb(skb);
return;
drop:
kfree_skb(skb);
}
/* Called by NSS in the UL acceleration case.
 * We are guaranteed to have an Ethernet packet here from the NSS hardware.
 * We need to pull the header off and invoke our ndo_start_xmit function
 * to handle transmitting the packet to the network stack.
 */
void rmnet_nss_xmit(struct net_device *dev, struct sk_buff *skb)
{
netdev_tx_t ret;
skb_pull(skb, sizeof(struct ethhdr));
rmnet_nss_inc_stat(RMNET_NSS_RX_ETH);
/* NSS takes care of shaping, so bypassing Qdiscs like this is OK */
ret = dev->netdev_ops->ndo_start_xmit(skb, dev);
if (unlikely(ret == NETDEV_TX_BUSY)) {
dev_kfree_skb_any(skb);
rmnet_nss_inc_stat(RMNET_NSS_RX_BUSY);
}
}
/* Create and register an NSS context for an rmnet_data device */
int rmnet_nss_create_vnd(struct net_device *dev)
{
struct rmnet_nss_ctx *ctx;
ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
if (!ctx)
return -ENOMEM;
ctx->rmnet_dev = dev;
ctx->nss_ctx = nss_rmnet_rx_create_sync_nexthop(dev, NSS_N2H_INTERFACE,
NSS_C2C_TX_INTERFACE);
if (!ctx->nss_ctx) {
kfree(ctx);
return -1;
}
nss_rmnet_rx_register(ctx->nss_ctx, rmnet_nss_receive, dev);
nss_rmnet_rx_xmit_callback_register(ctx->nss_ctx, rmnet_nss_xmit);
hash_add_ptr(rmnet_nss_ctx_hashtable, &ctx->hnode, dev);
return 0;
}
/* Unregister and destroy the NSS context for an rmnet_data device */
int rmnet_nss_free_vnd(struct net_device *dev)
{
struct rmnet_nss_ctx *ctx;
ctx = rmnet_nss_find_ctx(dev);
rmnet_nss_free_ctx(ctx);
return 0;
}
static const struct rmnet_nss_cb rmnet_nss = {
.nss_create = rmnet_nss_create_vnd,
.nss_free = rmnet_nss_free_vnd,
.nss_tx = rmnet_nss_tx,
};
int __init rmnet_nss_init(void)
{
pr_err("%s(): initializing rmnet_nss\n", __func__);
RCU_INIT_POINTER(rmnet_nss_callbacks, &rmnet_nss);
return 0;
}
void __exit rmnet_nss_exit(void)
{
struct hlist_node *tmp;
struct rmnet_nss_ctx *ctx;
int bkt;
pr_err("%s(): exiting rmnet_nss\n", __func__);
RCU_INIT_POINTER(rmnet_nss_callbacks, NULL);
/* Tear down all NSS contexts */
hash_for_each_safe(rmnet_nss_ctx_hashtable, bkt, tmp, ctx, hnode)
rmnet_nss_free_ctx(ctx);
}
#if 0
MODULE_LICENSE("GPL v2");
module_init(rmnet_nss_init);
module_exit(rmnet_nss_exit);
#endif
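Because RMNET_NSS_STAT registers every counter as a read-only (0444) module parameter, the NSS statistics can also be read straight from sysfs. A sketch, assuming this file is built into pcie_mhi.ko as it is in this package (the parameter directory follows the module name):
# each RMNET_NSS_STAT counter shows up as a module parameter
grep . /sys/module/pcie_mhi/parameters/rmnet_nss_*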

View File

@ -0,0 +1,31 @@
root@imx6qsabresd:~# busybox microcom /dev/mhi_DUN
[ 384.652992] [I][mhi_uci_open] Node open, ref counts 1
[ 384.658144] [I][mhi_uci_open] Starting channel
[ 384.662612] [I][__mhi_prepare_channel] Entered: preparing channel:32
[ 384.680397] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 384.685890] [I][__mhi_prepare_channel] Chan:32 successfully moved to start state
[ 384.693312] [I][__mhi_prepare_channel] Entered: preparing channel:33
[ 384.708692] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 384.714324] [I][__mhi_prepare_channel] Chan:33 successfully moved to start state
RDY
+CFUN: 1
+CPIN: READY
+QUSIM: 1
+QIND: SMS DONE
+QIND: PB DONE
ati
Quectel
EM20
Revision: EM20GR01A01M4G
OK
at+cpin?
+CPIN: READY
OK

View File

@ -0,0 +1,145 @@
root@OpenWrt:~# insmod pcie_mhi.ko mhi_mbim_enabled=1
root@OpenWrt:~# dmesg | grep mhi
[ 65.587160] mhi_init Quectel_Linux_PCIE_MHI_Driver_V1.3.0.6
[ 65.597089] mhi_pci_probe pci_dev->name = 0000:01:00.0, domain=0, bus=1, slot=0, vendor=17CB, device=0306
[ 65.602250] mhi_q 0000:01:00.0: BAR 0: assigned [mem 0x20300000-0x20300fff 64bit]
[ 65.611690] mhi_q 0000:01:00.0: enabling device (0140 -> 0142)
[ 65.619307] [I][mhi_init_pci_dev] msi_required = 5, msi_allocated = 5, msi_irq = 63
[ 65.619327] [I][mhi_power_up] dev_state:RESET
[ 65.619331] [I][mhi_async_power_up] Requested to power on
[ 65.619449] [I][mhi_alloc_coherent] size = 114688, dma_handle = 6fca0000
[ 65.619462] [I][mhi_init_dev_ctxt] mhi_ctxt->ctrl_seg = c221e000
[ 65.619731] [I][mhi_async_power_up] dev_state:RESET ee:AMSS
[ 65.619747] [I][mhi_pm_st_worker] Transition to state:READY
[ 65.619760] [I][mhi_pm_st_worker] INVALID_EE -> AMSS
[ 65.619764] [I][mhi_ready_state_transition] Waiting to enter READY state
[ 65.619885] [I][mhi_async_power_up] Power on setup success
[ 65.619897] [I][mhi_pci_probe] Return successful
[ 65.665114] [I][mhi_ready_state_transition] Device in READY State
[ 65.665125] [I][mhi_intvec_threaded_handlr] device ee:AMSS dev_state:READY, pm_state:POR
[ 65.665131] [I][mhi_intvec_threaded_handlr] device ee:AMSS dev_state:READY, INVALID_EE
[ 65.665133] [I][mhi_tryset_pm_state] Transition to pm state from:POR to:POR
[ 65.665137] [I][mhi_init_mmio] Initializing MMIO
[ 65.665142] [I][mhi_init_mmio] CHDBOFF:0x300
[ 65.665151] [I][mhi_init_mmio] ERDBOFF:0x700
[ 65.665156] [I][mhi_init_mmio] Programming all MMIO values.
[ 65.786283] [I][mhi_dump_tre] carl_ev evt_state_change mhistate=2
[ 65.786289] [I][mhi_process_ctrl_ev_ring] MHI state change event to state:M0
[ 65.786295] [I][mhi_pm_m0_transition] Entered With State:READY PM_STATE:POR
[ 65.786300] [I][mhi_tryset_pm_state] Transition to pm state from:POR to:M0
[ 65.789734] [I][mhi_dump_tre] carl_ev evt_ee_state execenv=2
[ 65.789739] [I][mhi_process_ctrl_ev_ring] MHI EE received event:AMSS
[ 65.789756] [I][mhi_pm_st_worker] Transition to state:MISSION MODE
[ 65.789767] [I][mhi_pm_st_worker] INVALID_EE -> AMSS
[ 65.789771] [I][mhi_pm_mission_mode_transition] Processing Mission Mode Transition
[ 65.789787] [I][mhi_init_timesync] No timesync capability found
[ 65.789791] [I][mhi_pm_mission_mode_transition] Adding new devices
[ 65.790570] [I][mhi_dtr_probe] Enter for DTR control channel
[ 65.790577] [I][__mhi_prepare_channel] Entered: preparing channel:18
[ 65.797036] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 65.797051] [I][__mhi_prepare_channel] Chan:18 successfully moved to start state
[ 65.797055] [I][__mhi_prepare_channel] Entered: preparing channel:19
[ 65.802457] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 65.802469] [I][__mhi_prepare_channel] Chan:19 successfully moved to start state
[ 65.802485] [I][mhi_dtr_probe] Exit with ret:0
[ 65.802748] [I][mhi_netdev_enable_iface] Prepare the channels for transfer
[ 65.802772] [I][__mhi_prepare_channel] Entered: preparing channel:100
[ 65.825279] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 65.825293] [I][__mhi_prepare_channel] Chan:100 successfully moved to start state
[ 65.825297] [I][__mhi_prepare_channel] Entered: preparing channel:101
[ 65.835565] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 65.835578] [I][__mhi_prepare_channel] Chan:101 successfully moved to start state
[ 65.839141] [I][mhi_netdev_enable_iface] Exited.
[ 65.839875] rmnet_vnd_register_device(rmnet_mhi0.1)=0
[ 65.843278] net rmnet_mhi0 rmnet_mhi0.1: NSS context created
[ 65.861808] [I][mhi_pm_mission_mode_transition] Exit with ret:0
[ 68.625595] [I][__mhi_prepare_channel] Entered: preparing channel:12
[ 68.634610] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 68.634622] [I][__mhi_prepare_channel] Chan:12 successfully moved to start state
[ 68.634625] [I][__mhi_prepare_channel] Entered: preparing channel:13
[ 68.644978] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 68.644987] [I][__mhi_prepare_channel] Chan:13 successfully moved to start state
[ 69.170666] net rmnet_mhi0: link_state 0x0 -> 0x1
[ 69.177035] [I][mhi_netdev_open] Opened net dev interface
[ 71.655431] [I][mhi_netdev_open] Opened net dev interface
root@OpenWrt:~# ./quectel-CM &
[04-02_04:14:12:134] Quectel_QConnectManager_Linux_V1.6.0.5
[04-02_04:14:12:134] Find /sys/bus/usb/devices/4-1 idVendor=0x2c7c idProduct=0x800, bus=0x004, dev=0x002
[04-02_04:14:12:135] network interface '' or qmidev '' is not exist
[04-02_04:14:12:135] netcard driver = pcie_mhi, driver version = V1.3.0.6
[04-02_04:14:12:135] Modem works in MBIM mode
[04-02_04:14:12:135] apn (null), user (null), passwd (null), auth 0
[04-02_04:14:12:135] IP Proto MBIMContextIPTypeIPv4
[04-02_04:14:12:154] mbim_read_thread is created
sh: can't create /sys/class/net/rmnet_mhi0/mbim/link_state: nonexistent directory
[04-02_04:14:12:156] system(echo 0 > /sys/class/net/rmnet_mhi0/mbim/link_state)=256
[04-02_04:14:12:185] system(ip address flush dev rmnet_mhi0)=0
[04-02_04:14:12:187] system(ip link set dev rmnet_mhi0 down)=0
[04-02_04:14:12:188] mbim_open_device()
[04-02_04:14:12:605] mbim_device_caps_query()
[04-02_04:14:12:610] DeviceId: 869710030002905
[04-02_04:14:12:610] HardwareInfo: 0
[04-02_04:14:12:610] mbim_set_radio_state( 1 )
[04-02_04:14:12:613] HwRadioState: 1, SwRadioState: 1
[04-02_04:14:12:613] mbim_subscriber_status_query()
[04-02_04:14:12:620] SubscriberReadyState NotInitialized -> Initialized
[04-02_04:14:12:620] mbim_register_state_query()
[04-02_04:14:12:625] RegisterState Unknown -> Home
[04-02_04:14:12:625] mbim_packet_service_query()
[04-02_04:14:12:629] PacketServiceState Unknown -> Attached
[04-02_04:14:12:629] mbim_query_connect(sessionID=0)
[04-02_04:14:12:633] ActivationState Unknown -> Deactivated
[04-02_04:14:12:633] mbim_set_connect(onoff=1, sessionID=0)
[ 69.170666] net rmnet_mhi0: link_state 0x0 -> 0x1
[04-02_04:14:12:680] ActivationState Deactivated -> Activated
[ 69.177035] [I][mhi_netdev_open] Opened net dev interface
[04-02_04:14:12:680] mbim_ip_config(sessionID=0)
[04-02_04:14:12:683] < SessionId = 0
[04-02_04:14:12:683] < IPv4ConfigurationAvailable = 0xf
[04-02_04:14:12:683] < IPv6ConfigurationAvailable = 0x0
[04-02_04:14:12:683] < IPv4AddressCount = 0x1
[04-02_04:14:12:683] < IPv4AddressOffset = 0x3c
[04-02_04:14:12:683] < IPv6AddressCount = 0x0
[04-02_04:14:12:683] < IPv6AddressOffset = 0x0
[04-02_04:14:12:683] < IPv4 = 10.129.59.93/30
[04-02_04:14:12:683] < gw = 10.129.59.94
[04-02_04:14:12:683] < dns1 = 211.138.180.2
[04-02_04:14:12:683] < dns2 = 211.138.180.3
[04-02_04:14:12:683] < ipv4 mtu = 1500
sh: can't create /sys/class/net/rmnet_mhi0/mbim/link_state: nonexistent directory
[04-02_04:14:12:684] system(echo 1 > /sys/class/net/rmnet_mhi0/mbim/link_state)=256
[04-02_04:14:12:689] system(ip link set dev rmnet_mhi0 up)=0
[04-02_04:14:12:692] system(ip -4 address flush dev rmnet_mhi0)=0
[04-02_04:14:12:694] system(ip -4 address add 10.129.59.93/30 dev rmnet_mhi0)=0
[04-02_04:14:12:697] system(ip -4 route add default via 10.129.59.94 dev rmnet_mhi0)=0
[04-02_04:14:12:699] system(ip -4 link set dev rmnet_mhi0 mtu 1500)=0
root@OpenWrt:~# ifconfig rmnet_mhi0
rmnet_mhi0 Link encap:UNSPEC HWaddr 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00
UP RUNNING NOARP MTU:1500 Metric:1
RX packets:99379 errors:0 dropped:0 overruns:0 frame:0
TX packets:176569 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:1528181052 (1.4 GiB) TX bytes:62467192 (59.5 MiB)
root@OpenWrt:~# ifconfig rmnet_mhi0.1
rmnet_mhi0.1 Link encap:UNSPEC HWaddr 02-50-F4-00-00-00-00-00-00-00-00-00-00-00-00-00
inet addr:10.129.59.93 Mask:255.255.255.252
inet6 addr: fe80::50:f4ff:fe00:0/64 Scope:Link
UP RUNNING NOARP MTU:1500 Metric:1
RX packets:1089360 errors:0 dropped:0 overruns:0 frame:0
TX packets:176581 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:1521449058 (1.4 GiB) TX bytes:57525792 (54.8 MiB)
# adjust CPU load balancing
root@OpenWrt:~# echo 2 > /sys/class/net/rmnet_mhi0/queues/rx-0/rps_cpus
root@OpenWrt:~# echo 4 > /sys/class/net/rmnet_mhi0.1/queues/rx-0/rps_cpus
root@OpenWrt:~# echo 2000 > /proc/sys/net/core/netdev_max_backlog
root@OpenWrt:~# cat /sys/class/net/rmnet_mhi0/queues/rx-0/rps_cpus
2
root@OpenWrt:~# cat /sys/class/net/rmnet_mhi0.1/queues/rx-0/rps_cpus
4
root@OpenWrt:~# cat /proc/sys/net/core/netdev_max_backlog
2000
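The values written to rps_cpus are hexadecimal CPU bitmasks in which bit n selects CPU n, so 2 steers receive processing to CPU1 and 4 to CPU2, keeping them off the CPU that services the MHI interrupt. The same knob generalizes to other core counts; for example, a hypothetical quad-core board could spread one queue across CPUs 1-3:
# bitmask 0xe = CPU1 | CPU2 | CPU3
echo e > /sys/class/net/rmnet_mhi0/queues/rx-0/rps_cpus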

View File

@ -0,0 +1,134 @@
To run in QMI mode, disable (comment out) ccflags-y += -DCONFIG_MHI_NETDEV_MBIM in pcie_mhi/Makefile
root@OpenWrt:~# insmod pcie_mhi.ko
root@OpenWrt:~# dmesg | grep mhi
[ 138.483252] mhi_init Quectel_Linux_PCIE_MHI_Driver_V1.3.0.6
[ 138.492350] mhi_pci_probe pci_dev->name = 0000:01:00.0, domain=0, bus=1, slot=0, vendor=17CB, device=0306
[ 138.497564] mhi_q 0000:01:00.0: BAR 0: assigned [mem 0x20300000-0x20300fff 64bit]
[ 138.506952] mhi_q 0000:01:00.0: enabling device (0140 -> 0142)
[ 138.514562] [I][mhi_init_pci_dev] msi_required = 5, msi_allocated = 5, msi_irq = 63
[ 138.514581] [I][mhi_power_up] dev_state:RESET
[ 138.514587] [I][mhi_async_power_up] Requested to power on
[ 138.514728] [I][mhi_alloc_coherent] size = 114688, dma_handle = 72160000
[ 138.514734] [I][mhi_init_dev_ctxt] mhi_ctxt->ctrl_seg = c221f000
[ 138.515030] [I][mhi_async_power_up] dev_state:RESET ee:AMSS
[ 138.515056] [I][mhi_pm_st_worker] Transition to state:READY
[ 138.515067] [I][mhi_pm_st_worker] INVALID_EE -> AMSS
[ 138.515073] [I][mhi_ready_state_transition] Waiting to enter READY state
[ 138.515210] [I][mhi_async_power_up] Power on setup success
[ 138.515227] [I][mhi_pci_probe] Return successful
[ 138.589013] [I][mhi_ready_state_transition] Device in READY State
[ 138.589029] [I][mhi_intvec_threaded_handlr] device ee:AMSS dev_state:READY, pm_state:POR
[ 138.589038] [I][mhi_intvec_threaded_handlr] device ee:AMSS dev_state:READY, INVALID_EE
[ 138.589041] [I][mhi_tryset_pm_state] Transition to pm state from:POR to:POR
[ 138.589046] [I][mhi_init_mmio] Initializing MMIO
[ 138.589050] [I][mhi_init_mmio] CHDBOFF:0x300
[ 138.589060] [I][mhi_init_mmio] ERDBOFF:0x700
[ 138.589065] [I][mhi_init_mmio] Programming all MMIO values.
[ 138.706124] [I][mhi_dump_tre] carl_ev evt_state_change mhistate=2
[ 138.706132] [I][mhi_process_ctrl_ev_ring] MHI state change event to state:M0
[ 138.706140] [I][mhi_pm_m0_transition] Entered With State:READY PM_STATE:POR
[ 138.706146] [I][mhi_tryset_pm_state] Transition to pm state from:POR to:M0
[ 138.708699] [I][mhi_dump_tre] carl_ev evt_ee_state execenv=2
[ 138.708706] [I][mhi_process_ctrl_ev_ring] MHI EE received event:AMSS
[ 138.708726] [I][mhi_pm_st_worker] Transition to state:MISSION MODE
[ 138.708736] [I][mhi_pm_st_worker] INVALID_EE -> AMSS
[ 138.708742] [I][mhi_pm_mission_mode_transition] Processing Mission Mode Transition
[ 138.708758] [I][mhi_init_timesync] No timesync capability found
[ 138.708764] [I][mhi_pm_mission_mode_transition] Adding new devices
[ 138.709785] [I][mhi_dtr_probe] Enter for DTR control channel
[ 138.709794] [I][__mhi_prepare_channel] Entered: preparing channel:18
[ 138.715378] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 138.715397] [I][__mhi_prepare_channel] Chan:18 successfully moved to start state
[ 138.715403] [I][__mhi_prepare_channel] Entered: preparing channel:19
[ 138.720201] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 138.720218] [I][__mhi_prepare_channel] Chan:19 successfully moved to start state
[ 138.720236] [I][mhi_dtr_probe] Exit with ret:0
[ 138.720590] [I][mhi_netdev_enable_iface] Prepare the channels for transfer
[ 138.720630] [I][__mhi_prepare_channel] Entered: preparing channel:100
[ 138.757230] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 138.757253] [I][__mhi_prepare_channel] Chan:100 successfully moved to start state
[ 138.757259] [I][__mhi_prepare_channel] Entered: preparing channel:101
[ 138.774352] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 138.774370] [I][__mhi_prepare_channel] Chan:101 successfully moved to start state
[ 138.778137] [I][mhi_netdev_enable_iface] Exited.
[ 138.779018] rmnet_vnd_register_device(rmnet_mhi0.1)=0
[ 138.782283] net rmnet_mhi0 rmnet_mhi0.1: NSS context created
[ 138.800865] [I][mhi_pm_mission_mode_transition] Exit with ret:0
root@OpenWrt:~# ./quectel-CM &
root@OpenWrt:~# [04-02_04:12:16:477] Quectel_QConnectManager_Linux_V1.6.0.5
[04-02_04:12:16:477] Find /sys/bus/usb/devices/4-1 idVendor=0x2c7c idProduct=0x800, bus=0x004, dev=0x002
[04-02_04:12:16:478] network interface '' or qmidev '' is not exist
[04-02_04:12:16:478] netcard driver = pcie_mhi, driver version = V1.3.0.6
[04-02_04:12:16:479] qmap_mode = 1, qmap_version = 9, qmap_size = 16384, muxid = 0x81, qmap_netcard = rmnet_mhi0.1
[04-02_04:12:16:479] Modem works in QMI mode
[04-02_04:12:16:505] cdc_wdm_fd = 7
[04-02_04:12:17:506] QmiThreadSendQMITimeout pthread_cond_timeout_np timeout
[04-02_04:12:18:516] Get clientWDS = 19
[04-02_04:12:18:520] Get clientDMS = 1
[04-02_04:12:18:524] Get clientNAS = 3
[04-02_04:12:18:527] Get clientUIM = 1
[04-02_04:12:18:531] Get clientWDA = 1
[04-02_04:12:18:535] requestBaseBandVersion RM500QGLAAR03A01M4G_BETA_20200107F 1 [Dec 30 2019 17:00:00]
[04-02_04:12:18:539] qmap_settings.rx_urb_size = 16384
[04-02_04:12:18:539] qmap_settings.ul_data_aggregation_max_datagrams = 16
[04-02_04:12:18:539] qmap_settings.ul_data_aggregation_max_size = 8192
[04-02_04:12:18:539] qmap_settings.dl_minimum_padding = 0
[04-02_04:12:18:550] requestSetLoopBackState(loopback_state=1, replication_factor=14)
[04-02_04:12:18:557] requestGetSIMStatus SIMStatus: SIM_ABSENT
[04-02_04:12:18:560] requestGetProfile[1] ///0
[04-02_04:12:18:563] requestRegistrationState2 MCC: 0, MNC: 0, PS: Detached, DataCap: UNKNOW
[04-02_04:12:18:565] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED
[04-02_04:12:18:566] ifconfig rmnet_mhi0.1 down
[04-02_04:12:18:571] ifconfig rmnet_mhi0.1 0.0.0.0
ifconfig: SIOCSIFFLAGS: Network is down
[04-02_04:12:18:575] SetLoopBackInd: loopback_state=1, replication_factor=14
[04-02_04:12:18:591] requestSetupDataCall WdsConnectionIPv4Handle: 0xe40182a0
[04-02_04:12:18:601] ifconfig rmnet_mhi0 up
[04-02_04:12:18:607] ifconfig rmnet_mhi0.1 up
[04-02_04:12:18:613] you are use OpenWrt?
[04-02_04:12:18:614] should not calling udhcpc manually?
[04-02_04:12:18:614] should modify /etc/config/network as below?
[04-02_04:12:18:614] config interface wan
[04-02_04:12:18:614] option ifname rmnet_mhi0.1
[04-02_04:12:18:614] option proto dhcp
[04-02_04:12:18:614] should use "/sbin/ifstaus wan" to check rmnet_mhi0.1 's status?
[04-02_04:12:18:614] busybox udhcpc -f -n -q -t 5 -i rmnet_mhi0.1
udhcpc: started, v1.28.3
udhcpc: sending discover
udhcpc: sending select for 192.168.48.171
udhcpc: lease of 192.168.48.171 obtained, lease time 7200
[04-02_04:12:18:809] udhcpc: ifconfig rmnet_mhi0.1 192.168.48.171 netmask 255.255.255.248 broadcast +
[04-02_04:12:18:819] udhcpc: setting default routers: 192.168.48.172
root@OpenWrt:~# ifconfig rmnet_mhi0
rmnet_mhi0 Link encap:Ethernet HWaddr 02:50:F4:00:00:00
inet6 addr: fe80::50:f4ff:fe00:0/64 Scope:Link
UP RUNNING NOARP MTU:1500 Metric:1
RX packets:2 errors:0 dropped:0 overruns:0 frame:0
TX packets:2 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:608 (608.0 B) TX bytes:672 (672.0 B)
root@OpenWrt:~# ifconfig rmnet_mhi0.1
rmnet_mhi0.1 Link encap:UNSPEC HWaddr 02-50-F4-00-00-00-00-00-00-00-00-00-00-00-00-00
inet addr:192.168.48.171 Mask:255.255.255.248
inet6 addr: fe80::50:f4ff:fe00:0/64 Scope:Link
UP RUNNING NOARP MTU:1500 Metric:1
RX packets:2 errors:0 dropped:0 overruns:0 frame:0
TX packets:2 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:592 (592.0 B) TX bytes:656 (656.0 B)
# adjust CPU load balancing
root@OpenWrt:~# echo 2 > /sys/class/net/rmnet_mhi0/queues/rx-0/rps_cpus
root@OpenWrt:~# echo 4 > /sys/class/net/rmnet_mhi0.1/queues/rx-0/rps_cpus
root@OpenWrt:~# echo 2000 > /proc/sys/net/core/netdev_max_backlog
root@OpenWrt:~# cat /sys/class/net/rmnet_mhi0/queues/rx-0/rps_cpus
2
root@OpenWrt:~# cat /sys/class/net/rmnet_mhi0.1/queues/rx-0/rps_cpus
4
root@OpenWrt:~# cat /proc/sys/net/core/netdev_max_backlog
2000

View File

@ -0,0 +1,14 @@
root@imx6qsabresd:~# ./QLog -p /dev/mhi_DIAG -s log &
root@imx6qsabresd:~# [000.000]QLog Version: Quectel_QLog_Linux&Android_V1.2.4
[ 298.597963] [I][mhi_uci_open] Node open, ref counts 1
[ 298.605601] [I][mhi_uci_open] Starting channel
[ 298.612159] [I][__mhi_prepare_channel] Entered: preparing channel:4
[ 298.629906] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 298.635415] [I][__mhi_prepare_channel] Chan:4 successfully moved to start state
[ 298.642749] [I][__mhi_prepare_channel] Entered: preparing channel:5
[ 298.658043] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 298.663543] [I][__mhi_prepare_channel] Chan:5 successfully moved to start state
[000.075]open /dev/mhi_DIAG ttyfd = 3
[000.075]Press CTRL+C to stop catch log.
[000.096]qlog_logfile_create log/20160920_145758_0000.qmdl logfd=4
[005.268]recv: 0M 70K 490B in 5181 msec

View File

@ -0,0 +1,47 @@
#
# Copyright (C) 2015 OpenWrt.org
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.
#
include $(TOPDIR)/rules.mk
PKG_NAME:=qmi_wwan_q
PKG_VERSION:=3.0
PKG_RELEASE:=1
include $(INCLUDE_DIR)/kernel.mk
include $(INCLUDE_DIR)/package.mk
define KernelPackage/qmi_wwan_q
SUBMENU:=qmiwwan Support
TITLE:=Quectel Linux USB QMI WWAN Driver
DEPENDS:=+kmod-usb-net kmod-usb-wdm
FILES:=$(PKG_BUILD_DIR)/qmi_wwan_q.ko
AUTOLOAD:=$(call AutoLoad,81,qmi_wwan_q)
endef
define KernelPackage/qmi_wwan_q/description
Quectel Linux USB QMI WWAN Driver
endef
MAKE_OPTS:= \
ARCH="$(LINUX_KARCH)" \
CROSS_COMPILE="$(TARGET_CROSS)" \
CXXFLAGS="$(TARGET_CXXFLAGS)" \
M="$(PKG_BUILD_DIR)" \
$(EXTRA_KCONFIG)
define Build/Prepare
mkdir -p $(PKG_BUILD_DIR)
$(CP) ./src/* $(PKG_BUILD_DIR)/
endef
define Build/Compile
$(MAKE) -C "$(LINUX_DIR)" \
$(MAKE_OPTS) \
modules
endef
$(eval $(call KernelPackage,qmi_wwan_q))
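With this Makefile dropped into the build tree (for example under package/kernel/qmi_wwan_q; the exact feed path is the integrator's choice), the module builds like any other kmod package:
# from the OpenWrt buildroot top directory
make package/qmi_wwan_q/{clean,compile} V=s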

View File

@ -0,0 +1,36 @@
obj-m += qmi_wwan_q.o
PWD := $(shell pwd)
OUTPUTDIR=/lib/modules/`uname -r`/kernel/drivers/net/usb/
ifeq ($(ARCH),)
ARCH := $(shell uname -m)
endif
ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE :=
endif
ifeq ($(KDIR),)
KDIR := /lib/modules/$(shell uname -r)/build
ifeq ($(ARCH),i686)
ifeq ($(wildcard $(KDIR)/arch/$(ARCH)),)
ARCH=i386
endif
endif
endif
ifneq ($(findstring &,${PWD}),)
$(warning "${PWD}")
$(warning "current directory contain special char '&' !")
$(error "please remove it!")
endif
default:
$(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) modules
install: default
cp $(PWD)/qmi_wwan_q.ko /lib/modules/$(shell uname -r)/kernel/drivers/net/usb/
depmod
clean:
rm -rf *~ .tmp_versions modules.order Module.symvers
find . -type f -name "*~" -o -name "*.o" -o -name "*.ko" -o -name "*.cmd" -o -name "*.mod.c" | xargs rm -rf

View File

@ -0,0 +1,146 @@
Release Notes
[V1.2.2]
Date: 9/7/2022
enhancement:
1. Optimization: the network card send-queue wakeup is changed from a callback to a tasklet
2. Add support for returning LAN packets in bridge mode
3. support ndo ioctl on kernels > 5.14
4. Allow setting MTU greater than 1500
fix:
[V1.2.1]
Date: 9/26/2021
enhancement:
1. support IPQ5018's NSS
2. use 'qsdk/qca/src/data-kernel/drivers/rmnet-nss/rmnet_nss.c' instead of my own rmnet_nss.c;
qmi_wwan_q.ko must now load after rmnet_nss.ko
fix:
[V1.2.0.25]
Date: 9/17/2021
enhancement:
fix:
1. add sdx6x platform support
[V1.2.0.24]
Date: 9/6/2021
enhancement:
fix:
1. add BG95 support
2. support Linux 5.14.0
[V1.2.0.23]
Date: 3/23/2021
enhancement:
fix:
1. add sdx12 platform support
[V1.2.0.22]
Date: 2/5/2021
enhancement:
fix:
1. fix panic (use-after-free) during modem reboot stress tests
[V1.2.0.21]
Date: 2/4/2021
enhancement:
1. Code refactoring - QMAP and rmnet
fix:
1. qmap_qmi_wwan_rx_fixup: change skb_dequeue to __skb_dequeue
[V1.2.0.20]
Date: 11/2/2020
enhancement:
fix:
1. LTE-A modems cannot obtain an IP address via DHCP
[V1.2.0.19]
Date: 10/9/2020
enhancement:
fix:
1. X55 cannot access the internet after USB resume
[V1.2.0.18]
Date: 10/9/2020
enhancement:
fix:
1. X55: rename rmnet_usb0.1 to wwan0_1
1.1 if the name contains '.', OpenWrt will treat it as a VLAN and auto-create the VLAN
1.2 if the name contains '.', Android will treat it as invalid
1.3 if it is named rmnet_usb0 on a QCOM SoC, QCOM's netmgr will auto-manage it
[V1.2.0.17]
Date: 9/14/2020
enhancement:
1. Code refactoring - QMAP size and version
fix:
[V1.2.0.16]
Date: 9/14/2020
enhancement:
1. rx_fixup() checks whether there is enough skb_headroom() to fill in the Ethernet header
fix:
1. fix "WARNING: suspicious RCU usage"
[V1.2.0.15]
Date: 9/10/2020
enhancement:
fix:
1. fix compile errors on kernel 3.10~3.13
[V1.2.0.14]
Date: 7/24/2020
enhancement:
fix:
1. fix QMAP V5 bug on Big Endian CPU
[V1.2.0.13]
Date: 6/22/2020
enhancement:
fix:
1. fix no data traffic during upload TPUT tests
[V1.2.0.12]
Date: 5/29/2020
enhancement:
fix:
1. IPQ8074: when hyfi is enabled, quectel-CM will cause a system crash
[V1.2.0.9]
Date: 5/13/2020
enhancement:
fix:
1. IPQ8074: gate CONFIG_QCA_NSS_DRV on CONFIG_PINCTRL_IPQ807x (was CONFIG_ARCH_IPQ807x)
[V1.2.0.8]
Date: 5/9/2020
enhancement:
fix:
1. fix compile errors on kernel V3.10
[V1.2.0.7]
Date: 4/25/2020
enhancement:
1. X55 support bridge mode
fix:
[V1.2.0.6]
Date: 4/20/2020
enhancement:
1. add stats64 support, otherwise the rx/tx statistics wrap to 0 once traffic exceeds 4 GB
2. do not use skb_clone(), which drives QCOM's NSS and SFE CPU load very high
fix:
[V1.2.0.5]
Date: 4/8/2020
enhancement:
1. add attribute link_state and change carrier state according to link_state;
quectel-CM sets link_state to 1 when the QMI setup call succeeds.
fix:
[V1.2.0.4]
Date: 4/8/2020
enhancement:
1. support X55's QMAP V5
fix:
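The link_state attribute added in V1.2.0.5 is how quectel-CM keeps the kernel carrier state in sync with the modem's data call. A hedged example, assuming the qmi_wwan_q interface came up as wwan0 (the sysfs path follows the interface name):
# mark the data call as up; the driver flips the carrier accordingly
echo 1 > /sys/class/net/wwan0/link_state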

File diff suppressed because it is too large

View File

File diff suppressed because it duplicates the rmnet_nss.c shown above

Some files were not shown because too many files have changed in this diff