diff --git a/package/wwan/app/luci-app-gobinetmodem/Makefile b/package/wwan/app/luci-app-gobinetmodem/Makefile new file mode 100755 index 000000000..f12f31aee --- /dev/null +++ b/package/wwan/app/luci-app-gobinetmodem/Makefile @@ -0,0 +1,20 @@ +# +# Copyright (C) 2015 OpenWrt.org +# +# This is free software, licensed under the GNU General Public License v2. +# See /LICENSE for more information. +# + +include $(TOPDIR)/rules.mk + +LUCI_TITLE:=Modem Server +LUCI_DEPENDS:=+luci-compat +kmod-usb-net +kmod-usb-net-cdc-ether +kmod-usb-acm \ + +kmod-usb-net-qmi-wwan +kmod-usb-net-rndis +kmod-usb-serial-qualcomm \ + +kmod-usb-net-sierrawireless +kmod-usb-ohci +kmod-usb-serial \ + +kmod-usb-serial-option \ + +kmod-usb2 +kmod-usb3 \ + +quectel-CM-5G +kmod-gobinet + +include $(TOPDIR)/feeds/luci/luci.mk + +# call BuildPackage - OpenWrt buildroot signature diff --git a/package/wwan/app/luci-app-gobinetmodem/luasrc/controller/gobinetmodem.lua b/package/wwan/app/luci-app-gobinetmodem/luasrc/controller/gobinetmodem.lua new file mode 100644 index 000000000..2a761d645 --- /dev/null +++ b/package/wwan/app/luci-app-gobinetmodem/luasrc/controller/gobinetmodem.lua @@ -0,0 +1,9 @@ +module("luci.controller.gobinetmodem", package.seeall) + +function index() + if not nixio.fs.access("/etc/config/gobinetmodem") then + return + end + + entry({"admin", "network", "gobinetmodem"}, cbi("gobinetmodem"), _("Gobinet Modem Server"), 80).dependent=false +end diff --git a/package/wwan/app/luci-app-gobinetmodem/luasrc/model/cbi/gobinetmodem.lua b/package/wwan/app/luci-app-gobinetmodem/luasrc/model/cbi/gobinetmodem.lua new file mode 100644 index 000000000..a117d7b0b --- /dev/null +++ b/package/wwan/app/luci-app-gobinetmodem/luasrc/model/cbi/gobinetmodem.lua @@ -0,0 +1,39 @@ +-- Copyright 2016 David Thornley +-- Licensed to the public under the Apache License 2.0. 
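+-- Note (illustrative, not part of the original sources): this CBI model
+-- edits /etc/config/gobinetmodem. A fully filled-in form produces a UCI
+-- section along these lines (values are example placeholders, not defaults):
+--
+--   config service
+--       option enabled  '1'
+--       option apn      'internet'
+--       option pincode  '1234'
+--       option username 'user'
+--       option password 'pass'
+--       option auth     'both'
+--       option tool     'quectel-CM'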
+
+
+mp=Map("gobinetmodem",translate("gobinet Modem Server"))
+mp.description=translate("Modem Server For OpenWrt")
+
+s=mp:section(TypedSection,"service",translate("Base Setting"))
+s.anonymous = true
+
+enabled=s:option(Flag,"enabled",translate("Enable"))
+enabled.default=0
+enabled.rmempty=false
+
+apn=s:option(Value,"apn",translate("APN"))
+apn.rmempty=true
+
+pincode=s:option(Value,"pincode",translate("PIN"))
+pincode.rmempty=true
+
+username=s:option(Value,"username",translate("PAP/CHAP username"))
+username.rmempty=true
+
+password=s:option(Value,"password",translate("PAP/CHAP password"))
+password.rmempty=true
+
+auth=s:option(Value,"auth",translate("Authentication Type"))
+auth.rmempty=true
+auth:value("",translate("-- Please choose --"))
+auth:value("both","PAP/CHAP (both)")
+auth:value("pap","PAP")
+auth:value("chap","CHAP")
+auth:value("none","NONE")
+
+tool=s:option(Value,"tool",translate("Tools"))
+tool:value("quectel-CM","quectel-CM")
+tool.rmempty=true
+
+return mp
+
diff --git a/package/wwan/app/luci-app-gobinetmodem/po/zh_Hans/usbmodem.po b/package/wwan/app/luci-app-gobinetmodem/po/zh_Hans/usbmodem.po
new file mode 100644
index 000000000..df1f6df8e
--- /dev/null
+++ b/package/wwan/app/luci-app-gobinetmodem/po/zh_Hans/usbmodem.po
@@ -0,0 +1,24 @@
+msgid ""
+msgstr ""
+"Project-Id-Version: \n"
+"POT-Creation-Date: \n"
+"PO-Revision-Date: \n"
+"Last-Translator: dingpengyu \n"
+"Language-Team: \n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: zh_CN\n"
+"X-Generator: Poedit 2.3.1\n"
+
+msgid "Base Setting"
+msgstr "基本设置"
+
+msgid "gobinet Modem Server"
+msgstr "gobinet移动网络拨号服务"
+
+msgid "Modem Server For OpenWrt"
+msgstr "OpenWrt移动网络拨号服务"
+
+msgid "Tools"
+msgstr "拨号工具"
diff --git a/package/wwan/app/luci-app-gobinetmodem/root/etc/config/gobinetmodem b/package/wwan/app/luci-app-gobinetmodem/root/etc/config/gobinetmodem
new file mode 100644
index 000000000..05fad9b41
--- /dev/null
+++ b/package/wwan/app/luci-app-gobinetmodem/root/etc/config/gobinetmodem
@@ -0,0 +1,4 @@
+config service
+	option tool 'quectel-CM'
+	option enabled '0'
+
diff --git a/package/wwan/app/luci-app-gobinetmodem/root/etc/init.d/gobinetmodem b/package/wwan/app/luci-app-gobinetmodem/root/etc/init.d/gobinetmodem
new file mode 100755
index 000000000..953ee96c6
--- /dev/null
+++ b/package/wwan/app/luci-app-gobinetmodem/root/etc/init.d/gobinetmodem
@@ -0,0 +1,80 @@
+#!/bin/sh /etc/rc.common
+# Copyright (C) 2006-2014 OpenWrt.org
+
+START=99
+STOP=16
+USE_PROCD=1
+# started and supervised by procd
+
+run_4g()
+{
+	local enabled
+	config_get_bool enabled $1 enabled
+
+	echo "run 4G" >> /tmp/log4g
+
+	if [ "$enabled" = "1" ]; then
+		local user
+		local password
+		local apn
+		local auth
+		local pincode
+		local device
+		local tool
+
+		# echo "enable 4G" >> /tmp/log4g
+		config_get user $1 user
+		config_get password $1 password
+		config_get apn $1 apn
+		config_get auth $1 auth
+		config_get pincode $1 pincode
+		config_get device $1 device
+		config_get tool $1 tool
+		config_get tty $1 tty
+		config_get atcmd $1 atcmd
+
+		devname="$(basename "$device")"
+		devpath="$(readlink -f /sys/class/usbmisc/$devname/device/)"
+		ifname="$( ls "$devpath"/net )"
+
+		if [ "$tool" = "at" ];then
+			at_tool "$atcmd" -d $tty
+		else
+			procd_open_instance
+			# open a procd instance; procd allows one application to run several instances
+			# (they can be inspected with "ubus call service list")
+			procd_set_param command $tool -i $ifname -s $apn
+			if [ "$password" != "" ];then
+				procd_append_param command $user $password $auth
+			fi
+			if [ "$pincode" != "" ]; then
+				procd_append_param command -p $pincode
+			fi
+			# procd_append_param command -f /tmp/4g.log
+			procd_set_param respawn
+			echo "quectel-CM has started."
+			procd_close_instance
+			# close the instance
+		fi
+
+	fi
+}
+
+
+service_triggers()
+{
+	procd_add_reload_trigger "gobinetmodem"
+}
+
+start_service() {
+	config_load gobinetmodem
+	config_foreach run_4g service
+}
+
+stop_service()
+{
+	echo "4G stop" >> /tmp/log4g
+	killall quectel-CM
+	echo "quectel-CM has stopped."
+}
+
diff --git a/package/wwan/app/luci-app-gobinetmodem/root/etc/uci-defaults/luci-gobinetmodem b/package/wwan/app/luci-app-gobinetmodem/root/etc/uci-defaults/luci-gobinetmodem
new file mode 100755
index 000000000..6eb5a0999
--- /dev/null
+++ b/package/wwan/app/luci-app-gobinetmodem/root/etc/uci-defaults/luci-gobinetmodem
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+uci -q batch <<-EOF >/dev/null
+	delete ucitrack.@gobinetmodem[-1]
+	add ucitrack gobinetmodem
+	set ucitrack.@gobinetmodem[-1].init=gobinetmodem
+	commit ucitrack
+EOF
+
+rm -f /tmp/luci-indexcache
+exit 0
+
diff --git a/package/wwan/app/luci-app-pcimodem/Makefile b/package/wwan/app/luci-app-pcimodem/Makefile
new file mode 100755
index 000000000..1bcb7484d
--- /dev/null
+++ b/package/wwan/app/luci-app-pcimodem/Makefile
@@ -0,0 +1,15 @@
+#
+# Copyright (C) 2015 OpenWrt.org
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information.
+#
+
+include $(TOPDIR)/rules.mk
+
+LUCI_TITLE:=PCI Modem Server
+LUCI_DEPENDS:=+kmod-pcie_mhi +pciutils +quectel-CM-5G
+
+include $(TOPDIR)/feeds/luci/luci.mk
+
+# call BuildPackage - OpenWrt buildroot signature
diff --git a/package/wwan/app/luci-app-pcimodem/luasrc/controller/pcimodem.lua b/package/wwan/app/luci-app-pcimodem/luasrc/controller/pcimodem.lua
new file mode 100644
index 000000000..e1630c5b8
--- /dev/null
+++ b/package/wwan/app/luci-app-pcimodem/luasrc/controller/pcimodem.lua
@@ -0,0 +1,9 @@
+module("luci.controller.pcimodem", package.seeall)
+
+function index()
+	if not nixio.fs.access("/etc/config/pcimodem") then
+		return
+	end
+
+	entry({"admin", "network", "pcimodem"}, cbi("pcimodem"), _("PCI Modem Server"), 80).dependent=false
+end
diff --git a/package/wwan/app/luci-app-pcimodem/luasrc/model/cbi/pcimodem.lua b/package/wwan/app/luci-app-pcimodem/luasrc/model/cbi/pcimodem.lua
new file mode 100644
index 000000000..591ba4b5a
--- /dev/null
+++ b/package/wwan/app/luci-app-pcimodem/luasrc/model/cbi/pcimodem.lua
@@ -0,0 +1,39 @@
+-- Copyright 2016 David Thornley
+-- Licensed to the public under the Apache License 2.0.
+
+
+mp=Map("pcimodem",translate("PCI Modem Server"))
+mp.description=translate("Modem Server For OpenWrt")
+
+s=mp:section(TypedSection,"service",translate("Base Setting"))
+s.anonymous = true
+
+enabled=s:option(Flag,"enabled",translate("Enable"))
+enabled.default=0
+enabled.rmempty=false
+
+apn=s:option(Value,"apn",translate("APN"))
+apn.rmempty=true
+
+pincode=s:option(Value,"pincode",translate("PIN"))
+pincode.rmempty=true
+
+username=s:option(Value,"username",translate("PAP/CHAP username"))
+username.rmempty=true
+
+password=s:option(Value,"password",translate("PAP/CHAP password"))
+password.rmempty=true
+
+auth=s:option(Value,"auth",translate("Authentication Type"))
+auth.rmempty=true
+auth:value("",translate("-- Please choose --"))
+auth:value("both","PAP/CHAP (both)")
+auth:value("pap","PAP")
+auth:value("chap","CHAP")
+auth:value("none","NONE")
+
+tool=s:option(Value,"tool",translate("Tools"))
+tool:value("quectel-CM","quectel-CM")
+tool.rmempty=true
+
+return mp
+
diff --git a/package/wwan/app/luci-app-pcimodem/po/zh_Hans/pcimodem.po b/package/wwan/app/luci-app-pcimodem/po/zh_Hans/pcimodem.po
new file mode 100644
index 000000000..fea348db3
--- /dev/null
+++ b/package/wwan/app/luci-app-pcimodem/po/zh_Hans/pcimodem.po
@@ -0,0 +1,24 @@
+msgid ""
+msgstr ""
+"Project-Id-Version: \n"
+"POT-Creation-Date: \n"
+"PO-Revision-Date: \n"
+"Last-Translator: dingpengyu \n"
+"Language-Team: \n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: zh_CN\n"
+"X-Generator: Poedit 2.3.1\n"
+
+msgid "Base Setting"
+msgstr "基本设置"
+
+msgid "PCI Modem Server"
+msgstr "PCI移动网络拨号服务"
+
+msgid "Modem Server For OpenWrt"
+msgstr "OpenWrt移动网络拨号服务"
+
+msgid "Tools"
+msgstr "拨号工具"
diff --git a/package/wwan/app/luci-app-pcimodem/root/etc/config/pcimodem b/package/wwan/app/luci-app-pcimodem/root/etc/config/pcimodem
new file mode 100644
index 000000000..05fad9b41
--- /dev/null
+++ b/package/wwan/app/luci-app-pcimodem/root/etc/config/pcimodem
@@ -0,0 +1,4 @@
+config service
+	option tool 'quectel-CM'
+	option enabled '0'
+
diff --git a/package/wwan/app/luci-app-pcimodem/root/etc/init.d/pcimodem b/package/wwan/app/luci-app-pcimodem/root/etc/init.d/pcimodem
new file mode 100755
index 000000000..f60124f11
--- /dev/null
+++ b/package/wwan/app/luci-app-pcimodem/root/etc/init.d/pcimodem
@@ -0,0 +1,75 @@
+#!/bin/sh /etc/rc.common
+# Copyright (C) 2006-2014 OpenWrt.org
+
+START=99
+STOP=16
+USE_PROCD=1
+# started and supervised by procd
+
+run_5g()
+{
+	local enabled
+	config_get_bool enabled $1 enabled
+
+	echo "run 5G" >> /tmp/log5g
+
+	if [ "$enabled" = "1" ]; then
+		local user
+		local password
+		local apn
+		local auth
+		local pincode
+		local tool
+
+		# echo "enable 5G" >> /tmp/log5g
+		config_get user $1 user
+		config_get password $1 password
+		config_get apn $1 apn
+		config_get auth $1 auth
+		config_get pincode $1 pincode
+		config_get tool $1 tool
+		config_get tty $1 tty
+		config_get atcmd $1 atcmd
+
+		if [ "$tool" = "at" ];then
+			at_tool "$atcmd" -d $tty
+		else
+			procd_open_instance
+			# open a procd instance; procd allows one application to run several instances
+			# (they can be inspected with "ubus call service list")
+			procd_set_param command $tool -i rmnet_mhi0 -s $apn
+			if [ "$password" != "" ];then
+				procd_append_param command $user $password $auth
+			fi
+			if [ "$pincode" != "" ]; then
+				procd_append_param command -p $pincode
+			fi
+			# procd_append_param command -f /tmp/4g.log
+			procd_set_param respawn
+			echo "quectel-CM has started."
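+			# Note (added for illustration): "procd_set_param respawn" above,
+			# given no arguments, uses procd's default respawn
+			# threshold/timeout/retry values, so a crashed quectel-CM
+			# instance is restarted automatically.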
+			procd_close_instance
+			# close the instance
+		fi
+
+	fi
+}
+
+
+service_triggers()
+{
+	procd_add_reload_trigger "pcimodem"
+}
+
+start_service() {
+	config_load pcimodem
+	config_foreach run_5g service
+}
+
+stop_service()
+{
+	echo "5G stop" >> /tmp/log5g
+	killall quectel-CM
+	echo "quectel-CM has stopped."
+}
+
+
diff --git a/package/wwan/app/luci-app-pcimodem/root/etc/uci-defaults/luci-pcimodem b/package/wwan/app/luci-app-pcimodem/root/etc/uci-defaults/luci-pcimodem
new file mode 100755
index 000000000..213c62cb6
--- /dev/null
+++ b/package/wwan/app/luci-app-pcimodem/root/etc/uci-defaults/luci-pcimodem
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+uci -q batch <<-EOF >/dev/null
+	delete ucitrack.@pcimodem[-1]
+	add ucitrack pcimodem
+	set ucitrack.@pcimodem[-1].init=pcimodem
+	commit ucitrack
+EOF
+
+rm -f /tmp/luci-indexcache
+exit 0
+
diff --git a/package/wwan/app/luci-app-spdmodem/Makefile b/package/wwan/app/luci-app-spdmodem/Makefile
new file mode 100755
index 000000000..252ec31e1
--- /dev/null
+++ b/package/wwan/app/luci-app-spdmodem/Makefile
@@ -0,0 +1,15 @@
+#
+# Copyright (C) 2015 OpenWrt.org
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information.
+#
+
+include $(TOPDIR)/rules.mk
+
+LUCI_TITLE:=SPD Modem Server
+LUCI_DEPENDS:=
+
+include $(TOPDIR)/feeds/luci/luci.mk
+
+# call BuildPackage - OpenWrt buildroot signature
diff --git a/package/wwan/app/luci-app-spdmodem/luasrc/controller/spdmodem.lua b/package/wwan/app/luci-app-spdmodem/luasrc/controller/spdmodem.lua
new file mode 100644
index 000000000..81e8ad9e0
--- /dev/null
+++ b/package/wwan/app/luci-app-spdmodem/luasrc/controller/spdmodem.lua
@@ -0,0 +1,9 @@
+module("luci.controller.spdmodem", package.seeall)
+
+function index()
+	if not nixio.fs.access("/etc/config/spdmodem") then
+		return
+	end
+
+	entry({"admin", "network", "spdmodem"}, cbi("spdmodem"), _("SPD Modem Server"), 80).dependent=false
+end
diff --git a/package/wwan/app/luci-app-spdmodem/luasrc/model/cbi/spdmodem.lua b/package/wwan/app/luci-app-spdmodem/luasrc/model/cbi/spdmodem.lua
new file mode 100644
index 000000000..3456c3757
--- /dev/null
+++ b/package/wwan/app/luci-app-spdmodem/luasrc/model/cbi/spdmodem.lua
@@ -0,0 +1,39 @@
+-- Copyright 2016 David Thornley
+-- Licensed to the public under the Apache License 2.0.
+
+
+mp=Map("spdmodem",translate("SPD Modem Server"))
+mp.description=translate("Modem Server For OpenWrt")
+
+s=mp:section(TypedSection,"service",translate("Base Setting"))
+s.anonymous = true
+
+enabled=s:option(Flag,"enabled",translate("Enable"))
+enabled.default=0
+enabled.rmempty=false
+
+apn=s:option(Value,"apn",translate("APN"))
+apn.rmempty=true
+
+pincode=s:option(Value,"pincode",translate("PIN"))
+pincode.rmempty=true
+
+username=s:option(Value,"username",translate("PAP/CHAP username"))
+username.rmempty=true
+
+password=s:option(Value,"password",translate("PAP/CHAP password"))
+password.rmempty=true
+
+auth=s:option(Value,"auth",translate("Authentication Type"))
+auth.rmempty=true
+auth:value("",translate("-- Please choose --"))
+auth:value("both","PAP/CHAP (both)")
+auth:value("pap","PAP")
+auth:value("chap","CHAP")
+auth:value("none","NONE")
+
+tool=s:option(Value,"tool",translate("Tools"))
+tool:value("quectel-CM","quectel-CM")
+tool.rmempty=true
+
+return mp
+
diff --git a/package/wwan/app/luci-app-spdmodem/po/zh_Hans/spdmodem.po b/package/wwan/app/luci-app-spdmodem/po/zh_Hans/spdmodem.po
new file mode 100644
index 000000000..b10da0fc4
--- /dev/null
+++ b/package/wwan/app/luci-app-spdmodem/po/zh_Hans/spdmodem.po
@@ -0,0 +1,24 @@
+msgid ""
+msgstr ""
+"Project-Id-Version: \n"
+"POT-Creation-Date: \n"
+"PO-Revision-Date: \n"
+"Last-Translator: dingpengyu \n"
+"Language-Team: \n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: zh_CN\n"
+"X-Generator: Poedit 2.3.1\n"
+
+msgid "Base Setting"
+msgstr "基本设置"
+
+msgid "SPD Modem Server"
+msgstr "SPD移动网络拨号服务"
+
+msgid "Modem Server For OpenWrt"
+msgstr "OpenWrt移动网络拨号服务"
+
+msgid "Tools"
+msgstr "拨号工具"
diff --git a/package/wwan/app/luci-app-spdmodem/root/etc/config/spdmodem b/package/wwan/app/luci-app-spdmodem/root/etc/config/spdmodem
new file mode 100644
index 000000000..05fad9b41
--- /dev/null
+++ b/package/wwan/app/luci-app-spdmodem/root/etc/config/spdmodem
@@ -0,0 +1,4 @@
+config service
+	option tool 'quectel-CM'
+	option enabled '0'
+
diff --git a/package/wwan/app/luci-app-spdmodem/root/etc/init.d/spdmodem b/package/wwan/app/luci-app-spdmodem/root/etc/init.d/spdmodem
new file mode 100755
index 000000000..eece321a6
--- /dev/null
+++ b/package/wwan/app/luci-app-spdmodem/root/etc/init.d/spdmodem
@@ -0,0 +1,75 @@
+#!/bin/sh /etc/rc.common
+# Copyright (C) 2006-2014 OpenWrt.org
+
+START=99
+STOP=16
+USE_PROCD=1
+# started and supervised by procd
+
+run_5g()
+{
+	local enabled
+	config_get_bool enabled $1 enabled
+
+	echo "run 5G" >> /tmp/log5g
+
+	if [ "$enabled" = "1" ]; then
+		local user
+		local password
+		local apn
+		local auth
+		local pincode
+		local tool
+
+		# echo "enable 5G" >> /tmp/log5g
+		config_get user $1 user
+		config_get password $1 password
+		config_get apn $1 apn
+		config_get auth $1 auth
+		config_get pincode $1 pincode
+		config_get tool $1 tool
+		config_get tty $1 tty
+		config_get atcmd $1 atcmd
+
+		if [ "$tool" = "at" ];then
+			at_tool "$atcmd" -d $tty
+		else
+			procd_open_instance
+			# open a procd instance; procd allows one application to run several instances
+			# (they can be inspected with "ubus call service list")
+			procd_set_param command $tool -i -s $apn
+			if [ "$password" != "" ];then
+				procd_append_param command $user $password $auth
+			fi
+			if [ "$pincode" != "" ]; then
+				procd_append_param command -p $pincode
+			fi
+			# procd_append_param command -f /tmp/4g.log
+			procd_set_param respawn
+			echo "quectel-CM has started."
+			procd_close_instance
+			# close the instance
+		fi
+
+	fi
+}
+
+
+service_triggers()
+{
+	procd_add_reload_trigger "spdmodem"
+}
+
+start_service() {
+	config_load spdmodem
+	config_foreach run_5g service
+}
+
+stop_service()
+{
+	echo "5G stop" >> /tmp/log5g
+	killall quectel-CM
+	echo "quectel-CM has stopped."
+}
+
+
diff --git a/package/wwan/app/luci-app-spdmodem/root/etc/uci-defaults/luci-spdmodem b/package/wwan/app/luci-app-spdmodem/root/etc/uci-defaults/luci-spdmodem
new file mode 100755
index 000000000..1bfe6e48c
--- /dev/null
+++ b/package/wwan/app/luci-app-spdmodem/root/etc/uci-defaults/luci-spdmodem
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+uci -q batch <<-EOF >/dev/null
+	delete ucitrack.@spdmodem[-1]
+	add ucitrack spdmodem
+	set ucitrack.@spdmodem[-1].init=spdmodem
+	commit ucitrack
+EOF
+
+rm -f /tmp/luci-indexcache
+exit 0
+
diff --git a/package/wwan/app/luci-app-usbmodem/Makefile b/package/wwan/app/luci-app-usbmodem/Makefile
new file mode 100755
index 000000000..334901993
--- /dev/null
+++ b/package/wwan/app/luci-app-usbmodem/Makefile
@@ -0,0 +1,20 @@
+#
+# Copyright (C) 2015 OpenWrt.org
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information.
+#
+
+include $(TOPDIR)/rules.mk
+
+LUCI_TITLE:=Modem Server
+LUCI_DEPENDS:=+luci-compat +kmod-usb-net +kmod-usb-net-cdc-ether +kmod-usb-acm \
+	+kmod-usb-net-qmi-wwan +kmod-usb-net-rndis +kmod-usb-serial-qualcomm \
+	+kmod-usb-net-sierrawireless +kmod-usb-ohci +kmod-usb-serial \
+	+kmod-usb-serial-option +kmod-usb-wdm \
+	+kmod-usb2 +kmod-usb3 \
+	+quectel-CM-5G +kmod-qmi_wwan_q +kmod-usb-net-cdc-mbim
+
+include $(TOPDIR)/feeds/luci/luci.mk
+
+# call BuildPackage - OpenWrt buildroot signature
diff --git a/package/wwan/app/luci-app-usbmodem/luasrc/controller/usbmodem.lua b/package/wwan/app/luci-app-usbmodem/luasrc/controller/usbmodem.lua
new file mode 100644
index 000000000..9021a61c5
--- /dev/null
+++ b/package/wwan/app/luci-app-usbmodem/luasrc/controller/usbmodem.lua
@@ -0,0 +1,9 @@
+module("luci.controller.usbmodem", package.seeall)
+
+function index()
+	if not nixio.fs.access("/etc/config/usbmodem") then
+		return
+	end
+
+	entry({"admin", "network", "usbmodem"}, cbi("usbmodem"), _("USB Modem Server"), 80).dependent=false
+end
diff --git a/package/wwan/app/luci-app-usbmodem/luasrc/model/cbi/usbmodem.lua b/package/wwan/app/luci-app-usbmodem/luasrc/model/cbi/usbmodem.lua
new file mode 100644
index 000000000..11e4df9a8
--- /dev/null
+++ b/package/wwan/app/luci-app-usbmodem/luasrc/model/cbi/usbmodem.lua
@@ -0,0 +1,51 @@
+-- Copyright 2016 David Thornley
+-- Licensed to the public under the Apache License 2.0.
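+-- Note (added for illustration): unlike the other modem apps in this series,
+-- this model also offers a "device" option; nixio.fs.glob() below returns an
+-- iterator over matching /dev/cdc-wdm* nodes, each of which is added as a
+-- suggested value for that option.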
+
+
+mp=Map("usbmodem",translate("USB Modem Server"))
+mp.description=translate("Modem Server For OpenWrt")
+
+s=mp:section(TypedSection,"service",translate("Base Setting"))
+s.anonymous = true
+
+enabled=s:option(Flag,"enabled",translate("Enable"))
+enabled.default=0
+enabled.rmempty=false
+
+device=s:option(Value, "device", translate("Modem device"))
+device.rmempty = false
+
+local device_suggestions = nixio.fs.glob("/dev/cdc-wdm*")
+
+if device_suggestions then
+	local node
+	for node in device_suggestions do
+		device:value(node)
+	end
+end
+
+apn=s:option(Value,"apn",translate("APN"))
+apn.rmempty=true
+
+pincode=s:option(Value,"pincode",translate("PIN"))
+pincode.rmempty=true
+
+username=s:option(Value,"username",translate("PAP/CHAP username"))
+username.rmempty=true
+
+password=s:option(Value,"password",translate("PAP/CHAP password"))
+password.rmempty=true
+
+auth=s:option(Value,"auth",translate("Authentication Type"))
+auth.rmempty=true
+auth:value("",translate("-- Please choose --"))
+auth:value("both","PAP/CHAP (both)")
+auth:value("pap","PAP")
+auth:value("chap","CHAP")
+auth:value("none","NONE")
+
+tool=s:option(Value,"tool",translate("Tools"))
+tool:value("quectel-CM","quectel-CM")
+tool.rmempty=true
+
+return mp
+
diff --git a/package/wwan/app/luci-app-usbmodem/po/zh_Hans/usbmodem.po b/package/wwan/app/luci-app-usbmodem/po/zh_Hans/usbmodem.po
new file mode 100644
index 000000000..6459a0078
--- /dev/null
+++ b/package/wwan/app/luci-app-usbmodem/po/zh_Hans/usbmodem.po
@@ -0,0 +1,24 @@
+msgid ""
+msgstr ""
+"Project-Id-Version: \n"
+"POT-Creation-Date: \n"
+"PO-Revision-Date: \n"
+"Last-Translator: dingpengyu \n"
+"Language-Team: \n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: zh_CN\n"
+"X-Generator: Poedit 2.3.1\n"
+
+msgid "Base Setting"
+msgstr "基本设置"
+
+msgid "USB Modem Server"
+msgstr "USB移动网络拨号服务"
+
+msgid "Modem Server For OpenWrt"
+msgstr "OpenWrt移动网络拨号服务"
+
+msgid "Tools"
+msgstr "拨号工具"
diff --git a/package/wwan/app/luci-app-usbmodem/root/etc/config/usbmodem b/package/wwan/app/luci-app-usbmodem/root/etc/config/usbmodem
new file mode 100644
index 000000000..8d7627b3b
--- /dev/null
+++ b/package/wwan/app/luci-app-usbmodem/root/etc/config/usbmodem
@@ -0,0 +1,5 @@
+config service
+	option tool 'quectel-CM'
+	option device '/dev/cdc-wdm0'
+	option enabled '0'
+
diff --git a/package/wwan/app/luci-app-usbmodem/root/etc/init.d/usbmodem b/package/wwan/app/luci-app-usbmodem/root/etc/init.d/usbmodem
new file mode 100755
index 000000000..e5f108119
--- /dev/null
+++ b/package/wwan/app/luci-app-usbmodem/root/etc/init.d/usbmodem
@@ -0,0 +1,80 @@
+#!/bin/sh /etc/rc.common
+# Copyright (C) 2006-2014 OpenWrt.org
+
+START=99
+STOP=16
+USE_PROCD=1
+# started and supervised by procd
+
+run_4g()
+{
+	local enabled
+	config_get_bool enabled $1 enabled
+
+	echo "run 4G" >> /tmp/log4g
+
+	if [ "$enabled" = "1" ]; then
+		local user
+		local password
+		local apn
+		local auth
+		local pincode
+		local device
+		local tool
+
+		# echo "enable 4G" >> /tmp/log4g
+		config_get user $1 user
+		config_get password $1 password
+		config_get apn $1 apn
+		config_get auth $1 auth
+		config_get pincode $1 pincode
+		config_get device $1 device
+		config_get tool $1 tool
+		config_get tty $1 tty
+		config_get atcmd $1 atcmd
+
+		devname="$(basename "$device")"
+		devpath="$(readlink -f /sys/class/usbmisc/$devname/device/)"
+		ifname="$( ls "$devpath"/net )"
+
+		if [ "$tool" = "at" ];then
+			at_tool "$atcmd" -d $tty
+		else
+			procd_open_instance
+			# open a procd instance; procd allows one application to run several instances
+			# (they can be inspected with "ubus call service list")
+			procd_set_param command $tool -i $ifname -s $apn
+			if [ "$password" != "" ];then
+				procd_append_param command $user $password $auth
+			fi
+			if [ "$pincode" != "" ]; then
+				procd_append_param command -p $pincode
+			fi
+			# procd_append_param command -f /tmp/4g.log
+			procd_set_param respawn
+			echo "quectel-CM has started."
+			procd_close_instance
+			# close the instance
+		fi
+
+	fi
+}
+
+
+service_triggers()
+{
+	procd_add_reload_trigger "usbmodem"
+}
+
+start_service() {
+	config_load usbmodem
+	config_foreach run_4g service
+}
+
+stop_service()
+{
+	echo "4G stop" >> /tmp/log4g
+	killall quectel-CM
+	echo "quectel-CM has stopped."
+}
+
diff --git a/package/wwan/app/luci-app-usbmodem/root/etc/uci-defaults/luci-usbmodem b/package/wwan/app/luci-app-usbmodem/root/etc/uci-defaults/luci-usbmodem
new file mode 100755
index 000000000..ebc3b752d
--- /dev/null
+++ b/package/wwan/app/luci-app-usbmodem/root/etc/uci-defaults/luci-usbmodem
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+uci -q batch <<-EOF >/dev/null
+	delete ucitrack.@usbmodem[-1]
+	add ucitrack usbmodem
+	set ucitrack.@usbmodem[-1].init=usbmodem
+	commit ucitrack
+EOF
+
+rm -f /tmp/luci-indexcache
+exit 0
+
diff --git a/package/wwan/app/luci-proto-3x/Makefile b/package/wwan/app/luci-proto-3x/Makefile
new file mode 100755
index 000000000..ee45aba93
--- /dev/null
+++ b/package/wwan/app/luci-proto-3x/Makefile
@@ -0,0 +1,53 @@
+#
+# Copyright (C) 2007-2013 OpenWrt.org
+# Copyright (C) 2010 Vertical Communications
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information.
+#
+
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=luci-proto-3x
+PKG_VERSION:=1.0
+PKG_RELEASE:=1
+PKG_MAINTAINER:=Dairyman
+PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)-$(PKG_RELEASE)
+
+include $(INCLUDE_DIR)/package.mk
+
+define Package/luci-proto-3x/Default
+  VERSION:=$(PKG_VERSION)-$(PKG_RELEASE)
+  URL:=http://openwrt.org/
+  MAINTAINER:=Dairyman
+endef
+
+define Package/luci-proto-3x
+$(call Package/luci-proto-3x/Default)
+  SECTION:=net
+  CATEGORY:=ROOter
+  SUBMENU:=Protocols
+  TITLE:=Support for 3x
+endef
+
+define Package/luci-proto-3x/description
+ This package contains LuCI support for 3x
+endef
+
+define Build/Prepare
+	mkdir -p $(PKG_BUILD_DIR)
+endef
+
+define Build/Configure
+endef
+
+define Build/Compile/Default
+endef
+
+Build/Compile = $(Build/Compile/Default)
+
+define Package/luci-proto-3x/install
+	$(CP) ./files/* $(1)/
+endef
+
+$(eval $(call BuildPackage,luci-proto-3x))
diff --git a/package/wwan/app/luci-proto-3x/files/usr/lib/lua/luci/model/cbi/admin_network/proto_3x.lua b/package/wwan/app/luci-proto-3x/files/usr/lib/lua/luci/model/cbi/admin_network/proto_3x.lua
new file mode 100755
index 000000000..59bf2f646
--- /dev/null
+++ b/package/wwan/app/luci-proto-3x/files/usr/lib/lua/luci/model/cbi/admin_network/proto_3x.lua
@@ -0,0 +1,146 @@
+-- Copyright 2011 Jo-Philipp Wich <jow@openwrt.org>
+-- Licensed to the public under the Apache License 2.0.
+
+local map, section, net = ...
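+-- Note (added for illustration): netifd protocol templates such as this one
+-- are loaded by LuCI's interface configuration page, which passes the
+-- enclosing CBI Map, the interface section and the network model object in
+-- as varargs (received above as map, section, net).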
+ +local device, apn, service, pincode, username, password, dialnumber +local ipv6, maxwait, defaultroute, metric, peerdns, dns, + keepalive_failure, keepalive_interval, demand + + +device = section:taboption("general", Value, "device", translate("Modem device")) +device.rmempty = false + +local device_suggestions = nixio.fs.glob("/dev/tty[A-Z]*") + or nixio.fs.glob("/dev/tts/*") + +if device_suggestions then + local node + for node in device_suggestions do + device:value(node) + end +end + + +service = section:taboption("general", Value, "service", translate("Service Type")) +service:value("", translate("-- Please choose --")) +service:value("umts", "UMTS/GPRS") +service:value("umts_only", translate("UMTS only")) +service:value("gprs_only", translate("GPRS only")) +service:value("evdo", "CDMA/EV-DO") + + +apn = section:taboption("general", Value, "apn", translate("APN")) + + +pincode = section:taboption("general", Value, "pincode", translate("PIN")) + + +username = section:taboption("general", Value, "username", translate("PAP/CHAP username")) + + +password = section:taboption("general", Value, "password", translate("PAP/CHAP password")) +password.password = true + +dialnumber = section:taboption("general", Value, "dialnumber", translate("Dial number")) +dialnumber.placeholder = "*99***1#" + +if luci.model.network:has_ipv6() then + + ipv6 = section:taboption("advanced", Flag, "ipv6", + translate("Enable IPv6 negotiation on the PPP link")) + + ipv6.default = ipv6.disabled + +end + + +maxwait = section:taboption("advanced", Value, "maxwait", + translate("Modem init timeout"), + translate("Maximum amount of seconds to wait for the modem to become ready")) + +maxwait.placeholder = "20" +maxwait.datatype = "min(1)" + + +defaultroute = section:taboption("advanced", Flag, "defaultroute", + translate("Use default gateway"), + translate("If unchecked, no default route is configured")) + +defaultroute.default = defaultroute.enabled + + +metric = section:taboption("advanced", Value, "metric", + translate("Use gateway metric")) + +metric.placeholder = "0" +metric.datatype = "uinteger" +metric:depends("defaultroute", defaultroute.enabled) + + +peerdns = section:taboption("advanced", Flag, "peerdns", + translate("Use DNS servers advertised by peer"), + translate("If unchecked, the advertised DNS server addresses are ignored")) + +peerdns.default = peerdns.enabled + + +dns = section:taboption("advanced", DynamicList, "dns", + translate("Use custom DNS servers")) + +dns:depends("peerdns", "") +dns.datatype = "ipaddr" +dns.cast = "string" + + +keepalive_failure = section:taboption("advanced", Value, "_keepalive_failure", + translate("LCP echo failure threshold"), + translate("Presume peer to be dead after given amount of LCP echo failures, use 0 to ignore failures")) + +function keepalive_failure.cfgvalue(self, section) + local v = m:get(section, "keepalive") + if v and #v > 0 then + return tonumber(v:match("^(%d+)[ ,]+%d+") or v) + end +end + +function keepalive_failure.write() end +function keepalive_failure.remove() end + +keepalive_failure.placeholder = "0" +keepalive_failure.datatype = "uinteger" + + +keepalive_interval = section:taboption("advanced", Value, "_keepalive_interval", + translate("LCP echo interval"), + translate("Send LCP echo requests at the given interval in seconds, only effective in conjunction with failure threshold")) + +function keepalive_interval.cfgvalue(self, section) + local v = m:get(section, "keepalive") + if v and #v > 0 then + return tonumber(v:match("^%d+[ ,]+(%d+)")) + 
end +end + +function keepalive_interval.write(self, section, value) + local f = tonumber(keepalive_failure:formvalue(section)) or 0 + local i = tonumber(value) or 5 + if i < 1 then i = 1 end + if f > 0 then + m:set(section, "keepalive", "%d %d" %{ f, i }) + else + m:del(section, "keepalive") + end +end + +keepalive_interval.remove = keepalive_interval.write +keepalive_interval.placeholder = "5" +keepalive_interval.datatype = "min(1)" + + +demand = section:taboption("advanced", Value, "demand", + translate("Inactivity timeout"), + translate("Close inactive connection after the given amount of seconds, use 0 to persist connection")) + +demand.placeholder = "0" +demand.datatype = "uinteger" diff --git a/package/wwan/app/luci-proto-3x/files/usr/lib/lua/luci/model/network/proto_3x.lua b/package/wwan/app/luci-proto-3x/files/usr/lib/lua/luci/model/network/proto_3x.lua new file mode 100644 index 000000000..e2ff6b9c8 --- /dev/null +++ b/package/wwan/app/luci-proto-3x/files/usr/lib/lua/luci/model/network/proto_3x.lua @@ -0,0 +1,49 @@ +-- Copyright 2018 Florian Eckert +-- Licensed to the public under the Apache License 2.0. + +local netmod = luci.model.network +local interface = luci.model.network.interface + +local proto = netmod:register_protocol("3x") + +function proto.get_i18n(self) + return luci.i18n.translate("UMTS/GPRS/EV-DO") +end + +function proto.ifname(self) + return "3x-" .. self.sid +end + +function proto.get_interface(self) + return interface(self:ifname(), self) +end + +function proto.is_installed(self) + return nixio.fs.access("/lib/netifd/proto/3x.sh") +end + +function proto.opkg_package(self) + return "comgt" +end + +function proto.is_floating(self) + return true +end + +function proto.is_virtual(self) + return true +end + +function proto.get_interfaces(self) + return nil +end + +function proto.contains_interface(self, ifc) + if self:is_floating() then + return (netmod:ifnameof(ifc) == self:ifname()) + else + return netmod.protocol.contains_interface(self, ifc) + end +end + +netmod:register_pattern_virtual("^3x%-%w") diff --git a/package/wwan/app/luci-proto-3x/files/www/luci-static/resources/protocol/3x.js b/package/wwan/app/luci-proto-3x/files/www/luci-static/resources/protocol/3x.js new file mode 100644 index 000000000..a1c213b9e --- /dev/null +++ b/package/wwan/app/luci-proto-3x/files/www/luci-static/resources/protocol/3x.js @@ -0,0 +1,11 @@ +'use strict';'require rpc';'require uci';'require form';'require network';var callFileList=rpc.declare({object:'file',method:'list',params:['path'],expect:{entries:[]},filter:function(list,params){var rv=[];for(var i=0;i0) +uci.set('network',section_id,'keepalive','%d %d'.format(f,i));else +uci.unset('network',section_id,'keepalive');} +return network.registerProtocol('3x',{getI18n:function(){return _('UMTS/GPRS/EV-DO');},getIfname:function(){return this._ubus('l3_device')||'3x-%s'.format(this.sid);},getOpkgPackage:function(){return'comgt';},isFloating:function(){return true;},isVirtual:function(){return true;},getDevices:function(){return null;},containsDevice:function(ifname){return(network.getIfnameOf(ifname)==this.getIfname());},renderFormOptions:function(s){var o;o=s.taboption('general',form.Value,'device',_('Modem device'));o.rmempty=false;o.load=function(section_id){return callFileList('/dev/').then(L.bind(function(devices){for(var i=0;i +-- Licensed to the public under the Apache License 2.0. + +local map, section, net = ... 
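+-- Note (illustrative, not part of the original sources): the options
+-- declared below edit an interface section in /etc/config/network; a
+-- minimal example of the result (values are example placeholders):
+--
+--   config interface 'wan'
+--       option proto  'mbim'
+--       option device '/dev/cdc-wdm0'
+--       option apn    'internet'
+--       option auth   'none'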
+ +local device, apn, pincode, username, password +local auth, ipv6 + + +device = section:taboption("general", Value, "device", translate("Modem device")) +device.rmempty = false + +local device_suggestions = nixio.fs.glob("/dev/cdc-wdm*") + +if device_suggestions then + local node + for node in device_suggestions do + device:value(node) + end +end + + +apn = section:taboption("general", Value, "apn", translate("APN")) + + +pincode = section:taboption("general", Value, "pincode", translate("PIN")) + + +username = section:taboption("general", Value, "username", translate("PAP/CHAP username")) + + +password = section:taboption("general", Value, "password", translate("PAP/CHAP password")) +password.password = true + +auth = section:taboption("general", Value, "auth", translate("Authentication Type")) +auth:value("", translate("-- Please choose --")) +auth:value("both", "PAP/CHAP (both)") +auth:value("pap", "PAP") +auth:value("chap", "CHAP") +auth:value("none", "NONE") + +if luci.model.network:has_ipv6() then + ipv6 = section:taboption("advanced", Flag, "ipv6", translate("Enable IPv6 negotiation")) + ipv6.default = ipv6.disabled +end \ No newline at end of file diff --git a/package/wwan/app/luci-proto-mbim/files/usr/lib/lua/luci/model/network/proto_mbim.lua b/package/wwan/app/luci-proto-mbim/files/usr/lib/lua/luci/model/network/proto_mbim.lua new file mode 100755 index 000000000..3007480fc --- /dev/null +++ b/package/wwan/app/luci-proto-mbim/files/usr/lib/lua/luci/model/network/proto_mbim.lua @@ -0,0 +1,55 @@ +-- Copyright 2016 David Thornley +-- Licensed to the public under the Apache License 2.0. + +local netmod = luci.model.network +local interface = luci.model.network.interface +local proto = netmod:register_protocol("mbim") + +function proto.get_i18n(self) + return luci.i18n.translate("MBIM Cellular") +end + +function proto.ifname(self) + local base = netmod._M.protocol + local ifname = base.ifname(self) -- call base class "protocol.ifname(self)" + + -- Note: ifname might be nil if the adapter could not be determined through ubus (default name to mbim-wan in this case) + if ifname == nil then + ifname = "mbim-" .. 
self.sid + end + return ifname +end + +function proto.get_interface(self) + return interface(self:ifname(), self) +end + +function proto.opkg_package(self) + return "rmbim" +end + +function proto.is_installed(self) + return nixio.fs.access("/lib/netifd/proto/mbim.sh") +end + +function proto.is_floating(self) + return true +end + +function proto.is_virtual(self) + return true +end + +function proto.get_interfaces(self) + return nil +end + +function proto.contains_interface(self, ifc) + return (netmod:ifnameof(ifc) == self:ifname()) +end + +netmod:register_pattern_virtual("^mbim%-%w") + +netmod:register_error_code("CALL_FAILED", luci.i18n.translate("Call failed")) +netmod:register_error_code("NO_CID", luci.i18n.translate("Unable to obtain client ID")) +netmod:register_error_code("PLMN_FAILED", luci.i18n.translate("Setting PLMN failed")) diff --git a/package/wwan/app/luci-proto-mbim/files/www/luci-static/resources/protocol/mbim.js b/package/wwan/app/luci-proto-mbim/files/www/luci-static/resources/protocol/mbim.js new file mode 100644 index 000000000..337cdd364 --- /dev/null +++ b/package/wwan/app/luci-proto-mbim/files/www/luci-static/resources/protocol/mbim.js @@ -0,0 +1,107 @@ +'use strict'; +'require rpc'; +'require form'; +'require network'; + +var callFileList = rpc.declare({ + object: 'file', + method: 'list', + params: [ 'path' ], + expect: { entries: [] }, + filter: function(list, params) { + var rv = []; + for (var i = 0; i < list.length; i++) + if (list[i].name.match(/^cdc-wdm/)) + rv.push(params.path + list[i].name); + return rv.sort(); + } +}); + +network.registerPatternVirtual(/^mbim-.+$/); +network.registerErrorCode('CALL_FAILED', _('Call failed')); +network.registerErrorCode('NO_CID', _('Unable to obtain client ID')); +network.registerErrorCode('PLMN_FAILED', _('Setting PLMN failed')); + +return network.registerProtocol('mbim', { + getI18n: function() { + return _('MBIM Cellular'); + }, + + getIfname: function() { + return this._ubus('l3_device') || 'mbim-%s'.format(this.sid); + }, + + getOpkgPackage: function() { + return 'rmbim'; + }, + + isFloating: function() { + return true; + }, + + isVirtual: function() { + return true; + }, + + getDevices: function() { + return null; + }, + + containsDevice: function(ifname) { + return (network.getIfnameOf(ifname) == this.getIfname()); + }, + + renderFormOptions: function(s) { + var dev = this.getL3Device() || this.getDevice(), o; + + o = s.taboption('general', form.Value, 'device', _('Modem device')); + o.rmempty = false; + o.load = function(section_id) { + return callFileList('/dev/').then(L.bind(function(devices) { + for (var i = 0; i < devices.length; i++) + this.value(devices[i]); + return form.Value.prototype.load.apply(this, [section_id]); + }, this)); + }; + + s.taboption('general', form.Value, 'apn', _('APN')); + s.taboption('general', form.Value, 'pincode', _('PIN')); + + o = s.taboption('general', form.ListValue, 'auth', _('Authentication Type')); + o.value('both', 'PAP/CHAP'); + o.value('pap', 'PAP'); + o.value('chap', 'CHAP'); + o.value('none', 'NONE'); + o.default = 'none'; + + o = s.taboption('general', form.Value, 'username', _('PAP/CHAP username')); + o.depends('auth', 'pap'); + o.depends('auth', 'chap'); + o.depends('auth', 'both'); + + o = s.taboption('general', form.Value, 'password', _('PAP/CHAP password')); + o.depends('auth', 'pap'); + o.depends('auth', 'chap'); + o.depends('auth', 'both'); + o.password = true; + + if (L.hasSystemFeature('ipv6')) { + o = s.taboption('advanced', form.Flag, 'ipv6', _('Enable IPv6 
negotiation')); + o.default = o.disabled; + } + + o = s.taboption('advanced', form.Value, 'delay', _('Modem init timeout'), _('Maximum amount of seconds to wait for the modem to become ready')); + o.placeholder = '10'; + o.datatype = 'min(1)'; + + o = s.taboption('advanced', form.Value, 'mtu', _('Override MTU')); + o.placeholder = dev ? (dev.getMTU() || '1500') : '1500'; + o.datatype = 'max(9200)'; + + o = s.taboption('general', form.ListValue, 'pdptype', _('PDP Type')); + o.value('ipv4v6', 'IPv4/IPv6'); + o.value('ipv4', 'IPv4'); + o.value('ipv6', 'IPv6'); + o.default = 'ipv4v6'; + } +}); diff --git a/package/wwan/driver/quectel_Gobinet/Makefile b/package/wwan/driver/quectel_Gobinet/Makefile new file mode 100755 index 000000000..5457008a4 --- /dev/null +++ b/package/wwan/driver/quectel_Gobinet/Makefile @@ -0,0 +1,47 @@ +# +# Copyright (C) 2015 OpenWrt.org +# +# This is free software, licensed under the GNU General Public License v2. +# See /LICENSE for more information. +# + +include $(TOPDIR)/rules.mk + +PKG_NAME:=gobinet +PKG_VERSION:=1.6.3 +PKG_RELEASE:=1 + +include $(INCLUDE_DIR)/kernel.mk +include $(INCLUDE_DIR)/package.mk + +define KernelPackage/gobinet + SUBMENU:=Gobinet Support + TITLE:=Quectel Linux USB Gobinet Driver + DEPENDS:=+kmod-usb-net + FILES:=$(PKG_BUILD_DIR)/GobiNet.ko + AUTOLOAD:=$(call AutoLoad,81,GobiNet) +endef + +define KernelPackage/gobinet/description + Quectel Linux USB gobinet Driver +endef + +MAKE_OPTS:= \ + ARCH="$(LINUX_KARCH)" \ + CROSS_COMPILE="$(TARGET_CROSS)" \ + CXXFLAGS="$(TARGET_CXXFLAGS)" \ + M="$(PKG_BUILD_DIR)" \ + $(EXTRA_KCONFIG) + +define Build/Prepare + mkdir -p $(PKG_BUILD_DIR) + $(CP) ./src/* $(PKG_BUILD_DIR)/ +endef + +define Build/Compile + $(MAKE) -C "$(LINUX_DIR)" \ + $(MAKE_OPTS) \ + modules +endef + +$(eval $(call KernelPackage,gobinet)) diff --git a/package/wwan/driver/quectel_Gobinet/src/GobiUSBNet.c b/package/wwan/driver/quectel_Gobinet/src/GobiUSBNet.c new file mode 100644 index 000000000..689c1733a --- /dev/null +++ b/package/wwan/driver/quectel_Gobinet/src/GobiUSBNet.c @@ -0,0 +1,3202 @@ +/*=========================================================================== +FILE: + GobiUSBNet.c + +DESCRIPTION: + Qualcomm USB Network device for Gobi 3000 + +FUNCTIONS: + GobiNetSuspend + GobiNetResume + GobiNetDriverBind + GobiNetDriverUnbind + GobiUSBNetURBCallback + GobiUSBNetTXTimeout + GobiUSBNetAutoPMThread + GobiUSBNetStartXmit + GobiUSBNetOpen + GobiUSBNetStop + GobiUSBNetProbe + GobiUSBNetModInit + GobiUSBNetModExit + +Copyright (c) 2011, Code Aurora Forum. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Code Aurora Forum nor + the names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. +===========================================================================*/ + +//--------------------------------------------------------------------------- +// Include Files +//--------------------------------------------------------------------------- + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#if LINUX_VERSION_CODE > KERNEL_VERSION(3,16,0) //8b094cd03b4a3793220d8d8d86a173bfea8c285b +#include +#else +#define timespec64 timespec +#define ktime_get_ts64 ktime_get_ts +#define timespec64_sub timespec_sub +#endif + +#include "Structs.h" +#include "QMIDevice.h" +#include "QMI.h" + +#ifndef ETH_P_MAP +#define ETH_P_MAP 0xDA1A +#endif + +#if (ETH_P_MAP == 0x00F9) +#undef ETH_P_MAP +#define ETH_P_MAP 0xDA1A +#endif + +//----------------------------------------------------------------------------- +// Definitions +//----------------------------------------------------------------------------- + +// Version Information +//add new module or new feature, increase major version. fix bug, increase minor version +#define VERSION_NUMBER "V1.6.3" +#define DRIVER_VERSION "Quectel_Linux&Android_GobiNet_Driver_"VERSION_NUMBER +#define DRIVER_AUTHOR "Qualcomm Innovation Center" +#define DRIVER_DESC "GobiNet" +static const char driver_name[] = "GobiNet"; + +// Debug flag +int quec_debug = 0; + +// Allow user interrupts +//int interruptible = 1; + +// Number of IP packets which may be queued up for transmit +static int txQueueLength = 100; + +// Class should be created during module init, so needs to be global +static struct class * gpClass; + +static const unsigned char ec20_mac[ETH_ALEN] = {0x02, 0x50, 0xf3, 0x00, 0x00, 0x00}; +static const unsigned char default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3, 0x00, 0x00, 0x00}; +static const unsigned char node_id[ETH_ALEN] = {0x02, 0x50, 0xf4, 0x00, 0x00, 0x00}; +//static const u8 broadcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + +//setup data call by "AT$QCRMCALL=1,1" +static uint __read_mostly qcrmcall_mode = 0; +module_param( qcrmcall_mode, uint, S_IRUGO | S_IWUSR ); + +static struct sk_buff * ether_to_ip_fixup(struct net_device *dev, struct sk_buff *skb) { + const struct ethhdr *ehdr; + + skb_reset_mac_header(skb); + ehdr = eth_hdr(skb); + + if (ehdr->h_proto == htons(ETH_P_IP)) { + if (unlikely(skb->len <= (sizeof(struct ethhdr) + sizeof(struct iphdr)))) { + goto drop_skb; + } + } + else if (ehdr->h_proto == htons(ETH_P_IPV6)) { + if (unlikely(skb->len <= (sizeof(struct ethhdr) + sizeof(struct ipv6hdr)))) { + goto drop_skb; + } + } + else { + DBG("%s skb h_proto is %04x\n", dev->name, ntohs(ehdr->h_proto)); + goto drop_skb; + } + + if (unlikely(skb_pull(skb, ETH_HLEN))) + return skb; + +drop_skb: + return NULL; +} + +//#define QUECTEL_REMOVE_TX_ZLP +#define USB_CDC_SET_REMOVE_TX_ZLP_COMMAND 0x5D + +//#define QUECTEL_WWAN_MULTI_PACKAGES + +#ifdef QUECTEL_WWAN_MULTI_PACKAGES +static uint __read_mostly rx_packets = 10; +module_param( rx_packets, uint, S_IRUGO | S_IWUSR ); + 
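+/*
+ * Note (illustrative usage, not from the original source): rx_packets above
+ * (compiled only when QUECTEL_WWAN_MULTI_PACKAGES is defined) follows the
+ * same pattern as the qcrmcall_mode and qmap_mode parameters in this driver:
+ * module_param() with mode S_IRUGO | S_IWUSR allows setting it at load time,
+ * e.g. "insmod GobiNet.ko qmap_mode=1", and changing it at runtime through
+ * /sys/module/GobiNet/parameters/.
+ */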
+#define USB_CDC_SET_MULTI_PACKAGE_COMMAND (0x5C) +#define QUEC_NET_MSG_SPEC (0x80) +#define QUEC_NET_MSG_ID_IP_DATA (0x00) + +struct multi_package_config { + __le32 enable; + __le32 package_max_len; + __le32 package_max_count_in_queue; + __le32 timeout; +} __packed; + +struct quec_net_package_header { + unsigned char msg_spec; + unsigned char msg_id; + unsigned short payload_len; + unsigned char reserve[16]; +} __packed; +#endif + +#ifdef QUECTEL_WWAN_QMAP +/* + Quectel_WCDMA<E_Linux_USB_Driver_User_Guide_V1.9.pdf + 5.6. Test QMAP on GobiNet or QMI WWAN + 0 - no QMAP + 1 - QMAP (Aggregation protocol) + X - QMAP (Multiplexing and Aggregation protocol) +*/ +static uint __read_mostly qmap_mode = 0; +module_param( qmap_mode, uint, S_IRUGO | S_IWUSR ); + +struct qmap_hdr { + u8 cd_rsvd_pad; + u8 mux_id; + u16 pkt_len; +} __packed; + +enum rmnet_map_v5_header_type { + RMNET_MAP_HEADER_TYPE_UNKNOWN, + RMNET_MAP_HEADER_TYPE_COALESCING = 0x1, + RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD = 0x2, + RMNET_MAP_HEADER_TYPE_ENUM_LENGTH +}; + +/* Main QMAP header */ +struct rmnet_map_header { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 pad_len:6; + u8 next_hdr:1; + u8 cd_bit:1; +#elif defined (__BIG_ENDIAN_BITFIELD) + u8 cd_bit:1; + u8 next_hdr:1; + u8 pad_len:6; +#else +#error "Please fix " +#endif + u8 mux_id; + __be16 pkt_len; +} __aligned(1); + +/* QMAP v5 headers */ +struct rmnet_map_v5_csum_header { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 next_hdr:1; + u8 header_type:7; + u8 hw_reserved:7; + u8 csum_valid_required:1; +#elif defined (__BIG_ENDIAN_BITFIELD) + u8 header_type:7; + u8 next_hdr:1; + u8 csum_valid_required:1; + u8 hw_reserved:7; +#else +#error "Please fix " +#endif + __be16 reserved; +} __aligned(1); + +struct qmap_priv { + struct net_device *real_dev; + struct net_device *self_dev; + uint qmap_version; + uint offset_id; + uint mux_id; + uint link_state; + +#if defined(QUECTEL_UL_DATA_AGG) + /* QMIWDS_ADMIN_SET_DATA_FORMAT_RESP TLV_0x17 and TLV_0x18 */ + uint ul_data_aggregation_max_datagrams; //UplinkDataAggregationMaxDatagramsTlv + uint ul_data_aggregation_max_size; //UplinkDataAggregationMaxSizeTlv + uint dl_minimum_padding; //0x1A + + spinlock_t agg_lock; + struct sk_buff *agg_skb; + unsigned agg_count; + struct timespec64 agg_time; + struct hrtimer agg_hrtimer; + struct work_struct agg_wq; +#endif + +#ifdef QUECTEL_BRIDGE_MODE + int m_bridge_mode; + uint m_bridge_ipv4; + unsigned char mHostMAC[6]; +#endif +}; + +#ifdef QUECTEL_BRIDGE_MODE +static int is_qmap_netdev(const struct net_device *netdev); +#endif + +#endif + +#ifdef QUECTEL_BRIDGE_MODE +static int __read_mostly bridge_mode = 0/*|BIT(1)*/; +module_param( bridge_mode, int, S_IRUGO | S_IWUSR ); + +static int bridge_arp_reply(struct net_device *net, struct sk_buff *skb, uint bridge_ipv4) { + struct arphdr *parp; + u8 *arpptr, *sha; + u8 sip[4], tip[4], ipv4[4]; + struct sk_buff *reply = NULL; + + ipv4[0] = (bridge_ipv4 >> 24) & 0xFF; + ipv4[1] = (bridge_ipv4 >> 16) & 0xFF; + ipv4[2] = (bridge_ipv4 >> 8) & 0xFF; + ipv4[3] = (bridge_ipv4 >> 0) & 0xFF; + + parp = arp_hdr(skb); + + if (parp->ar_hrd == htons(ARPHRD_ETHER) && parp->ar_pro == htons(ETH_P_IP) + && parp->ar_op == htons(ARPOP_REQUEST) && parp->ar_hln == 6 && parp->ar_pln == 4) { + arpptr = (u8 *)parp + sizeof(struct arphdr); + sha = arpptr; + arpptr += net->addr_len; /* sha */ + memcpy(sip, arpptr, sizeof(sip)); + arpptr += sizeof(sip); + arpptr += net->addr_len; /* tha */ + memcpy(tip, arpptr, sizeof(tip)); + + pr_info("%s sip = %d.%d.%d.%d, tip=%d.%d.%d.%d, 
ipv4=%d.%d.%d.%d\n", netdev_name(net), + sip[0], sip[1], sip[2], sip[3], tip[0], tip[1], tip[2], tip[3], ipv4[0], ipv4[1], ipv4[2], ipv4[3]); + //wwan0 sip = 10.151.137.255, tip=10.151.138.0, ipv4=10.151.137.255 + if (tip[0] == ipv4[0] && tip[1] == ipv4[1] && (tip[2]&0xFC) == (ipv4[2]&0xFC) && tip[3] != ipv4[3]) + reply = arp_create(ARPOP_REPLY, ETH_P_ARP, *((__be32 *)sip), net, *((__be32 *)tip), sha, ec20_mac, sha); + + if (reply) { + skb_reset_mac_header(reply); + __skb_pull(reply, skb_network_offset(reply)); + reply->ip_summed = CHECKSUM_UNNECESSARY; + reply->pkt_type = PACKET_HOST; + + netif_rx_ni(reply); + } + return 1; + } + + return 0; +} + +static struct sk_buff *bridge_mode_tx_fixup(struct net_device *net, struct sk_buff *skb, uint bridge_ipv4, unsigned char *bridge_mac) { + struct ethhdr *ehdr; + const struct iphdr *iph; + + skb_reset_mac_header(skb); + ehdr = eth_hdr(skb); + + if (ehdr->h_proto == htons(ETH_P_ARP)) { + if (bridge_ipv4) + bridge_arp_reply(net, skb, bridge_ipv4); + return NULL; + } + + iph = ip_hdr(skb); + //DBG("iphdr: "); + //PrintHex((void *)iph, sizeof(struct iphdr)); + +// 1 0.000000000 0.0.0.0 255.255.255.255 DHCP 362 DHCP Request - Transaction ID 0xe7643ad7 + if (ehdr->h_proto == htons(ETH_P_IP) && iph->protocol == IPPROTO_UDP && iph->saddr == 0x00000000 && iph->daddr == 0xFFFFFFFF) { + //if (udp_hdr(skb)->dest == htons(67)) //DHCP Request + { + memcpy(bridge_mac, ehdr->h_source, ETH_ALEN); + pr_info("%s PC Mac Address: %02x:%02x:%02x:%02x:%02x:%02x\n", netdev_name(net), + bridge_mac[0], bridge_mac[1], bridge_mac[2], bridge_mac[3], bridge_mac[4], bridge_mac[5]); + } + } + + if (memcmp(ehdr->h_source, bridge_mac, ETH_ALEN)) { + return NULL; + } + + return skb; +} + +static void bridge_mode_rx_fixup(sGobiUSBNet *pQmapDev, struct net_device *net, struct sk_buff *skb) { + uint bridge_mode = 0; + unsigned char *bridge_mac; + + if (pQmapDev->qmap_mode > 1) { + struct qmap_priv *priv = netdev_priv(net); + bridge_mode = priv->m_bridge_mode; + bridge_mac = priv->mHostMAC; + } + else { + bridge_mode = pQmapDev->m_bridge_mode; + bridge_mac = pQmapDev->mHostMAC; + } + + if (bridge_mode) + memcpy(eth_hdr(skb)->h_dest, bridge_mac, ETH_ALEN); + else + memcpy(eth_hdr(skb)->h_dest, net->dev_addr, ETH_ALEN); +} + +static ssize_t bridge_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct net_device *pNet = to_net_dev(dev); + uint bridge_mode = 0; + + if (is_qmap_netdev(pNet)) { + struct qmap_priv *priv = netdev_priv(pNet); + bridge_mode = priv->m_bridge_mode; + } + else { + struct usbnet * pDev = netdev_priv( pNet ); + sGobiUSBNet * pGobiDev = (sGobiUSBNet *)pDev->data[0]; + bridge_mode = pGobiDev->m_bridge_mode; + } + + return snprintf(buf, PAGE_SIZE, "%d\n", bridge_mode); +} + +static ssize_t bridge_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + struct net_device *pNet = to_net_dev(dev); + uint old_mode = 0; + uint bridge_mode = simple_strtoul(buf, NULL, 0); + + if (pNet->type != ARPHRD_ETHER) { + return count; + } + + if (is_qmap_netdev(pNet)) { + struct qmap_priv *priv = netdev_priv(pNet); + + old_mode = priv->m_bridge_mode; + priv->m_bridge_mode = bridge_mode; + } + else { + struct usbnet * pDev = netdev_priv( pNet ); + sGobiUSBNet * pGobiDev = (sGobiUSBNet *)pDev->data[0]; + + old_mode = pGobiDev->m_bridge_mode; + pGobiDev->m_bridge_mode = bridge_mode; + } + + if (old_mode != bridge_mode) + dev_info(dev, "bridge_mode change to 0x%x\n", bridge_mode); + + return count; +} + +static ssize_t 
bridge_ipv4_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct net_device *pNet = to_net_dev(dev); + unsigned int bridge_ipv4 = 0; + unsigned char ipv4[4]; + + if (is_qmap_netdev(pNet)) { + struct qmap_priv *priv = netdev_priv(pNet); + bridge_ipv4 = priv->m_bridge_ipv4; + } + else { + struct usbnet * pDev = netdev_priv( pNet ); + sGobiUSBNet * pGobiDev = (sGobiUSBNet *)pDev->data[0]; + bridge_ipv4 = pGobiDev->m_bridge_ipv4; + } + + ipv4[0] = (bridge_ipv4 >> 24) & 0xFF; + ipv4[1] = (bridge_ipv4 >> 16) & 0xFF; + ipv4[2] = (bridge_ipv4 >> 8) & 0xFF; + ipv4[3] = (bridge_ipv4 >> 0) & 0xFF; + + return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n", ipv4[0], ipv4[1], ipv4[2], ipv4[3]); +} + +static ssize_t bridge_ipv4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + struct net_device *pNet = to_net_dev(dev); + + if (is_qmap_netdev(pNet)) { + struct qmap_priv *priv = netdev_priv(pNet); + priv->m_bridge_ipv4 = simple_strtoul(buf, NULL, 16); + } + else { + struct usbnet * pDev = netdev_priv( pNet ); + sGobiUSBNet * pGobiDev = (sGobiUSBNet *)pDev->data[0]; + pGobiDev->m_bridge_ipv4 = simple_strtoul(buf, NULL, 16); + } + + return count; +} + +static DEVICE_ATTR(bridge_mode, S_IWUSR | S_IRUGO, bridge_mode_show, bridge_mode_store); +static DEVICE_ATTR(bridge_ipv4, S_IWUSR | S_IRUGO, bridge_ipv4_show, bridge_ipv4_store); + +static struct attribute *qmi_qmap_sysfs_attrs[] = { + &dev_attr_bridge_mode.attr, + &dev_attr_bridge_ipv4.attr, + NULL, +}; + +static struct attribute_group qmi_qmap_sysfs_attr_group = { + .attrs = qmi_qmap_sysfs_attrs, +}; +#endif + +#ifdef QUECTEL_WWAN_QMAP +static sGobiUSBNet * net_to_qmap(struct net_device *dev) { + struct usbnet *usbnet = netdev_priv(dev); + sGobiUSBNet * pGobiDev = (sGobiUSBNet *)usbnet->data[0]; + + return pGobiDev; +} + +static struct sk_buff * add_qhdr(struct sk_buff *skb, u8 mux_id) { + struct qmap_hdr *qhdr; + int pad = 0; + + pad = skb->len%4; + if (pad) { + pad = 4 - pad; + if (skb_tailroom(skb) < pad) { + printk("skb_tailroom small!\n"); + pad = 0; + } + if (pad) + __skb_put(skb, pad); + } + + qhdr = (struct qmap_hdr *)skb_push(skb, sizeof(struct qmap_hdr)); + qhdr->cd_rsvd_pad = pad; + qhdr->mux_id = mux_id; + qhdr->pkt_len = cpu_to_be16(skb->len - sizeof(struct qmap_hdr)); + + return skb; +} + +static struct sk_buff * add_qhdr_v5(struct sk_buff *skb, u8 mux_id) { + struct rmnet_map_header *map_header; + struct rmnet_map_v5_csum_header *ul_header; + u32 padding, map_datalen; + + map_datalen = skb->len; + padding = map_datalen%4; + if (padding) { + padding = 4 - padding; + if (skb_tailroom(skb) < padding) { + printk("skb_tailroom small!\n"); + padding = 0; + } + if (padding) + __skb_put(skb, padding); + } + + map_header = (struct rmnet_map_header *)skb_push(skb, (sizeof(struct rmnet_map_header) + sizeof(struct rmnet_map_v5_csum_header))); + map_header->cd_bit = 0; + map_header->next_hdr = 1; + map_header->pad_len = padding; + map_header->mux_id = mux_id; + map_header->pkt_len = htons(map_datalen + padding); + + ul_header = (struct rmnet_map_v5_csum_header *)(map_header + 1); + memset(ul_header, 0, sizeof(*ul_header)); + ul_header->header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD; + if (skb->ip_summed == CHECKSUM_PARTIAL) { +#if 0 //TODO + skb->ip_summed = CHECKSUM_NONE; + /* Ask for checksum offloading */ + ul_header->csum_valid_required = 1; +#endif + } + + return skb; +} + +static void rmnet_usb_tx_wake_queue(unsigned long data) { + sGobiUSBNet *pQmapDev = (void *)data; + int i; + + for (i 
= 0; i < pQmapDev->qmap_mode; i++) { + struct net_device *qmap_net = pQmapDev->mpQmapNetDev[i]; + if (qmap_net) { + if (netif_queue_stopped(qmap_net) && !netif_queue_stopped(pQmapDev->mpNetDev->net)) { + netif_wake_queue(qmap_net); + } + } + } +} + +static void rmnet_usb_tx_skb_destructor(struct sk_buff *skb) { + sGobiUSBNet *pQmapDev = net_to_qmap(skb->dev); + int i; + + for (i = 0; i < pQmapDev->qmap_mode; i++) { + struct net_device *qmap_net = pQmapDev->mpQmapNetDev[i]; + + if (qmap_net) { + if (netif_queue_stopped(qmap_net)) { + tasklet_schedule(&pQmapDev->txq); + break; + } + } + } +} + +static void rmnet_vnd_update_rx_stats(struct net_device *net, + unsigned rx_packets, unsigned rx_bytes) { + net->stats.rx_packets += rx_packets; + net->stats.rx_bytes += rx_bytes; +} + +static void rmnet_vnd_update_tx_stats(struct net_device *net, + unsigned tx_packets, unsigned tx_bytes) { + net->stats.tx_packets += tx_packets; + net->stats.tx_bytes += tx_bytes; +} + +#if defined(QUECTEL_UL_DATA_AGG) +static long agg_time_limit __read_mostly = 1000000L; //reduce this time, can get better TPUT performance, but will increase USB interrupts +module_param(agg_time_limit, long, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(agg_time_limit, "Maximum time packets sit in the agg buf"); + +static long agg_bypass_time __read_mostly = 10000000L; +module_param(agg_bypass_time, long, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(agg_bypass_time, "Skip agg when apart spaced more than this"); + +static int rmnet_usb_tx_agg_skip(struct sk_buff *skb, int offset) +{ + u8 *packet_start = skb->data + offset; + int ready2send = 0; + + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *ip4h = (struct iphdr *)(packet_start); + + if (ip4h->protocol == IPPROTO_TCP) { + const struct tcphdr *th = (const struct tcphdr *)(packet_start + sizeof(struct iphdr)); + if (th->psh) { + ready2send = 1; + } + } + else if (ip4h->protocol == IPPROTO_ICMP) + ready2send = 1; + + } else if (skb->protocol == htons(ETH_P_IPV6)) { + struct ipv6hdr *ip6h = (struct ipv6hdr *)(packet_start); + + if (ip6h->nexthdr == NEXTHDR_TCP) { + const struct tcphdr *th = (const struct tcphdr *)(packet_start + sizeof(struct ipv6hdr)); + if (th->psh) { + ready2send = 1; + } + } else if (ip6h->nexthdr == NEXTHDR_ICMP) { + ready2send = 1; + } else if (ip6h->nexthdr == NEXTHDR_FRAGMENT) { + struct frag_hdr *frag; + + frag = (struct frag_hdr *)(packet_start + + sizeof(struct ipv6hdr)); + if (frag->nexthdr == IPPROTO_ICMPV6) + ready2send = 1; + } + } + + return ready2send; +} + +static void rmnet_usb_tx_agg_work(struct work_struct *work) +{ + struct qmap_priv *priv = + container_of(work, struct qmap_priv, agg_wq); + struct sk_buff *skb = NULL; + unsigned long flags; + + spin_lock_irqsave(&priv->agg_lock, flags); + if (likely(priv->agg_skb)) { + skb = priv->agg_skb; + priv->agg_skb = NULL; + priv->agg_count = 0; + skb->protocol = htons(ETH_P_MAP); + skb->dev = priv->real_dev; + ktime_get_ts64(&priv->agg_time); + } + spin_unlock_irqrestore(&priv->agg_lock, flags); + + if (skb) { + int err = dev_queue_xmit(skb); + if (err != NET_XMIT_SUCCESS) { + priv->self_dev->stats.tx_errors++; + } + } +} + +static enum hrtimer_restart rmnet_usb_tx_agg_timer_cb(struct hrtimer *timer) +{ + struct qmap_priv *priv = + container_of(timer, struct qmap_priv, agg_hrtimer); + + schedule_work(&priv->agg_wq); + return HRTIMER_NORESTART; +} + +static int rmnet_usb_tx_agg(struct sk_buff *skb, struct qmap_priv *priv) { + int ready2send = 0; + int xmit_more = 0; + struct timespec64 diff, now; + struct sk_buff 
+static int rmnet_usb_tx_agg(struct sk_buff *skb, struct qmap_priv *priv) {
+    int ready2send = 0;
+    int xmit_more = 0;
+    struct timespec64 diff, now;
+    struct sk_buff *agg_skb = NULL;
+    unsigned long flags;
+    int err;
+    struct net_device *pNet = priv->self_dev;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0) //6b16f9ee89b8d5709f24bc3ac89ae8b5452c0d7c
+#if LINUX_VERSION_CODE > KERNEL_VERSION(3,16,0)
+    xmit_more = skb->xmit_more;
+#endif
+#else
+    xmit_more = netdev_xmit_more();
+#endif
+
+    rmnet_vnd_update_tx_stats(pNet, 1, skb->len);
+
+    if (priv->ul_data_aggregation_max_datagrams == 1) {
+        skb->protocol = htons(ETH_P_MAP);
+        skb->dev = priv->real_dev;
+        if (!skb->destructor)
+            skb->destructor = rmnet_usb_tx_skb_destructor;
+        err = dev_queue_xmit(skb);
+        if (err != NET_XMIT_SUCCESS)
+            pNet->stats.tx_errors++;
+        return NET_XMIT_SUCCESS;
+    }
+
+new_packet:
+    spin_lock_irqsave(&priv->agg_lock, flags);
+    agg_skb = NULL;
+    ready2send = 0;
+    ktime_get_ts64(&now);
+    diff = timespec64_sub(now, priv->agg_time);
+
+    if (priv->agg_skb) {
+        if ((priv->agg_skb->len + skb->len) < priv->ul_data_aggregation_max_size) {
+            memcpy(skb_put(priv->agg_skb, skb->len), skb->data, skb->len);
+            priv->agg_count++;
+
+            if (diff.tv_sec > 0 || diff.tv_nsec > agg_time_limit) {
+                ready2send = 1;
+            }
+            else if (priv->agg_count == priv->ul_data_aggregation_max_datagrams) {
+                ready2send = 1;
+            }
+            else if (xmit_more == 0) {
+                struct rmnet_map_header *map_header = (struct rmnet_map_header *)skb->data;
+                size_t offset = sizeof(struct rmnet_map_header);
+                if (map_header->next_hdr)
+                    offset += sizeof(struct rmnet_map_v5_csum_header);
+
+                ready2send = rmnet_usb_tx_agg_skip(skb, offset);
+            }
+
+            dev_kfree_skb_any(skb);
+            skb = NULL;
+        }
+        else {
+            ready2send = 1;
+        }
+
+        if (ready2send) {
+            agg_skb = priv->agg_skb;
+            priv->agg_skb = NULL;
+            priv->agg_count = 0;
+        }
+    }
+    else if (skb) {
+        if (diff.tv_sec > 0 || diff.tv_nsec > agg_bypass_time) {
+            ready2send = 1;
+        }
+        else if (xmit_more == 0) {
+            struct rmnet_map_header *map_header = (struct rmnet_map_header *)skb->data;
+            size_t offset = sizeof(struct rmnet_map_header);
+            if (map_header->next_hdr)
+                offset += sizeof(struct rmnet_map_v5_csum_header);
+
+            ready2send = rmnet_usb_tx_agg_skip(skb, offset);
+        }
+
+        if (ready2send == 0) {
+            priv->agg_skb = alloc_skb(priv->ul_data_aggregation_max_size, GFP_ATOMIC);
+            if (priv->agg_skb) {
+                memcpy(skb_put(priv->agg_skb, skb->len), skb->data, skb->len);
+                priv->agg_count++;
+                dev_kfree_skb_any(skb);
+                skb = NULL;
+            }
+            else {
+                ready2send = 1;
+            }
+        }
+
+        if (ready2send) {
+            agg_skb = skb;
+            skb = NULL;
+        }
+    }
+
+    if (ready2send) {
+        priv->agg_time = now;
+    }
+    spin_unlock_irqrestore(&priv->agg_lock, flags);
+
+    if (agg_skb) {
+        agg_skb->protocol = htons(ETH_P_MAP);
+        agg_skb->dev = priv->real_dev;
+        if (!agg_skb->destructor)
+            agg_skb->destructor = rmnet_usb_tx_skb_destructor;
+        err = dev_queue_xmit(agg_skb);
+        if (err != NET_XMIT_SUCCESS) {
+            pNet->stats.tx_errors++;
+        }
+    }
+
+    if (skb) {
+        goto new_packet;
+    }
+
+    if (priv->agg_skb) {
+        if (!hrtimer_is_queued(&priv->agg_hrtimer))
+            hrtimer_start(&priv->agg_hrtimer, ns_to_ktime(NSEC_PER_MSEC * 2), HRTIMER_MODE_REL);
+    }
+
+    return NET_XMIT_SUCCESS;
+}
+#endif
+static int qmap_open(struct net_device *dev)
+{
+    struct qmap_priv *priv = netdev_priv(dev);
+    sGobiUSBNet * pGobiDev = net_to_qmap(priv->real_dev);
+
+    if (!(priv->real_dev->flags & IFF_UP))
+        return -ENETDOWN;
+
+    if (!pGobiDev->mbQMIReady)
+        return -ENETDOWN;
+
+#if defined(QUECTEL_UL_DATA_AGG)
+    if (priv->ul_data_aggregation_max_datagrams == 1 && pGobiDev->agg_ctx.ul_data_aggregation_max_datagrams > 1) {
+        priv->ul_data_aggregation_max_datagrams = pGobiDev->agg_ctx.ul_data_aggregation_max_datagrams;
+        priv->ul_data_aggregation_max_size = pGobiDev->agg_ctx.ul_data_aggregation_max_size;
+        priv->dl_minimum_padding = pGobiDev->agg_ctx.dl_minimum_padding;
+    }
+#endif
+
+    if (netif_carrier_ok(priv->real_dev) && priv->link_state)
+        netif_carrier_on(dev);
+
+    if (netif_carrier_ok(dev)) {
+        if (netif_queue_stopped(dev) && !netif_queue_stopped(priv->real_dev))
+            netif_wake_queue(dev);
+    }
+
+    return 0;
+}
+
+static int qmap_stop(struct net_device *pNet)
+{
+    netif_carrier_off(pNet);
+    return 0;
+}
+
+static int qmap_start_xmit(struct sk_buff *skb, struct net_device *pNet)
+{
+    int err;
+    struct qmap_priv *priv = netdev_priv(pNet);
+
+    if (netif_queue_stopped(priv->real_dev)) {
+        //printk(KERN_DEBUG "s\n");
+        netif_stop_queue(pNet);
+        return NETDEV_TX_BUSY;
+    }
+
+    if (pNet->type == ARPHRD_ETHER) {
+#ifdef QUECTEL_BRIDGE_MODE
+        if (priv->m_bridge_mode && bridge_mode_tx_fixup(pNet, skb, priv->m_bridge_ipv4, priv->mHostMAC) == NULL) {
+            dev_kfree_skb_any (skb);
+            return NETDEV_TX_OK;
+        }
+#endif
+
+        if (ether_to_ip_fixup(pNet, skb) == NULL) {
+            dev_kfree_skb_any (skb);
+            return NETDEV_TX_OK;
+        }
+    }
+
+    if (priv->qmap_version == 5) {
+        add_qhdr(skb, priv->mux_id);
+    }
+    else if (priv->qmap_version == 9) {
+        add_qhdr_v5(skb, priv->mux_id);
+    }
+    else {
+        dev_kfree_skb_any (skb);
+        return NETDEV_TX_OK;
+    }
+
+#if defined(QUECTEL_UL_DATA_AGG)
+    err = rmnet_usb_tx_agg(skb, priv);
+#else
+    skb->protocol = htons(ETH_P_MAP);
+    skb->dev = priv->real_dev;
+    if (!skb->destructor)
+        skb->destructor = rmnet_usb_tx_skb_destructor;
+    err = dev_queue_xmit(skb);
+#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,14 ))
+    if (err == NET_XMIT_SUCCESS) {
+        rmnet_vnd_update_tx_stats(pNet, 1, skb->len);
+    } else {
+        pNet->stats.tx_errors++;
+    }
+#endif
+#endif
+
+    return err;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
+#else
+static const struct net_device_ops qmap_netdev_ops = {
+    .ndo_open       = qmap_open,
+    .ndo_stop       = qmap_stop,
+    .ndo_start_xmit = qmap_start_xmit,
+};
+#endif
+
+#ifdef QUECTEL_BRIDGE_MODE
+static int is_qmap_netdev(const struct net_device *netdev) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
+    return netdev->open == qmap_open;
+#else
+    return netdev->netdev_ops == &qmap_netdev_ops;
+#endif
+}
+#endif
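+/*
+ * [Editorial note, illustrative only.] qmap_register_device() below creates
+ * one virtual ethernet device per QMAP mux instance, named after the real
+ * device plus the pdn offset (e.g. "usb0.1" ... "usb0.4" for qmap_mode=4),
+ * all sharing the real device's MAC address and transmitting through it.
+ */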
+static int qmap_register_device(sGobiUSBNet * pDev, u8 offset_id)
+{
+    struct net_device *real_dev = pDev->mpNetDev->net;
+    struct net_device *qmap_net;
+    struct qmap_priv *priv;
+    int err;
+
+    qmap_net = alloc_etherdev(sizeof(*priv));
+    if (!qmap_net)
+        return -ENOBUFS;
+
+    SET_NETDEV_DEV(qmap_net, &real_dev->dev);
+    priv = netdev_priv(qmap_net);
+    priv->offset_id = offset_id;
+    priv->mux_id = QUECTEL_QMAP_MUX_ID + offset_id;
+    priv->qmap_version = pDev->qmap_version;
+    priv->real_dev = real_dev;
+    priv->self_dev = qmap_net;
+
+#if defined(QUECTEL_UL_DATA_AGG)
+    priv->ul_data_aggregation_max_datagrams = 1;
+    priv->ul_data_aggregation_max_size = 2048;
+    priv->dl_minimum_padding = 0;
+    priv->agg_skb = NULL;
+    priv->agg_count = 0;
+    hrtimer_init(&priv->agg_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+    priv->agg_hrtimer.function = rmnet_usb_tx_agg_timer_cb;
+    INIT_WORK(&priv->agg_wq, rmnet_usb_tx_agg_work);
+    ktime_get_ts64(&priv->agg_time);
+    spin_lock_init(&priv->agg_lock);
+#endif
+
+    sprintf(qmap_net->name, "%s.%d", real_dev->name, offset_id + 1);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
+    qmap_net->open = qmap_open;
+    qmap_net->stop = qmap_stop;
+    qmap_net->hard_start_xmit = qmap_start_xmit;
+#else
+    qmap_net->netdev_ops = &qmap_netdev_ops;
+#endif
+    memcpy (qmap_net->dev_addr, real_dev->dev_addr, ETH_ALEN);
+
+#ifdef QUECTEL_BRIDGE_MODE
+    priv->m_bridge_mode = !!(pDev->m_bridge_mode & BIT(offset_id));
+    qmap_net->sysfs_groups[0] = &qmi_qmap_sysfs_attr_group;
+#endif
+
+    err = register_netdev(qmap_net);
+    if (err < 0) {
+        INFO("register_netdev(%s), err=%d\n", qmap_net->name, err);
+        goto out_free_newdev;
+    }
+    netif_device_attach (qmap_net);
+
+    pDev->mpQmapNetDev[offset_id] = qmap_net;
+    qmap_net->flags |= IFF_NOARP;
+    qmap_net->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+
+    INFO("%s\n", qmap_net->name);
+
+    return 0;
+
+out_free_newdev:
+    free_netdev(qmap_net);
+    return err;
+}
+
+static void qmap_unregister_device(sGobiUSBNet * pDev, u8 offset_id) {
+    struct net_device *qmap_net;
+#if defined(QUECTEL_UL_DATA_AGG)
+    struct qmap_priv *priv;
+    unsigned long flags;
+#endif
+
+    qmap_net = pDev->mpQmapNetDev[offset_id];
+    if (qmap_net == NULL)
+        return;
+
+    netif_carrier_off(qmap_net);
+    netif_stop_queue(qmap_net);
+
+#if defined(QUECTEL_UL_DATA_AGG)
+    priv = netdev_priv(qmap_net);
+    hrtimer_cancel(&priv->agg_hrtimer);
+    cancel_work_sync(&priv->agg_wq);
+    spin_lock_irqsave(&priv->agg_lock, flags);
+    if (priv->agg_skb) {
+        kfree_skb(priv->agg_skb);
+    }
+    spin_unlock_irqrestore(&priv->agg_lock, flags);
+#endif
+
+    unregister_netdev(qmap_net);
+    free_netdev(qmap_net);
+}
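+/*
+ * [Editorial note, illustrative only.] The sysfs attributes below let
+ * userspace inspect and drive the mux state, e.g. (paths assumed; any
+ * numeric base works since link_state is parsed with simple_strtoul(.., 0)):
+ *
+ *   cat  /sys/class/net/usb0/qmap_mode          # number of muxed pdns
+ *   echo 1    > /sys/class/net/usb0/link_state  # first pdn up
+ *   echo 0x81 > /sys/class/net/usb0/link_state  # first pdn down (0x80 | 1)
+ */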
+static ssize_t qmap_mode_show(struct device *dev, struct device_attribute *attr, char *buf) {
+    struct net_device *pNet = to_net_dev(dev);
+    struct usbnet * pDev = netdev_priv( pNet );
+    sGobiUSBNet * pGobiDev = (sGobiUSBNet *)pDev->data[0];
+
+    return snprintf(buf, PAGE_SIZE, "%d\n", pGobiDev->qmap_mode);
+}
+
+static DEVICE_ATTR(qmap_mode, S_IRUGO, qmap_mode_show, NULL);
+
+static ssize_t qmap_size_show(struct device *dev, struct device_attribute *attr, char *buf) {
+    struct net_device *pNet = to_net_dev(dev);
+    struct usbnet * pDev = netdev_priv( pNet );
+    sGobiUSBNet * pGobiDev = (sGobiUSBNet *)pDev->data[0];
+
+    return snprintf(buf, PAGE_SIZE, "%d\n", pGobiDev->qmap_size);
+}
+
+static DEVICE_ATTR(qmap_size, S_IRUGO, qmap_size_show, NULL);
+
+static ssize_t link_state_show(struct device *dev, struct device_attribute *attr, char *buf) {
+    sGobiUSBNet *pQmapDev = net_to_qmap(to_net_dev(dev));
+
+    return snprintf(buf, PAGE_SIZE, "0x%x\n", pQmapDev->link_state);
+}
+
+static ssize_t link_state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) {
+    struct net_device *netdev = to_net_dev(dev);
+    sGobiUSBNet *pQmapDev = net_to_qmap(to_net_dev(dev));
+    unsigned qmap_mode = pQmapDev->qmap_mode;
+    unsigned link_state = 0;
+    unsigned old_link = pQmapDev->link_state;
+    uint offset_id = 0;
+
+    link_state = simple_strtoul(buf, NULL, 0);
+    if (qmap_mode == 1) {
+        pQmapDev->link_state = !!link_state;
+    }
+    else if (qmap_mode > 1) {
+        offset_id = ((link_state&0x7F) - 1);
+
+        if (offset_id >= qmap_mode) {
+            dev_info(dev, "%s offset_id is %d, but qmap_mode is %d\n", __func__, offset_id, pQmapDev->qmap_mode);
+            return count;
+        }
+
+        if (link_state&0x80)
+            pQmapDev->link_state &= ~(1 << offset_id);
+        else
+            pQmapDev->link_state |= (1 << offset_id);
+    }
+
+    if (old_link != pQmapDev->link_state) {
+        struct net_device *qmap_net = pQmapDev->mpQmapNetDev[offset_id];
+
+        if (pQmapDev->link_state) {
+            netif_carrier_on(netdev);
+        } else {
+            netif_carrier_off(netdev);
+        }
+
+        if (qmap_net && qmap_net != netdev) {
+            struct qmap_priv *priv = netdev_priv(qmap_net);
+
+            priv->link_state = !!(pQmapDev->link_state & (1 << offset_id));
+            if (priv->link_state) {
+                netif_carrier_on(qmap_net);
+                if (netif_queue_stopped(qmap_net) && !netif_queue_stopped(priv->real_dev))
+                    netif_wake_queue(qmap_net);
+            }
+            else
+                netif_carrier_off(qmap_net);
+        }
+    }
+
+    if (old_link != pQmapDev->link_state)
+        dev_info(dev, "link_state 0x%x -> 0x%x\n", old_link, pQmapDev->link_state);
+
+    return count;
+}
+
+static DEVICE_ATTR(link_state, S_IWUSR | S_IRUGO, link_state_show, link_state_store);
+#endif
+
+static struct attribute *gobinet_sysfs_attrs[] = {
+#ifdef QUECTEL_BRIDGE_MODE
+    &dev_attr_bridge_mode.attr,
+    &dev_attr_bridge_ipv4.attr,
+#endif
+#ifdef QUECTEL_WWAN_QMAP
+    &dev_attr_qmap_mode.attr,
+    &dev_attr_qmap_size.attr,
+    &dev_attr_link_state.attr,
+#endif
+    NULL,
+};
+
+static struct attribute_group gobinet_sysfs_attr_group = {
+    .attrs = gobinet_sysfs_attrs,
+};
+
+#if defined(QUECTEL_WWAN_QMAP)
+typedef struct {
+    unsigned int size;
+    unsigned int rx_urb_size;
+    unsigned int ep_type;
+    unsigned int iface_id;
+    unsigned int qmap_mode;
+    unsigned int qmap_version;
+    unsigned int dl_minimum_padding;
+    char ifname[8][16];
+    unsigned char mux_id[8];
+} RMNET_INFO;
+
+static void rmnet_info_set(struct sGobiUSBNet *pQmapDev, RMNET_INFO *rmnet_info)
+{
+    int i;
+
+    memset(rmnet_info, 0, sizeof(*rmnet_info));
+    rmnet_info->size = sizeof(RMNET_INFO);
+    rmnet_info->rx_urb_size = pQmapDev->qmap_size;
+    rmnet_info->ep_type = 2; //DATA_EP_TYPE_HSUSB
+    rmnet_info->iface_id = 4;
+    rmnet_info->qmap_mode = pQmapDev->qmap_mode;
+    rmnet_info->qmap_version = pQmapDev->qmap_version;
+    rmnet_info->dl_minimum_padding = 0;
+
+    for (i = 0; i < pQmapDev->qmap_mode; i++) {
+        struct net_device *qmap_net = pQmapDev->mpQmapNetDev[i];
+
+        if (!qmap_net)
+            break;
+
+        strcpy(rmnet_info->ifname[i], qmap_net->name);
+        rmnet_info->mux_id[i] = QUECTEL_QMAP_MUX_ID;
+        if (pQmapDev->qmap_mode > 1) {
+            struct qmap_priv *priv = netdev_priv(qmap_net);
+
+            rmnet_info->mux_id[i] = priv->mux_id;
+        }
+    }
+}
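+/*
+ * [Editorial only -- a hedged userspace sketch, not shipped code.] The
+ * private ioctl below (cmd 0x89F3) copies out the RMNET_INFO table. A
+ * minimal caller could look like:
+ *
+ *   struct ifreq ifr = {0};
+ *   RMNET_INFO info;  // must match the layout declared above
+ *   strncpy(ifr.ifr_name, "usb0", IFNAMSIZ - 1);  // interface name assumed
+ *   ifr.ifr_ifru.ifru_data = (void *)&info;
+ *   if (ioctl(sockfd, 0x89F3, &ifr) == 0)  // sockfd: any AF_INET socket
+ *       printf("qmap_mode=%u\n", info.qmap_mode);
+ */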
+static int qmap_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) {
+    int rc = -EOPNOTSUPP;
+    uint link_state = 0;
+    sGobiUSBNet *pQmapDev = net_to_qmap(dev);
+
+    atomic_inc(&pQmapDev->refcount);
+    if (!pQmapDev->mbQMIReady) {
+        if (wait_for_completion_interruptible_timeout(&pQmapDev->mQMIReadyCompletion, 15*HZ) <= 0) {
+            if (atomic_dec_and_test(&pQmapDev->refcount)) {
+                kfree( pQmapDev );
+            }
+            return -ETIMEDOUT;
+        }
+    }
+    atomic_dec(&pQmapDev->refcount);
+
+    switch (cmd) {
+    case 0x89F1: //SIOCDEVPRIVATE
+        rc = copy_from_user(&link_state, ifr->ifr_ifru.ifru_data, sizeof(link_state));
+        if (!rc) {
+            char buf[32];
+            snprintf(buf, sizeof(buf), "%u", link_state);
+            link_state_store(&dev->dev, NULL, buf, strlen(buf));
+        }
+        break;
+
+    case 0x89F2: //SIOCDEVPRIVATE
+        rc = 0;
+        break;
+
+    case 0x89F3: //SIOCDEVPRIVATE
+        if (pQmapDev->qmap_mode) {
+            RMNET_INFO rmnet_info;
+
+            rmnet_info_set(pQmapDev, &rmnet_info);
+            rc = copy_to_user(ifr->ifr_ifru.ifru_data, &rmnet_info, sizeof(rmnet_info));
+        }
+        break;
+
+    default:
+        break;
+    }
+
+    return rc;
+}
+#endif
+
+#ifdef CONFIG_PM
+/*===========================================================================
+METHOD:
+   GobiNetSuspend (Public Method)
+
+DESCRIPTION:
+   Stops QMI traffic while device is suspended
+
+PARAMETERS
+   pIntf          [ I ] - Pointer to interface
+   powerEvent     [ I ] - Power management event
+
+RETURN VALUE:
+   int - 0 for success
+         negative errno for failure
+===========================================================================*/
+static int GobiNetSuspend(
+    struct usb_interface * pIntf,
+    pm_message_t powerEvent )
+{
+    struct usbnet * pDev;
+    sGobiUSBNet * pGobiDev;
+
+    if (pIntf == 0)
+    {
+        return -EINVAL;
+    }
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,23 ))
+    pDev = usb_get_intfdata( pIntf );
+#else
+    pDev = (struct usbnet *)pIntf->dev.platform_data;
+#endif
+
+    if (pDev == NULL || pDev->net == NULL)
+    {
+        DBG( "failed to get netdevice\n" );
+        return -ENXIO;
+    }
+
+    pGobiDev = (sGobiUSBNet *)pDev->data[0];
+    if (pGobiDev == NULL)
+    {
+        DBG( "failed to get QMIDevice\n" );
+        return -ENXIO;
+    }
+
+    if (pGobiDev->mbQMISyncIng)
+    {
+        DBG( "QMI sync in progress\n" );
+        return -EBUSY;
+    }
+
+    // Is this autosuspend or system suspend?
+    //    do we allow remote wakeup?
+#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,33 ))
+#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,18 ))
+    if (pDev->udev->auto_pm == 0)
+#else
+    if (1)
+#endif
+#else
+    if ((powerEvent.event & PM_EVENT_AUTO) == 0)
+#endif
+    {
+        DBG( "device suspended to power level %d\n",
+             powerEvent.event );
+        GobiSetDownReason( pGobiDev, DRIVER_SUSPENDED );
+    }
+    else
+    {
+        DBG( "device autosuspend\n" );
+    }
+
+    if (powerEvent.event & PM_EVENT_SUSPEND)
+    {
+        // Stop QMI read callbacks
+        if (pGobiDev->m_qcrmcall_mode) {
+        } else {
+            KillRead( pGobiDev );
+        }
+#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,22 ))
+        pDev->udev->reset_resume = 0;
+#endif
+
+        // Store power state to avoid duplicate resumes
+        pIntf->dev.power.power_state.event = powerEvent.event;
+    }
+    else
+    {
+        // Other power modes cause QMI connection to be lost
+#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,22 ))
+        pDev->udev->reset_resume = 1;
+#endif
+    }
+
+    // Run usbnet's suspend function
+    return usbnet_suspend( pIntf, powerEvent );
+}
+int QuecGobiNetSuspend(struct usb_interface *pIntf, pm_message_t powerEvent ) {
+    return GobiNetSuspend(pIntf, powerEvent);
+}
+
+/*===========================================================================
+METHOD:
+   GobiNetResume (Public Method)
+
+DESCRIPTION:
+   Resume QMI traffic or recreate QMI device
+
+PARAMETERS
+   pIntf          [ I ] - Pointer to interface
+
+RETURN VALUE:
+   int - 0 for success
+         negative errno for failure
+===========================================================================*/
+static int GobiNetResume( struct usb_interface * pIntf )
+{
+    struct usbnet * pDev;
+    sGobiUSBNet * pGobiDev;
+    int nRet;
+    int oldPowerState;
+
+    if (pIntf == 0)
+    {
+        return -EINVAL;
+    }
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,23 ))
+    pDev = usb_get_intfdata( pIntf );
+#else
+    pDev = (struct usbnet *)pIntf->dev.platform_data;
+#endif
+
+    if (pDev == NULL || pDev->net == NULL)
+    {
+        DBG( "failed to get netdevice\n" );
+        return -ENXIO;
+    }
+
+    pGobiDev = (sGobiUSBNet *)pDev->data[0];
+    if (pGobiDev == NULL)
+    {
+        DBG( "failed to get QMIDevice\n" );
+        return -ENXIO;
+    }
+
+    oldPowerState = pIntf->dev.power.power_state.event;
+    pIntf->dev.power.power_state.event = PM_EVENT_ON;
+    DBG( "resuming from power mode %d\n", oldPowerState );
+    if (oldPowerState & PM_EVENT_SUSPEND)
+    {
+        // It doesn't matter if this is autoresume or system resume
+        GobiClearDownReason( pGobiDev, DRIVER_SUSPENDED );
+
+        nRet = usbnet_resume( pIntf );
+        if (nRet != 0)
+        {
+            DBG( "usbnet_resume error %d\n", nRet );
+            return nRet;
+        }
+
+        // Restart QMI read callbacks
+        if (pGobiDev->m_qcrmcall_mode) {
+            nRet = 0;
+        } else {
+            nRet = StartRead( pGobiDev );
+        }
+        if (nRet != 0)
+        {
+            DBG( "StartRead error %d\n", nRet );
+            return nRet;
+        }
+
+#ifdef CONFIG_PM
+   #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
+        // Kick Auto PM thread to process any queued URBs
+        complete( &pGobiDev->mAutoPM.mThreadDoWork );
+   #endif
+#endif /* CONFIG_PM */
+
+#if defined(QUECTEL_WWAN_QMAP)
+        if ((!netif_queue_stopped(pDev->net)) && (pGobiDev->qmap_mode > 1)) {
+            rmnet_usb_tx_wake_queue((unsigned long )pGobiDev);
+        }
+#endif
+    }
+    else
+    {
+        DBG( "nothing to resume\n" );
+        return 0;
+    }
+
+    return nRet;
+}
+#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,27 ))
+static int GobiNetResetResume( struct usb_interface * pIntf )
+{
+    INFO( "device does not support reset_resume\n" );
+    pIntf->needs_binding = 1;
+
+    return -EOPNOTSUPP;
+}
+#endif
+#endif /* CONFIG_PM */
+
+static void ql_net_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
+{
+    usbnet_get_drvinfo(net, info);
+    /* Inherit standard device info */
+    strlcpy(info->driver, driver_name, sizeof(info->driver));
+    strlcpy(info->version, VERSION_NUMBER, sizeof(info->version));
+}
+
+static struct ethtool_ops ql_net_ethtool_ops;
+
+/*===========================================================================
+METHOD:
+   GobiNetDriverBind (Public Method)
+
+DESCRIPTION:
+   Setup in and out pipes
+
+PARAMETERS
+   pDev           [ I ] - Pointer to usbnet device
+   pIntf          [ I ] - Pointer to interface
+
+RETURN VALUE:
+   int - 0 for success
+         Negative errno for error
+===========================================================================*/
+static int GobiNetDriverBind(
+    struct usbnet * pDev,
+    struct usb_interface * pIntf )
+{
+    int numEndpoints;
+    int endpointIndex;
+    struct usb_host_endpoint * pEndpoint = NULL;
+    struct usb_host_endpoint * pIn = NULL;
+    struct usb_host_endpoint * pOut = NULL;
+
+    // Verify one altsetting
+    if (pIntf->num_altsetting != 1)
+    {
+        DBG( "invalid num_altsetting %u\n", pIntf->num_altsetting );
+        return -ENODEV;
+    }
+
+    // Verify correct interface (4 for UC20)
+    if ( !test_bit(pIntf->cur_altsetting->desc.bInterfaceNumber, &pDev->driver_info->data))
+    {
+        DBG( "invalid interface %d\n",
+             pIntf->cur_altsetting->desc.bInterfaceNumber );
+        return -ENODEV;
+    }
+
+    if ( pIntf->cur_altsetting->desc.bInterfaceClass != 0xff)
+    {
+        struct usb_interface_descriptor *desc = &pIntf->cur_altsetting->desc;
+        const char *qcfg_usbnet = "UNKNOWN";
+
+        if (desc->bInterfaceClass == 2 && desc->bInterfaceSubClass == 0x0e) {
+            qcfg_usbnet = "MBIM";
+        } else if (desc->bInterfaceClass == 2 && desc->bInterfaceSubClass == 0x06) {
+            qcfg_usbnet = "ECM";
+        } else if (desc->bInterfaceClass == 0xe0 && desc->bInterfaceSubClass == 1 && desc->bInterfaceProtocol == 3) {
+            qcfg_usbnet = "RNDIS";
+        }
+
+        INFO( "usbnet is %s, not NDIS/RMNET!\n", qcfg_usbnet);
+
+        return -ENODEV;
+    }
+
+    // Collect In and Out endpoints
+    numEndpoints = pIntf->cur_altsetting->desc.bNumEndpoints;
+    for (endpointIndex = 0; endpointIndex < numEndpoints; endpointIndex++)
+    {
+        pEndpoint = pIntf->cur_altsetting->endpoint + endpointIndex;
+        if (pEndpoint == NULL)
+        {
+            DBG( "invalid endpoint %u\n", endpointIndex );
+            return -ENODEV;
+        }
+
+        if (usb_endpoint_dir_in( &pEndpoint->desc ) == true
+        &&  usb_endpoint_xfer_int( &pEndpoint->desc ) == false)
+        {
+            pIn = pEndpoint;
+        }
+        else if (usb_endpoint_dir_out( &pEndpoint->desc ) == true)
+        {
+            pOut = pEndpoint;
+        }
+    }
+
+    if (pIn == NULL || pOut == NULL)
+    {
+        DBG( "invalid endpoints\n" );
+        return -ENODEV;
+    }
+
+    if (usb_set_interface( pDev->udev,
+                           pIntf->cur_altsetting->desc.bInterfaceNumber,
+                           0 ) != 0)
+    {
+        DBG( "unable to set interface\n" );
+        return -ENODEV;
+    }
+
+    pDev->in = usb_rcvbulkpipe( pDev->udev,
+                    pIn->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK );
+    pDev->out = usb_sndbulkpipe( pDev->udev,
+                    pOut->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK );
+
+#if defined(QUECTEL_WWAN_MULTI_PACKAGES)
+    if (rx_packets && pDev->udev->descriptor.idVendor == cpu_to_le16(0x2C7C)) {
+        struct multi_package_config rx_config = {
+            .enable = cpu_to_le32(1),
+            .package_max_len = cpu_to_le32((1500 + sizeof(struct quec_net_package_header)) * rx_packets),
+            .package_max_count_in_queue = cpu_to_le32(rx_packets),
+            .timeout = cpu_to_le32(10*1000), //10ms
+        };
+        int ret = 0;
+
+        ret = usb_control_msg(
+            interface_to_usbdev(pIntf),
+            usb_sndctrlpipe(interface_to_usbdev(pIntf), 0),
+            USB_CDC_SET_MULTI_PACKAGE_COMMAND,
+            0x21, //USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE
+            1,
+            pIntf->cur_altsetting->desc.bInterfaceNumber,
+            &rx_config, sizeof(rx_config), 100);
+
+        DBG( "Quectel EC21&EC25 rx_packets=%d, ret=%d\n", rx_packets, ret);
+        if (ret == sizeof(rx_config)) {
+            pDev->rx_urb_size = le32_to_cpu(rx_config.package_max_len);
+        } else {
+            rx_packets = 0;
+        }
+    }
+#endif
+
+#if 1 //def DATA_MODE_RP
+    /* make MAC addr easily distinguishable from an IP header */
+    if ((pDev->net->dev_addr[0] & 0xd0) == 0x40) {
+        /* this will make the usbnet adapter be named usbX (instead of ethX) */
+        pDev->net->dev_addr[0] |= 0x02;  /* set local assignment bit */
+        pDev->net->dev_addr[0] &= 0xbf;  /* clear "IP" bit */
+    }
+    memcpy (pDev->net->dev_addr, node_id, sizeof node_id);
+    pDev->net->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+    pDev->net->features |= (NETIF_F_VLAN_CHALLENGED);
+#endif
+
+    ql_net_ethtool_ops = *pDev->net->ethtool_ops;
+    ql_net_ethtool_ops.get_drvinfo = ql_net_get_drvinfo;
+    pDev->net->ethtool_ops = &ql_net_ethtool_ops;
+
+    DBG( "in %x, out %x\n",
+         pIn->desc.bEndpointAddress,
+         pOut->desc.bEndpointAddress );
+
+    // In later versions of the kernel, usbnet helps with this
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 2,6,23 ))
+    pIntf->dev.platform_data = (void *)pDev;
+#endif
+
+    if (qcrmcall_mode == 0 && pDev->net->sysfs_groups[0] == NULL && gobinet_sysfs_attr_group.attrs[0] != NULL) {
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 2,6,32)) //see commit 0c509a6c9393b27a8c5a01acd4a72616206cfc24
+        pDev->net->sysfs_groups[1] = &gobinet_sysfs_attr_group; //see netdev_register_sysfs()
+#else
+        pDev->net->sysfs_groups[0] = &gobinet_sysfs_attr_group;
+#endif
+    }
+
+    if (!pDev->rx_urb_size) {
+        /* avoid the module reporting an MTU of 1460 while receiving 1500-byte IP
+           packets, which can crash the customer's system; the setting below keeps
+           usbnet.c:usbnet_change_mtu() from modifying rx_urb_size according to the MTU */
+        pDev->rx_urb_size = ETH_DATA_LEN + ETH_HLEN + 6;
+    }
+
+    return 0;
+}
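+/*
+ * [Editorial note, illustrative only.] For the modem's default first MAC
+ * byte of 0x40, the fixup above computes (0x40 | 0x02) & 0xbf = 0x02, i.e. a
+ * locally administered address with the "IP-like" bit cleared, before
+ * node_id is copied over the whole address anyway.
+ */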
+/*===========================================================================
+METHOD:
+   GobiNetDriverUnbind (Public Method)
+
+DESCRIPTION:
+   Deregisters QMI device (Registration happened in the probe function)
+
+PARAMETERS
+   pDev           [ I ] - Pointer to usbnet device
+   pIntfUnused    [ I ] - Pointer to interface
+
+RETURN VALUE:
+   None
+===========================================================================*/
+static void GobiNetDriverUnbind(
+    struct usbnet * pDev,
+    struct usb_interface * pIntf)
+{
+    sGobiUSBNet * pGobiDev = (sGobiUSBNet *)pDev->data[0];
+
+    // Should already be down, but just in case...
+    netif_carrier_off( pDev->net );
+
+    if (pGobiDev->m_qcrmcall_mode) {
+    } else {
+        DeregisterQMIDevice( pGobiDev );
+    }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION( 2,6,29 ))
+    kfree( pDev->net->netdev_ops );
+    pDev->net->netdev_ops = NULL;
+#endif
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 2,6,23 ))
+    pIntf->dev.platform_data = NULL;
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION( 2,6,19 ))
+    pIntf->needs_remote_wakeup = 0;
+#endif
+
+    if (atomic_dec_and_test(&pGobiDev->refcount))
+        kfree( pGobiDev );
+    else
+        INFO("memory leak!\n");
+}
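+/*
+ * [Editorial note, illustrative only.] The receive path below de-muxes
+ * aggregated QMAP frames: _rmnet_usb_rx_handler() walks each MAP header in
+ * skb_in, validates pkt_len and mux_id, strips padding, copies the payload
+ * into a fresh skb for the matching mpQmapNetDev[] child device, and finally
+ * hands IP packets to netif_rx() (or back to usbnet for the real device).
+ */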
+#if 1 //def DATA_MODE_RP
+
+#if defined(QUECTEL_WWAN_QMAP)
+static void _rmnet_usb_rx_handler(struct usbnet *dev, struct sk_buff *skb_in)
+{
+    sGobiUSBNet * pQmapDev = (sGobiUSBNet *)dev->data[0];
+    struct sk_buff *qmap_skb;
+    struct sk_buff_head skb_chain;
+    uint dl_minimum_padding = 0;
+
+#if defined(QUECTEL_UL_DATA_AGG)
+    if (pQmapDev->qmap_version == 9)
+        dl_minimum_padding = pQmapDev->agg_ctx.dl_minimum_padding;
+#endif
+
+    __skb_queue_head_init(&skb_chain);
+
+    while (skb_in->len > sizeof(struct qmap_hdr)) {
+        struct rmnet_map_header *map_header = (struct rmnet_map_header *)skb_in->data;
+        struct rmnet_map_v5_csum_header *ul_header = NULL;
+        size_t hdr_size = sizeof(struct rmnet_map_header);
+        struct net_device *qmap_net;
+        int pkt_len = ntohs(map_header->pkt_len);
+        int skb_len;
+        __be16 protocol;
+        int mux_id;
+
+        if (map_header->next_hdr) {
+            ul_header = (struct rmnet_map_v5_csum_header *)(map_header + 1);
+            hdr_size += sizeof(struct rmnet_map_v5_csum_header);
+        }
+
+        skb_len = pkt_len - (map_header->pad_len&0x3F);
+        skb_len -= dl_minimum_padding;
+        if (skb_len > 1500) {
+            dev_info(&dev->net->dev, "drop skb_len=%x larger than 1500\n", skb_len);
+            goto error_pkt;
+        }
+
+        if (skb_in->len < (pkt_len + hdr_size)) {
+            dev_info(&dev->net->dev, "drop unknown qmap pkt, len=%d, pkt_len=%d\n", skb_in->len, pkt_len);
+            goto error_pkt;
+        }
+
+        if (map_header->cd_bit) {
+            dev_info(&dev->net->dev, "skip qmap command packet\n");
+            goto skip_pkt;
+        }
+
+        switch (skb_in->data[hdr_size] & 0xf0) {
+        case 0x40:
+            protocol = htons(ETH_P_IP);
+            break;
+        case 0x60:
+            protocol = htons(ETH_P_IPV6);
+            break;
+        default:
+            dev_info(&dev->net->dev, "unknown skb->protocol %02x\n", skb_in->data[hdr_size]);
+            goto error_pkt;
+        }
+
+        mux_id = map_header->mux_id - QUECTEL_QMAP_MUX_ID;
+        if (mux_id >= pQmapDev->qmap_mode) {
+            dev_info(&dev->net->dev, "drop qmap pkt with unknown mux_id %x\n", map_header->mux_id);
+            goto error_pkt;
+        }
+
+        qmap_net = pQmapDev->mpQmapNetDev[mux_id];
+
+        if (qmap_net == NULL) {
+            dev_info(&dev->net->dev, "drop qmap pkt with unknown mux_id %x\n", map_header->mux_id);
+            goto skip_pkt;
+        }
+
+        qmap_skb = netdev_alloc_skb(qmap_net, skb_len);
+        if (qmap_skb) {
+            skb_put(qmap_skb, skb_len);
+            memcpy(qmap_skb->data, skb_in->data + hdr_size, skb_len);
+        }
+
+        if (qmap_skb == NULL) {
+            dev_info(&dev->net->dev, "failed to alloc skb, pkt_len = %d\n", skb_len);
+            goto error_pkt;
+        }
+
+        skb_reset_transport_header(qmap_skb);
+        skb_reset_network_header(qmap_skb);
+        qmap_skb->pkt_type = PACKET_HOST;
+        skb_set_mac_header(qmap_skb, 0);
+        qmap_skb->protocol = protocol;
+
+        if (ul_header && ul_header->header_type == RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD
+            && ul_header->csum_valid_required) {
+#if 0 //TODO
+            qmap_skb->ip_summed = CHECKSUM_UNNECESSARY;
+#endif
+        }
+
+        if (qmap_skb->dev->type == ARPHRD_ETHER) {
+            skb_push(qmap_skb, ETH_HLEN);
+            skb_reset_mac_header(qmap_skb);
+            memcpy(eth_hdr(qmap_skb)->h_source, default_modem_addr, ETH_ALEN);
+            memcpy(eth_hdr(qmap_skb)->h_dest, qmap_net->dev_addr, ETH_ALEN);
+            eth_hdr(qmap_skb)->h_proto = protocol;
+#ifdef QUECTEL_BRIDGE_MODE
+            bridge_mode_rx_fixup(pQmapDev, qmap_net, qmap_skb);
+#endif
+        }
+
+        __skb_queue_tail(&skb_chain, qmap_skb);
+
+skip_pkt:
+        skb_pull(skb_in, pkt_len + hdr_size);
+    }
+
+error_pkt:
+    while ((qmap_skb = __skb_dequeue (&skb_chain))) {
+        if (qmap_skb->dev != dev->net) {
+            if (qmap_skb->dev->type == ARPHRD_ETHER)
+                __skb_pull(qmap_skb, ETH_HLEN);
+            rmnet_vnd_update_rx_stats(qmap_skb->dev, 1, qmap_skb->len);
+            netif_rx(qmap_skb);
+        }
+        else {
+            qmap_skb->protocol = 0;
+            usbnet_skb_return(dev, qmap_skb);
+        }
+    }
+}
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,35 )) //ab95bfe01f9872459c8678572ccadbf646badad0
+#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,39 )) //8a4eb5734e8d1dc60a8c28576bbbdfdcc643626d
+static struct sk_buff* rmnet_usb_rx_handler(struct sk_buff *skb)
+{
+    struct usbnet *dev;
+
+    if (!skb)
+        goto done;
+
+    //printk("%s skb=%p, protocol=%x, len=%d\n", __func__, skb, skb->protocol, skb->len);
+
+    if (skb->pkt_type == PACKET_LOOPBACK)
+        return skb;
+
+    if (skb->protocol != htons(ETH_P_MAP)) {
+        WARN_ON(1);
+        return skb;
+    }
+
+    dev = netdev_priv(skb->dev);
+
+    if (dev == NULL) {
+        WARN_ON(1);
+        return skb;
+    }
+
+    _rmnet_usb_rx_handler(dev, skb);
+    consume_skb(skb);
+
+done:
+    return NULL;
+}
+#else
+static rx_handler_result_t rmnet_usb_rx_handler(struct sk_buff **pskb)
+{
+    struct sk_buff *skb = *pskb;
+    struct usbnet *dev;
+
+    if (!skb)
+        goto done;
+
+    //printk("%s skb=%p, protocol=%x, len=%d\n", __func__, skb, skb->protocol, skb->len);
+
+    if (skb->pkt_type == PACKET_LOOPBACK)
+        return RX_HANDLER_PASS;
+
+    if (skb->protocol != htons(ETH_P_MAP)) {
+        WARN_ON(1);
+        return RX_HANDLER_PASS;
+    }
+
+    dev = netdev_priv(skb->dev);
+
+    if (dev == NULL) {
+        WARN_ON(1);
+        return RX_HANDLER_PASS;
+    }
+
+    _rmnet_usb_rx_handler(dev, skb);
+    consume_skb(skb);
+
+done:
+    return RX_HANDLER_CONSUMED;
+}
+#endif
+#endif
+#endif
+/*===========================================================================
+METHOD:
+   GobiNetDriverTxFixup (Public Method)
+
+DESCRIPTION:
+   Handling data format mode on transmit path
+
+PARAMETERS
+   pDev           [ I ] - Pointer to usbnet device
+   pSKB           [ I ] - Pointer to transmit packet buffer
+   flags          [ I ] - os flags
+
+RETURN VALUE:
+   None
+===========================================================================*/
+static struct sk_buff *GobiNetDriverTxFixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
+{
+    sGobiUSBNet * pGobiDev = (sGobiUSBNet *)dev->data[0];
+
+    if (!pGobiDev) {
+        DBG( "failed to get QMIDevice\n" );
+        dev_kfree_skb_any(skb);
+        return NULL;
+    }
+
+    if (unlikely(!skb)) {
+        return NULL;
+    }
+
+    if (!pGobiDev->mbRawIPMode)
+        return skb;
+
+#ifdef QUECTEL_WWAN_QMAP
+    if (pGobiDev->qmap_mode > 1) {
+        if (skb->protocol == htons(ETH_P_MAP))
+            return skb;
+
+        goto drop_skb;
+    }
+    else if (pGobiDev->qmap_mode == 1) {
+        if (unlikely(!pGobiDev->link_state)) {
+            dev_info(&dev->net->dev, "link_state 0x%x, drop skb, len = %u\n", pGobiDev->link_state, skb->len);
+            goto drop_skb;
+        }
+
+        if (dev->net->type == ARPHRD_ETHER) {
+#ifdef QUECTEL_BRIDGE_MODE
+            if (pGobiDev->m_bridge_mode && bridge_mode_tx_fixup(dev->net, skb, pGobiDev->m_bridge_ipv4, pGobiDev->mHostMAC) == NULL) {
+                goto drop_skb;
+            }
+#endif
+
+            if (ether_to_ip_fixup(dev->net, skb) == NULL)
+                goto drop_skb;
+        }
+
+        if (pGobiDev->qmap_version == 5) {
+            add_qhdr(skb, QUECTEL_QMAP_MUX_ID);
+        }
+        else if (pGobiDev->qmap_version == 9) {
+            add_qhdr_v5(skb, QUECTEL_QMAP_MUX_ID);
+        }
+        else {
+            goto drop_skb;
+        }
+
+        return skb;
+    }
+#endif
+
+#ifdef QUECTEL_BRIDGE_MODE
+    if (pGobiDev->m_bridge_mode && bridge_mode_tx_fixup(dev->net, skb, pGobiDev->m_bridge_ipv4, pGobiDev->mHostMAC) == NULL) {
+        goto drop_skb;
+    }
+#endif
+
+    // Skip Ethernet header from message
+    if (likely(ether_to_ip_fixup(dev->net, skb))) {
+        return skb;
+    }
+    else {
+#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,22 ))
+        dev_err(&dev->intf->dev, "Packet Dropped ");
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,18 ))
+        dev_err(dev->net->dev.parent, "Packet Dropped ");
+#else
+        INFO("Packet Dropped ");
+#endif
+    }
+
+#if defined(QUECTEL_WWAN_QMAP)
+drop_skb:
+#endif
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 2,6,24 )) && defined(CONFIG_X86_32)
+    INFO("dev_kfree_skb_any() will make the kernel panic on CentOS!\n");
+    quec_debug=1;PrintHex(skb->data, 32);quec_debug=0;
+#else
+    // Filter the packet out, release it
+    dev_kfree_skb_any(skb);
+#endif
+
+    return NULL;
+}
+
+#if defined(QUECTEL_WWAN_MULTI_PACKAGES)
+static int GobiNetDriverRxPktsFixup(struct usbnet *dev, struct sk_buff *skb)
+{
+    sGobiUSBNet * pGobiDev = (sGobiUSBNet *)dev->data[0];
+
+    if (!pGobiDev->mbRawIPMode)
+        return 1;
+
+    /* This check is no longer done by usbnet */
+    if (skb->len < dev->net->hard_header_len)
+        return 0;
+
+    if (!rx_packets) {
+        return GobiNetDriverRxFixup(dev, skb);
+    }
+
+    while (likely(skb->len)) {
+        struct sk_buff* new_skb;
+        struct quec_net_package_header package_header;
+
+        if (skb->len < sizeof(package_header))
+            return 0;
+
+        memcpy(&package_header, skb->data, sizeof(package_header));
+        package_header.payload_len = be16_to_cpu(package_header.payload_len);
+
+        if (package_header.msg_spec != QUEC_NET_MSG_SPEC || package_header.msg_id != QUEC_NET_MSG_ID_IP_DATA)
+            return 0;
+
+        if (skb->len < (package_header.payload_len + sizeof(package_header)))
+            return 0;
+
+        skb_pull(skb, sizeof(package_header));
+
+        if (skb->len == package_header.payload_len)
+            return GobiNetDriverRxFixup(dev, skb);
+
+        new_skb = skb_clone(skb, GFP_ATOMIC);
+        if (new_skb) {
+            skb_trim(new_skb, package_header.payload_len);
+            if (GobiNetDriverRxFixup(dev, new_skb))
+                usbnet_skb_return(dev, new_skb);
+            else
+                return 0;
+        }
+
+        skb_pull(skb, package_header.payload_len);
+    }
+
+    return 0;
+}
+#endif
+#ifdef QUECTEL_WWAN_QMAP
+static int GobiNetDriverRxQmapFixup(struct usbnet *dev, struct sk_buff *skb)
+{
+#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,35 )) //ab95bfe01f9872459c8678572ccadbf646badad0
+    rx_handler_func_t *rx_handler;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,3,1 )) //7bdd402706cf26bfef9050dfee3f229b7f33ee4f
+    if (skb->dev == NULL) {
+        skb->dev = dev->net;
+    }
+#endif
+    rx_handler = rcu_dereference(skb->dev->rx_handler);
+
+    if (rx_handler == rmnet_usb_rx_handler) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,3,1 )) //7bdd402706cf26bfef9050dfee3f229b7f33ee4f
+        unsigned headroom = skb_headroom(skb);
+        if (headroom < ETH_HLEN) {
+            unsigned tailroom = skb_tailroom(skb);
+            if ((tailroom + headroom) >= ETH_HLEN) {
+                unsigned moveroom = ETH_HLEN - headroom;
+                memmove(skb->data + moveroom ,skb->data, skb->len);
+                skb->data += moveroom;
+                skb->tail += moveroom;
+    #ifdef WARN_ONCE
+                WARN_ONCE(1, "It is better to reserve headroom in usbnet.c:rx_submit()!\n");
+    #endif
+            }
+        }
+#endif
+
+        if (dev->net->type == ARPHRD_ETHER && skb_headroom(skb) >= ETH_HLEN) {
+            //usbnet.c rx_process() usbnet_skb_return() eth_type_trans()
+            skb_push(skb, ETH_HLEN);
+            skb_reset_mac_header(skb);
+            memcpy(eth_hdr(skb)->h_source, default_modem_addr, ETH_ALEN);
+            memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
+            eth_hdr(skb)->h_proto = htons(ETH_P_MAP);
+
+            return 1;
+        }
+
+#ifdef WARN_ONCE
+        WARN_ONCE(1, "skb_headroom < ETH_HLEN\n");
+#endif
+        return 0;
+    }
+#endif
+
+    _rmnet_usb_rx_handler(dev, skb);
+    return 0;
+}
+#endif
+/*===========================================================================
+METHOD:
+   GobiNetDriverRxFixup (Public Method)
+
+DESCRIPTION:
+   Handling data format mode on receive path
+
+PARAMETERS
+   pDev           [ I ] - Pointer to usbnet device
+   pSKB           [ I ] - Pointer to received packet buffer
+
+RETURN VALUE:
+   None
+===========================================================================*/
+static int GobiNetDriverRxFixup(struct usbnet *dev, struct sk_buff *skb)
+{
+    __be16 proto;
+    sGobiUSBNet * pGobiDev = (sGobiUSBNet *)dev->data[0];
+
+    if (!pGobiDev->mbRawIPMode)
+        return 1;
+
+    /* This check is no longer done by usbnet */
+    if (skb->len < dev->net->hard_header_len)
+        return 0;
+
+#ifdef QUECTEL_WWAN_QMAP
+    if (pGobiDev->qmap_mode) {
+        return GobiNetDriverRxQmapFixup(dev, skb);
+    }
+#endif
+
+    switch (skb->data[0] & 0xf0) {
+    case 0x40:
+        proto = htons(ETH_P_IP);
+        break;
+    case 0x60:
+        proto = htons(ETH_P_IPV6);
+        break;
+    case 0x00:
+        if (is_multicast_ether_addr(skb->data))
+            return 1;
+        /* possibly bogus destination - rewrite just in case */
+        skb_reset_mac_header(skb);
+        goto fix_dest;
+    default:
+        /* pass along other packets without modifications */
+        return 1;
+    }
+    if (skb_headroom(skb) < ETH_HLEN && pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
+        DBG("%s: couldn't pskb_expand_head\n", __func__);
+        return 0;
+    }
+    skb_push(skb, ETH_HLEN);
+    skb_reset_mac_header(skb);
+    eth_hdr(skb)->h_proto = proto;
+    memcpy(eth_hdr(skb)->h_source, ec20_mac, ETH_ALEN);
+fix_dest:
+#ifdef QUECTEL_BRIDGE_MODE
+    bridge_mode_rx_fixup(pGobiDev, dev->net, skb);
+#else
+    memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
+#endif
+
+#ifdef QUECTEL_BRIDGE_MODE
+#if 0
+    if (pGobiDev->m_bridge_mode) {
+        struct ethhdr *ehdr = eth_hdr(skb);
+quec_debug = 1;
+        DBG(": ");
+        PrintHex(ehdr, sizeof(struct ethhdr));
+quec_debug = 0;
+    }
+#endif
+#endif
+
+    return 1;
+}
+#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
+#ifdef CONFIG_PM
+/*===========================================================================
+METHOD:
+   GobiUSBNetURBCallback (Public Method)
+
+DESCRIPTION:
+   Write is complete, cleanup and signal that we're ready for next packet
+
+PARAMETERS
+   pURB     [ I ] - Pointer to sAutoPM struct
+
+RETURN VALUE:
+   None
+===========================================================================*/
+#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,18 ))
+void GobiUSBNetURBCallback( struct urb * pURB )
+#else
+void GobiUSBNetURBCallback(struct urb *pURB, struct pt_regs *regs)
+#endif
+{
+    unsigned long activeURBflags;
+    sAutoPM * pAutoPM = (sAutoPM *)pURB->context;
+    if (pAutoPM == NULL)
+    {
+        // Should never happen
+        DBG( "bad context\n" );
+        return;
+    }
+
+    if (pURB->status != 0)
+    {
+        // Note that in case of an error, the behaviour is no different
+        DBG( "urb finished with error %d\n", pURB->status );
+    }
+
+    // Remove activeURB (memory to be freed later)
+    spin_lock_irqsave( &pAutoPM->mActiveURBLock, activeURBflags );
+
+    // EAGAIN used to signify callback is done
+    pAutoPM->mpActiveURB = ERR_PTR( -EAGAIN );
+
+    spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
+
+    complete( &pAutoPM->mThreadDoWork );
+
+#ifdef URB_FREE_BUFFER_BY_SELF
+    if (pURB->transfer_flags & URB_FREE_BUFFER)
+        kfree(pURB->transfer_buffer);
+#endif
+    usb_free_urb( pURB );
+}
+
+/*===========================================================================
+METHOD:
+   GobiUSBNetTXTimeout (Public Method)
+
+DESCRIPTION:
+   Timeout declared by the net driver. Stop all transfers
+
+PARAMETERS
+   pNet     [ I ] - Pointer to net device
+
+RETURN VALUE:
+   None
+===========================================================================*/
+void GobiUSBNetTXTimeout( struct net_device * pNet )
+{
+    struct sGobiUSBNet * pGobiDev;
+    sAutoPM * pAutoPM;
+    sURBList * pURBListEntry;
+    unsigned long activeURBflags, URBListFlags;
+    struct usbnet * pDev = netdev_priv( pNet );
+    struct urb * pURB;
+
+    if (pDev == NULL || pDev->net == NULL)
+    {
+        DBG( "failed to get usbnet device\n" );
+        return;
+    }
+
+    pGobiDev = (sGobiUSBNet *)pDev->data[0];
+    if (pGobiDev == NULL)
+    {
+        DBG( "failed to get QMIDevice\n" );
+        return;
+    }
+    pAutoPM = &pGobiDev->mAutoPM;
+
+    DBG( "\n" );
+
+    // Grab a pointer to active URB
+    spin_lock_irqsave( &pAutoPM->mActiveURBLock, activeURBflags );
+    pURB = pAutoPM->mpActiveURB;
+    spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
+    // Stop active URB
+    if (pURB != NULL)
+    {
+        usb_kill_urb( pURB );
+    }
+
+    // Cleanup URB List
+    spin_lock_irqsave( &pAutoPM->mURBListLock, URBListFlags );
+
+    pURBListEntry = pAutoPM->mpURBList;
+    while (pURBListEntry != NULL)
+    {
+        pAutoPM->mpURBList = pAutoPM->mpURBList->mpNext;
+        atomic_dec( &pAutoPM->mURBListLen );
+        usb_free_urb( pURBListEntry->mpURB );
+        kfree( pURBListEntry );
+        pURBListEntry = pAutoPM->mpURBList;
+    }
+
+    spin_unlock_irqrestore( &pAutoPM->mURBListLock, URBListFlags );
+
+    complete( &pAutoPM->mThreadDoWork );
+
+    return;
+}
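+/*
+ * [Editorial note, illustrative only.] On pre-2.6.29 kernels the driver
+ * queues every TX packet as a URB on mpURBList (see GobiUSBNetStartXmit
+ * further below); the AutoPM thread below pops one entry at a time, calls
+ * usb_autopm_get_interface() so the device is resumed, submits the URB, and
+ * GobiUSBNetURBCallback() then signals mThreadDoWork to continue the loop.
+ */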
+/*===========================================================================
+METHOD:
+   GobiUSBNetAutoPMThread (Public Method)
+
+DESCRIPTION:
+   Handle device Auto PM state asynchronously
+   Handle network packet transmission asynchronously
+
+PARAMETERS
+   pData     [ I ] - Pointer to sAutoPM struct
+
+RETURN VALUE:
+   int - 0 for success
+         Negative errno for error
+===========================================================================*/
+static int GobiUSBNetAutoPMThread( void * pData )
+{
+    unsigned long activeURBflags, URBListFlags;
+    sURBList * pURBListEntry;
+    int status;
+    struct usb_device * pUdev;
+    sAutoPM * pAutoPM = (sAutoPM *)pData;
+    struct urb * pURB;
+
+    if (pAutoPM == NULL)
+    {
+        DBG( "passed null pointer\n" );
+        return -EINVAL;
+    }
+
+    pUdev = interface_to_usbdev( pAutoPM->mpIntf );
+
+    DBG( "traffic thread started\n" );
+
+    while (pAutoPM->mbExit == false)
+    {
+        // Wait for someone to poke us
+        wait_for_completion_interruptible( &pAutoPM->mThreadDoWork );
+
+        // Time to exit?
+        if (pAutoPM->mbExit == true)
+        {
+            // Stop activeURB
+            spin_lock_irqsave( &pAutoPM->mActiveURBLock, activeURBflags );
+            pURB = pAutoPM->mpActiveURB;
+            spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
+
+            // EAGAIN used to signify callback is done
+            if (IS_ERR( pAutoPM->mpActiveURB )
+            &&  PTR_ERR( pAutoPM->mpActiveURB ) == -EAGAIN )
+            {
+                pURB = NULL;
+            }
+
+            if (pURB != NULL)
+            {
+                usb_kill_urb( pURB );
+            }
+            // Will be freed in callback function
+
+            // Cleanup URB List
+            spin_lock_irqsave( &pAutoPM->mURBListLock, URBListFlags );
+
+            pURBListEntry = pAutoPM->mpURBList;
+            while (pURBListEntry != NULL)
+            {
+                pAutoPM->mpURBList = pAutoPM->mpURBList->mpNext;
+                atomic_dec( &pAutoPM->mURBListLen );
+                usb_free_urb( pURBListEntry->mpURB );
+                kfree( pURBListEntry );
+                pURBListEntry = pAutoPM->mpURBList;
+            }
+
+            spin_unlock_irqrestore( &pAutoPM->mURBListLock, URBListFlags );
+
+            break;
+        }
+
+        // Is our URB active?
+        spin_lock_irqsave( &pAutoPM->mActiveURBLock, activeURBflags );
+
+        // EAGAIN used to signify callback is done
+        if (IS_ERR( pAutoPM->mpActiveURB )
+        &&  PTR_ERR( pAutoPM->mpActiveURB ) == -EAGAIN )
+        {
+            pAutoPM->mpActiveURB = NULL;
+
+            // Restore IRQs so task can sleep
+            spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
+
+            // URB is done, decrement the Auto PM usage count
+            usb_autopm_put_interface( pAutoPM->mpIntf );
+
+            // Lock ActiveURB again
+            spin_lock_irqsave( &pAutoPM->mActiveURBLock, activeURBflags );
+        }
+
+        if (pAutoPM->mpActiveURB != NULL)
+        {
+            // There is already a URB active, go back to sleep
+            spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
+            continue;
+        }
+
+        // Is there a URB waiting to be submitted?
+        spin_lock_irqsave( &pAutoPM->mURBListLock, URBListFlags );
+        if (pAutoPM->mpURBList == NULL)
+        {
+            // No more URBs to submit, go back to sleep
+            spin_unlock_irqrestore( &pAutoPM->mURBListLock, URBListFlags );
+            spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
+            continue;
+        }
+
+        // Pop an element
+        pURBListEntry = pAutoPM->mpURBList;
+        pAutoPM->mpURBList = pAutoPM->mpURBList->mpNext;
+        atomic_dec( &pAutoPM->mURBListLen );
+        spin_unlock_irqrestore( &pAutoPM->mURBListLock, URBListFlags );
+
+        // Set ActiveURB
+        pAutoPM->mpActiveURB = pURBListEntry->mpURB;
+        spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
+
+        // Tell autopm core we need device woken up
+        status = usb_autopm_get_interface( pAutoPM->mpIntf );
+        if (status < 0)
+        {
+            DBG( "unable to autoresume interface: %d\n", status );
+
+            // likely caused by device going from autosuspend -> full suspend
+            if (status == -EPERM)
+            {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,33 ))
+#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,18 ))
+                pUdev->auto_pm = 0;
+#else
+                pUdev = pUdev;
+#endif
+#endif
+                GobiNetSuspend( pAutoPM->mpIntf, PMSG_SUSPEND );
+            }
+
+            // Add pURBListEntry back onto pAutoPM->mpURBList
+            spin_lock_irqsave( &pAutoPM->mURBListLock, URBListFlags );
+            pURBListEntry->mpNext = pAutoPM->mpURBList;
+            pAutoPM->mpURBList = pURBListEntry;
+            atomic_inc( &pAutoPM->mURBListLen );
+            spin_unlock_irqrestore( &pAutoPM->mURBListLock, URBListFlags );
+
+            spin_lock_irqsave( &pAutoPM->mActiveURBLock, activeURBflags );
+            pAutoPM->mpActiveURB = NULL;
+            spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
+
+            // Go back to sleep
+            continue;
+        }
+
+        // Submit URB
+        status = usb_submit_urb( pAutoPM->mpActiveURB, GFP_KERNEL );
+        if (status < 0)
+        {
+            // Could happen for a number of reasons
+            DBG( "Failed to submit URB: %d. Packet dropped\n", status );
+            spin_lock_irqsave( &pAutoPM->mActiveURBLock, activeURBflags );
+            usb_free_urb( pAutoPM->mpActiveURB );
+            pAutoPM->mpActiveURB = NULL;
+            spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
+            usb_autopm_put_interface( pAutoPM->mpIntf );
+
+            // Loop again
+            complete( &pAutoPM->mThreadDoWork );
+        }
+
+        kfree( pURBListEntry );
+    }
+
+    DBG( "traffic thread exiting\n" );
+    pAutoPM->mpThread = NULL;
+    return 0;
+}
+/*===========================================================================
+METHOD:
+   GobiUSBNetStartXmit (Public Method)
+
+DESCRIPTION:
+   Convert sk_buff to usb URB and queue for transmit
+
+PARAMETERS
+   pNet     [ I ] - Pointer to net device
+
+RETURN VALUE:
+   NETDEV_TX_OK on success
+   NETDEV_TX_BUSY on error
+===========================================================================*/
+int GobiUSBNetStartXmit(
+    struct sk_buff * pSKB,
+    struct net_device * pNet )
+{
+    unsigned long URBListFlags;
+    struct sGobiUSBNet * pGobiDev;
+    sAutoPM * pAutoPM;
+    sURBList * pURBListEntry, ** ppURBListEnd;
+    void * pURBData;
+    struct usbnet * pDev = netdev_priv( pNet );
+
+    //DBG( "\n" );
+
+    if (pDev == NULL || pDev->net == NULL)
+    {
+        DBG( "failed to get usbnet device\n" );
+        return NETDEV_TX_BUSY;
+    }
+
+    pGobiDev = (sGobiUSBNet *)pDev->data[0];
+    if (pGobiDev == NULL)
+    {
+        DBG( "failed to get QMIDevice\n" );
+        return NETDEV_TX_BUSY;
+    }
+    pAutoPM = &pGobiDev->mAutoPM;
+
+    if( NULL == pSKB )
+    {
+        DBG( "Buffer is NULL \n" );
+        return NETDEV_TX_BUSY;
+    }
+
+    if (GobiTestDownReason( pGobiDev, DRIVER_SUSPENDED ))
+    {
+        // Should not happen
+        DBG( "device is suspended\n" );
+        dump_stack();
+        return NETDEV_TX_BUSY;
+    }
+
+    if (GobiTestDownReason( pGobiDev, NO_NDIS_CONNECTION ))
+    {
+        //netif_carrier_off( pGobiDev->mpNetDev->net );
+        //DBG( "device is disconnected\n" );
+        //dump_stack();
+        return NETDEV_TX_BUSY;
+    }
+
+    // Convert the sk_buff into a URB
+
+    // Check if buffer is full
+    if ( atomic_read( &pAutoPM->mURBListLen ) >= txQueueLength)
+    {
+        DBG( "not scheduling request, buffer is full\n" );
+        return NETDEV_TX_BUSY;
+    }
+
+    // Allocate URBListEntry
+    pURBListEntry = kmalloc( sizeof( sURBList ), GFP_ATOMIC );
+    if (pURBListEntry == NULL)
+    {
+        DBG( "unable to allocate URBList memory\n" );
+        return NETDEV_TX_BUSY;
+    }
+    pURBListEntry->mpNext = NULL;
+
+    // Allocate URB
+    pURBListEntry->mpURB = usb_alloc_urb( 0, GFP_ATOMIC );
+    if (pURBListEntry->mpURB == NULL)
+    {
+        DBG( "unable to allocate URB\n" );
+        // release all memory allocated by now
+        if (pURBListEntry)
+            kfree( pURBListEntry );
+        return NETDEV_TX_BUSY;
+    }
+
+#if 1 //def DATA_MODE_RP
+    GobiNetDriverTxFixup(pDev, pSKB, GFP_ATOMIC);
+#endif
+
+    // Allocate URB transfer_buffer
+    pURBData = kmalloc( pSKB->len, GFP_ATOMIC );
+    if (pURBData == NULL)
+    {
+        DBG( "unable to allocate URB data\n" );
+        // release all memory allocated by now
+        if (pURBListEntry)
+        {
+            usb_free_urb( pURBListEntry->mpURB );
+            kfree( pURBListEntry );
+        }
+        return NETDEV_TX_BUSY;
+    }
+    // Fill with SKB's data
+    memcpy( pURBData, pSKB->data, pSKB->len );
+
+    usb_fill_bulk_urb( pURBListEntry->mpURB,
+                       pGobiDev->mpNetDev->udev,
+                       pGobiDev->mpNetDev->out,
+                       pURBData,
+                       pSKB->len,
+                       GobiUSBNetURBCallback,
+                       pAutoPM );
+
+    /* Handle the need to send a zero length packet and release the
+     * transfer buffer
+     */
+    pURBListEntry->mpURB->transfer_flags |= (URB_ZERO_PACKET | URB_FREE_BUFFER);
+
+    // Acquire lock on URBList
+    spin_lock_irqsave( &pAutoPM->mURBListLock, URBListFlags );
+
+    // Add URB to end of list
+    ppURBListEnd = &pAutoPM->mpURBList;
+    while ((*ppURBListEnd) != NULL)
+    {
+        ppURBListEnd = &(*ppURBListEnd)->mpNext;
+    }
+    *ppURBListEnd = pURBListEntry;
+    atomic_inc( &pAutoPM->mURBListLen );
+
+    spin_unlock_irqrestore( &pAutoPM->mURBListLock, URBListFlags );
+
+    complete( &pAutoPM->mThreadDoWork );
+
+    // Start transfer timer
+    pNet->trans_start = jiffies;
+    // Free SKB
+    if (pSKB)
+        dev_kfree_skb_any( pSKB );
+
+    return NETDEV_TX_OK;
+}
+#endif
+static int (*local_usbnet_start_xmit) (struct sk_buff *skb, struct net_device *net);
+#endif
+static int GobiUSBNetStartXmit2( struct sk_buff *pSKB, struct net_device *pNet ){
+    struct sGobiUSBNet * pGobiDev;
+    struct usbnet * pDev = netdev_priv( pNet );
+
+    //DBG( "\n" );
+
+    if (pDev == NULL || pDev->net == NULL)
+    {
+        DBG( "failed to get usbnet device\n" );
+        return NETDEV_TX_BUSY;
+    }
+
+    pGobiDev = (sGobiUSBNet *)pDev->data[0];
+    if (pGobiDev == NULL)
+    {
+        DBG( "failed to get QMIDevice\n" );
+        return NETDEV_TX_BUSY;
+    }
+
+    if( NULL == pSKB )
+    {
+        DBG( "Buffer is NULL \n" );
+        return NETDEV_TX_BUSY;
+    }
+
+    if (GobiTestDownReason( pGobiDev, DRIVER_SUSPENDED ))
+    {
+        // Should not happen
+        DBG( "device is suspended\n" );
+        dump_stack();
+        return NETDEV_TX_BUSY;
+    }
+
+    if (GobiTestDownReason( pGobiDev, NO_NDIS_CONNECTION ))
+    {
+        //netif_carrier_off( pGobiDev->mpNetDev->net );
+        //DBG( "device is disconnected\n" );
+        //dump_stack();
+        return NETDEV_TX_BUSY;
+    }
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
+    return local_usbnet_start_xmit(pSKB, pNet);
+#else
+    return usbnet_start_xmit(pSKB, pNet);
+#endif
+}
+
+/*===========================================================================
+METHOD:
+   GobiUSBNetOpen (Public Method)
+
+DESCRIPTION:
+   Wrapper to usbnet_open, correctly handling autosuspend
+   Start AutoPM thread (if CONFIG_PM is defined)
+
+PARAMETERS
+   pNet     [ I ] - Pointer to net device
+
+RETURN VALUE:
+   int - 0 for success
+         Negative errno for error
+===========================================================================*/
+static int GobiUSBNetOpen( struct net_device * pNet )
+{
+    int status = 0;
+    struct sGobiUSBNet * pGobiDev;
+    struct usbnet * pDev = netdev_priv( pNet );
+
+    if (pDev == NULL)
+    {
+        DBG( "failed to get usbnet device\n" );
+        return -ENXIO;
+    }
+
+    pGobiDev = (sGobiUSBNet *)pDev->data[0];
+    if (pGobiDev == NULL)
+    {
+        DBG( "failed to get QMIDevice\n" );
+        return -ENXIO;
+    }
+
+    DBG( "\n" );
+
+#ifdef CONFIG_PM
+   #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
+    // Start the AutoPM thread
+    pGobiDev->mAutoPM.mpIntf = pGobiDev->mpIntf;
+    pGobiDev->mAutoPM.mbExit = false;
+    pGobiDev->mAutoPM.mpURBList = NULL;
+    pGobiDev->mAutoPM.mpActiveURB = NULL;
+    spin_lock_init( &pGobiDev->mAutoPM.mURBListLock );
+    spin_lock_init( &pGobiDev->mAutoPM.mActiveURBLock );
+    atomic_set( &pGobiDev->mAutoPM.mURBListLen, 0 );
+    init_completion( &pGobiDev->mAutoPM.mThreadDoWork );
+
+    pGobiDev->mAutoPM.mpThread = kthread_run( GobiUSBNetAutoPMThread,
+                                              &pGobiDev->mAutoPM,
+                                              "GobiUSBNetAutoPMThread" );
+    if (IS_ERR( pGobiDev->mAutoPM.mpThread ))
+    {
+        DBG( "AutoPM thread creation error\n" );
+        return PTR_ERR( pGobiDev->mAutoPM.mpThread );
+    }
+   #endif
+#endif /* CONFIG_PM */
+
+    // Allow traffic
+    GobiClearDownReason( pGobiDev, NET_IFACE_STOPPED );
+
+    // Pass to usbnet_open if defined
+    if (pGobiDev->mpUSBNetOpen != NULL)
+    {
+        status = pGobiDev->mpUSBNetOpen( pNet );
+#ifdef CONFIG_PM
+        // If usbnet_open was successful enable Auto PM
+        if (status == 0)
+        {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,33 ))
+            usb_autopm_enable( pGobiDev->mpIntf );
+#else
+            usb_autopm_put_interface( pGobiDev->mpIntf );
+#endif
+        }
+#endif /* CONFIG_PM */
+    }
+    else
+    {
+        DBG( "no USBNetOpen defined\n" );
+    }
+
+    return status;
+}
+/*===========================================================================
+METHOD:
+   GobiUSBNetStop (Public Method)
+
+DESCRIPTION:
+   Wrapper to usbnet_stop, correctly handling autosuspend
+   Stop AutoPM thread (if CONFIG_PM is defined)
+
+PARAMETERS
+   pNet     [ I ] - Pointer to net device
+
+RETURN VALUE:
+   int - 0 for success
+         Negative errno for error
+===========================================================================*/
+static int GobiUSBNetStop( struct net_device * pNet )
+{
+    struct sGobiUSBNet * pGobiDev;
+    struct usbnet * pDev = netdev_priv( pNet );
+
+    if (pDev == NULL || pDev->net == NULL)
+    {
+        DBG( "failed to get netdevice\n" );
+        return -ENXIO;
+    }
+
+    pGobiDev = (sGobiUSBNet *)pDev->data[0];
+    if (pGobiDev == NULL)
+    {
+        DBG( "failed to get QMIDevice\n" );
+        return -ENXIO;
+    }
+
+    // Stop traffic
+    GobiSetDownReason( pGobiDev, NET_IFACE_STOPPED );
+
+#ifdef CONFIG_PM
+   #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
+    // Tell traffic thread to exit
+    pGobiDev->mAutoPM.mbExit = true;
+    complete( &pGobiDev->mAutoPM.mThreadDoWork );
+
+    // Wait for it to exit
+    while( pGobiDev->mAutoPM.mpThread != NULL )
+    {
+        msleep( 100 );
+    }
+    DBG( "thread stopped\n" );
+   #endif
+#endif /* CONFIG_PM */
+
+    // Pass to usbnet_stop, if defined
+    if (pGobiDev->mpUSBNetStop != NULL)
+    {
+        return pGobiDev->mpUSBNetStop( pNet );
+    }
+    else
+    {
+        return 0;
+    }
+}
+
+static int GobiNetDriver_check_connect(struct usbnet *pDev) {
+    int status = 0;
+    struct sGobiUSBNet * pGobiDev = NULL;
+
+    while (status++ < 10) {
+        pGobiDev = (sGobiUSBNet *)pDev->data[0];
+        if (pGobiDev && pGobiDev->mbProbeDone)
+            break;
+        msleep(1);
+    }
+
+    return 0;
+}
+
+/*=========================================================================*/
+// Struct driver_info
+/*=========================================================================*/
+static struct driver_info GobiNetInfo =
+{
+    .description   = "GobiNet Ethernet Device",
+#if 1 //def CONFIG_ANDROID
+#if defined(QUECTEL_WWAN_QMAP) && defined(FLAG_RX_ASSEMBLE)
+    .flags         = FLAG_RX_ASSEMBLE, //usb0
+#endif
+#else
+#if defined(QUECTEL_WWAN_QMAP) && defined(FLAG_RX_ASSEMBLE)
+    .flags         = FLAG_ETHER | FLAG_RX_ASSEMBLE,
+#else
+    .flags         = FLAG_ETHER,
+#endif
+#endif
+    .bind          = GobiNetDriverBind,
+    .unbind        = GobiNetDriverUnbind,
+#if 1 //def DATA_MODE_RP
+#if defined(QUECTEL_WWAN_MULTI_PACKAGES)
+    .rx_fixup      = GobiNetDriverRxPktsFixup,
+#else
+    .rx_fixup      = GobiNetDriverRxFixup,
+#endif
+    .tx_fixup      = GobiNetDriverTxFixup,
+#endif
+    .check_connect = GobiNetDriver_check_connect,
+    .data          = (1 << 4),
+};
+
+/*=========================================================================*/
+// Qualcomm Gobi 3000 VID/PIDs
+/*=========================================================================*/
+#define GOBI_FIXED_INTF(vend, prod) \
+    { \
+        USB_DEVICE( vend, prod ), \
+        .driver_info = (unsigned long)&GobiNetInfo, \
+    }
+static const struct usb_device_id QuecGobiVIDPIDTable [] =
+{
+    GOBI_FIXED_INTF( 0x05c6, 0x9003 ), // Quectel UC20
+    GOBI_FIXED_INTF( 0x05c6, 0x9215 ), // Quectel EC20 (MDM9215)
+    GOBI_FIXED_INTF( 0x2c7c, 0x0125 ), // Quectel EC20 (MDM9X07)/EC25/EG25
+    GOBI_FIXED_INTF( 0x2c7c, 0x0121 ), // Quectel EC21
+    GOBI_FIXED_INTF( 0x2c7c, 0x0306 ), // Quectel EP06
+    GOBI_FIXED_INTF( 0x2c7c, 0x030B ), // Quectel EG065K, SDX12
+    GOBI_FIXED_INTF( 0x2c7c, 0x0435 ), // Quectel AG35
+    GOBI_FIXED_INTF( 0x2c7c, 0x0296 ), // Quectel BG96
+    GOBI_FIXED_INTF( 0x2c7c, 0x0191 ), // Quectel EG91
+    GOBI_FIXED_INTF( 0x2c7c, 0x0195 ), // Quectel EG95
+    GOBI_FIXED_INTF( 0x2c7c, 0x0512 ), // Quectel EG12/EP12/EM12/EG16/EG18, SDx20
+    GOBI_FIXED_INTF( 0x2c7c, 0x0620 ), // Quectel EG20, SDx24
+    GOBI_FIXED_INTF( 0x2c7c, 0x0800 ), // Quectel RG500Q/RM500Q/RM510Q, SDX55
+    GOBI_FIXED_INTF( 0x2c7c, 0x0801 ), // Quectel RG520Q/RM520Q/SG520Q, SDX6X
+    //Terminating entry
+    { }
+};
+
+MODULE_DEVICE_TABLE( usb, QuecGobiVIDPIDTable );
+/*===========================================================================
+METHOD:
+   GobiUSBNetProbe (Public Method)
+
+DESCRIPTION:
+   Run usbnet_probe
+   Setup QMI device
+
+PARAMETERS
+   pIntf        [ I ] - Pointer to interface
+   pVIDPIDs     [ I ] - Pointer to VID/PID table
+
+RETURN VALUE:
+   int - 0 for success
+         Negative errno for error
+===========================================================================*/
+static int GobiUSBNetProbe(
+    struct usb_interface * pIntf,
+    const struct usb_device_id * pVIDPIDs )
+{
+    int status;
+    struct usbnet * pDev;
+    sGobiUSBNet * pGobiDev;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION( 2,6,29 ))
+    struct net_device_ops * pNetDevOps;
+#endif
+
+    status = usbnet_probe( pIntf, pVIDPIDs );
+    if (status < 0)
+    {
+        DBG( "usbnet_probe failed %d\n", status );
+        return status;
+    }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION( 2,6,19 ))
+    pIntf->needs_remote_wakeup = 1;
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,23 ))
+    pDev = usb_get_intfdata( pIntf );
+#else
+    pDev = (struct usbnet *)pIntf->dev.platform_data;
+#endif
+
+    if (pDev == NULL || pDev->net == NULL)
+    {
+        DBG( "failed to get netdevice\n" );
+        usbnet_disconnect( pIntf );
+        return -ENXIO;
+    }
+
+    pGobiDev = kzalloc( sizeof( sGobiUSBNet ), GFP_KERNEL );
+    if (pGobiDev == NULL)
+    {
+        DBG( "failed to allocate device buffers" );
+        usbnet_disconnect( pIntf );
+        return -ENOMEM;
+    }
+
+    atomic_set(&pGobiDev->refcount, 1);
+
+    pDev->data[0] = (unsigned long)pGobiDev;
+
+    pGobiDev->mpNetDev = pDev;
+
+    // Clearing endpoint halt is a magic handshake that brings
+    // the device out of low power (airplane) mode
+    usb_clear_halt( pGobiDev->mpNetDev->udev, pDev->out );
+
+    // Overload PM related network functions
+#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
+    pGobiDev->mpUSBNetOpen = pDev->net->open;
+    pDev->net->open = GobiUSBNetOpen;
+    pGobiDev->mpUSBNetStop = pDev->net->stop;
+    pDev->net->stop = GobiUSBNetStop;
+#if defined(CONFIG_PM) && (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,14 ))
+    pDev->net->hard_start_xmit = GobiUSBNetStartXmit;
+    pDev->net->tx_timeout = GobiUSBNetTXTimeout;
+#else //Quectel: do not send DHCP requests before the NDIS connect for UC20
+    local_usbnet_start_xmit = pDev->net->hard_start_xmit;
+    pDev->net->hard_start_xmit = GobiUSBNetStartXmit2;
+#endif
+#else
+    pNetDevOps = kmalloc( sizeof( struct net_device_ops ), GFP_KERNEL );
+    if (pNetDevOps == NULL)
+    {
+        DBG( "failed to allocate net device ops" );
+        usbnet_disconnect( pIntf );
+        return -ENOMEM;
+    }
+    memcpy( pNetDevOps, pDev->net->netdev_ops, sizeof( struct net_device_ops ) );
+
+    pGobiDev->mpUSBNetOpen = pNetDevOps->ndo_open;
+    pNetDevOps->ndo_open = GobiUSBNetOpen;
+    pGobiDev->mpUSBNetStop = pNetDevOps->ndo_stop;
+    pNetDevOps->ndo_stop = GobiUSBNetStop;
+#if 1 //Quectel: do not send DHCP requests before the NDIS connect for UC20
+    pNetDevOps->ndo_start_xmit = GobiUSBNetStartXmit2;
+#else
+    pNetDevOps->ndo_start_xmit = usbnet_start_xmit;
+#endif
+    pNetDevOps->ndo_tx_timeout = usbnet_tx_timeout;
defined(QUECTEL_WWAN_QMAP) + pNetDevOps->ndo_do_ioctl = qmap_ndo_do_ioctl; +#endif + + pDev->net->netdev_ops = pNetDevOps; +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,31 )) + memset( &(pGobiDev->mpNetDev->stats), 0, sizeof( struct net_device_stats ) ); +#else + memset( &(pGobiDev->mpNetDev->net->stats), 0, sizeof( struct net_device_stats ) ); +#endif + + pGobiDev->mpIntf = pIntf; + memset( &(pGobiDev->mMEID), '0', 14 ); + + DBG( "Mac Address:\n" ); + PrintHex( &pGobiDev->mpNetDev->net->dev_addr[0], 6 ); + + pGobiDev->mbQMIValid = false; + memset( &pGobiDev->mQMIDev, 0, sizeof( sQMIDev ) ); + pGobiDev->mQMIDev.mbCdevIsInitialized = false; + + pGobiDev->mQMIDev.mpDevClass = gpClass; + +#ifdef CONFIG_PM + #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 )) + init_completion( &pGobiDev->mAutoPM.mThreadDoWork ); + #endif +#endif /* CONFIG_PM */ + spin_lock_init( &pGobiDev->mQMIDev.mClientMemLock ); + + // Default to device down + pGobiDev->mDownReason = 0; + +//#if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,11,0 )) + GobiSetDownReason( pGobiDev, NO_NDIS_CONNECTION ); + GobiSetDownReason( pGobiDev, NET_IFACE_STOPPED ); +//#endif + + // Register QMI + pGobiDev->mbMdm9x07 |= (pDev->udev->descriptor.idVendor == cpu_to_le16(0x2c7c)); + pGobiDev->mbMdm9x06 |= (pDev->udev->descriptor.idVendor == cpu_to_le16(0x2c7c) && pDev->udev->descriptor.idProduct == cpu_to_le16(0x0296)); + pGobiDev->mbRawIPMode = pGobiDev->mbMdm9x07; + if ( pGobiDev->mbRawIPMode) + pGobiDev->mpNetDev->net->flags |= IFF_NOARP; +#ifdef QUECTEL_BRIDGE_MODE + memcpy(pGobiDev->mHostMAC, pDev->net->dev_addr, 6); + pGobiDev->m_bridge_mode = bridge_mode; +#endif + +#ifdef QUECTEL_REMOVE_TX_ZLP + { + struct remove_tx_zlp_config { + __le32 enable; + } __packed; + + struct remove_tx_zlp_config cfg; + cfg.enable = cpu_to_le32(1); //1-enable 0-disable + + usb_control_msg( + interface_to_usbdev(pIntf), + usb_sndctrlpipe(interface_to_usbdev(pIntf), 0), + USB_CDC_SET_REMOVE_TX_ZLP_COMMAND, + 0x21, //USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE + 0, + pIntf->cur_altsetting->desc.bInterfaceNumber, + &cfg, sizeof(cfg), 100); + } +#endif + + pGobiDev->m_qcrmcall_mode = qcrmcall_mode; + + if (pGobiDev->m_qcrmcall_mode) { + INFO("AT$QCRMCALL MODE!"); + + GobiClearDownReason( pGobiDev, NO_NDIS_CONNECTION ); + usb_control_msg( + interface_to_usbdev(pIntf), + usb_sndctrlpipe(interface_to_usbdev(pIntf), 0), + 0x22, //USB_CDC_REQ_SET_CONTROL_LINE_STATE + 0x21, //USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE + 1, //active CDC DTR + pIntf->cur_altsetting->desc.bInterfaceNumber, + NULL, 0, 100); + status = 0; + } + else { +#if defined(QUECTEL_WWAN_QMAP) + if (pGobiDev->mbRawIPMode) { + unsigned idProduct = le16_to_cpu(pDev->udev->descriptor.idProduct); + + pGobiDev->qmap_mode = qmap_mode; + if (pGobiDev->qmap_mode == 0) { + if (idProduct == 0x0800 || idProduct == 0x0801) { + pGobiDev->qmap_mode = 1; + } + } + + pGobiDev->qmap_version = 5; + if (idProduct == 0x0800 || idProduct == 0x0801) { + pGobiDev->qmap_version = 9; + } + } + + if (pGobiDev->qmap_mode) { + netif_carrier_off(pDev->net); + } + + if (pGobiDev->qmap_mode > 1) { +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,35 )) //ab95bfe01f9872459c8678572ccadbf646badad0 + rtnl_lock(); + netdev_rx_handler_register(pDev->net, rmnet_usb_rx_handler, NULL); + rtnl_unlock(); +#endif + } + +#if defined(QUECTEL_UL_DATA_AGG) + if (pGobiDev->qmap_mode) { + struct ul_agg_ctx *agg_ctx = &pGobiDev->agg_ctx; + + agg_ctx->ul_data_aggregation_max_datagrams = 1; + agg_ctx->ul_data_aggregation_max_size = 
2048; + agg_ctx->dl_minimum_padding = 0; + } +#endif +#endif + status = RegisterQMIDevice( pGobiDev ); + } + + if (status != 0) + { + // usbnet_disconnect() will call GobiNetDriverUnbind() which will call + // DeregisterQMIDevice() to clean up any partially created QMI device + usbnet_disconnect( pIntf ); + return status; + } + +#if defined(QUECTEL_WWAN_QMAP) + tasklet_init(&pGobiDev->txq, rmnet_usb_tx_wake_queue, (unsigned long)pGobiDev); + + if (pGobiDev->qmap_mode > 1) { + unsigned i; + + for (i = 0; i < pGobiDev->qmap_mode; i++) { + qmap_register_device(pGobiDev, i); + } + } else { + pGobiDev->mpQmapNetDev[0] = pDev->net; + } +#endif + + pGobiDev->mbProbeDone = 1; + // Success + return 0; +} + +static void GobiUSBNetDisconnect (struct usb_interface *intf) { +#if defined(QUECTEL_WWAN_QMAP) + struct usbnet *pDev = usb_get_intfdata(intf); + sGobiUSBNet * pGobiDev = (sGobiUSBNet *)pDev->data[0]; + unsigned i; + + if (pGobiDev->qmap_mode > 1) { + for (i = 0; i < pGobiDev->qmap_mode; i++) { + qmap_unregister_device(pGobiDev, i); + } + + } + + tasklet_kill(&pGobiDev->txq); +#endif + + usbnet_disconnect(intf); +} + +static struct usb_driver GobiNet = +{ + .name = "GobiNet", + .id_table = QuecGobiVIDPIDTable, + .probe = GobiUSBNetProbe, + .disconnect = GobiUSBNetDisconnect, +#ifdef CONFIG_PM + .suspend = GobiNetSuspend, + .resume = GobiNetResume, +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,27 )) + .reset_resume = GobiNetResetResume, +#endif +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,18 )) + .supports_autosuspend = true, +#endif +#endif /* CONFIG_PM */ +}; + +/*=========================================================================== +METHOD: + GobiUSBNetModInit (Public Method) + +DESCRIPTION: + Initialize module + Create device class + Register our usb_driver struct + +RETURN VALUE: + int - 0 for success + Negative errno for error +===========================================================================*/ +static int __init GobiUSBNetModInit( void ) +{ + gpClass = class_create( THIS_MODULE, "GobiQMI" ); + if (IS_ERR( gpClass ) == true) + { + DBG( "error at class_create %ld\n", PTR_ERR( gpClass ) ); + return -ENOMEM; + } + + // This will be shown whenever driver is loaded + printk( KERN_INFO "%s: %s\n", DRIVER_DESC, DRIVER_VERSION ); + + return usb_register( &GobiNet ); +} +module_init( GobiUSBNetModInit ); + +/*=========================================================================== +METHOD: + GobiUSBNetModExit (Public Method) + +DESCRIPTION: + Deregister module + Destroy device class + +RETURN VALUE: + void +===========================================================================*/ +static void __exit GobiUSBNetModExit( void ) +{ + usb_deregister( &GobiNet ); + + class_destroy( gpClass ); +} +module_exit( GobiUSBNetModExit ); + +MODULE_VERSION( DRIVER_VERSION ); +MODULE_AUTHOR( DRIVER_AUTHOR ); +MODULE_DESCRIPTION( DRIVER_DESC ); +MODULE_LICENSE("Dual BSD/GPL"); + +#ifdef bool +#undef bool +#endif + +module_param_named( debug, quec_debug, int, S_IRUGO | S_IWUSR ); +MODULE_PARM_DESC( debug, "Debugging enabled or not" ); + +//module_param_named( interruptible, Quecinterruptible, int, S_IRUGO | S_IWUSR ); +//MODULE_PARM_DESC( interruptible, "Listen for and return on user interrupt" ); +module_param( txQueueLength, int, S_IRUGO | S_IWUSR ); +MODULE_PARM_DESC( txQueueLength, + "Number of IP packets which may be queued up for transmit" ); + 
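The probe path above clones the usbnet-supplied net_device_ops table so it can interpose its own open/stop handlers while keeping the originals reachable through mpUSBNetOpen/mpUSBNetStop. A minimal sketch of that interposition pattern follows; the names interpose_open, my_ndo_open, and saved_ndo_open are hypothetical stand-ins for the driver's GobiUSBNetOpen and mpUSBNetOpen, not part of this patch:

/* Sketch only: clone a const ops table, patch one entry, swap it in. */
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/string.h>

static int (*saved_ndo_open)( struct net_device * );   /* hypothetical save slot */

static int my_ndo_open( struct net_device * dev )      /* hypothetical wrapper */
{
   /* driver-specific work (e.g. waking the device) would go here */
   return saved_ndo_open ? saved_ndo_open( dev ) : 0;
}

static int interpose_open( struct net_device * dev )
{
   struct net_device_ops * ops;

   ops = kmemdup( dev->netdev_ops, sizeof( *ops ), GFP_KERNEL );
   if (ops == NULL)
   {
      return -ENOMEM;
   }

   saved_ndo_open = ops->ndo_open;
   ops->ndo_open = my_ndo_open;
   dev->netdev_ops = ops;   /* the teardown path must kfree() this clone */
   return 0;
}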
diff --git a/package/wwan/driver/quectel_Gobinet/src/Makefile b/package/wwan/driver/quectel_Gobinet/src/Makefile new file mode 100644 index 000000000..7f0deb0e9 --- /dev/null +++ b/package/wwan/driver/quectel_Gobinet/src/Makefile @@ -0,0 +1,43 @@ +obj-m := GobiNet.o +GobiNet-objs := GobiUSBNet.o QMIDevice.o QMI.o + +PWD := $(shell pwd) +OUTPUTDIR=/lib/modules/`uname -r`/kernel/drivers/net/usb/ + +ifeq ($(ARCH),) +ARCH := $(shell uname -m) +endif +ifeq ($(CROSS_COMPILE),) +CROSS_COMPILE := +endif +ifeq ($(KDIR),) +KDIR := /lib/modules/$(shell uname -r)/build +ifeq ($(ARCH),i686) +ifeq ($(wildcard $(KDIR)/arch/$(ARCH)),) +ARCH=i386 +endif +endif +endif + +$(shell rm -rf usbnet.h) +ifneq ($(wildcard $(KDIR)/drivers/usb/net/usbnet.h),) +$(shell ln -s $(KDIR)/drivers/usb/net/usbnet.h usbnet.h) +endif +ifneq ($(wildcard $(KDIR)/drivers/net/usb/usbnet.h),) +$(shell ln -s $(KDIR)/drivers/net/usb/usbnet.h usbnet.h) +endif + +default: + ln -sf makefile Makefile + $(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) modules + +install: default + mkdir -p $(OUTPUTDIR) + cp -f GobiNet.ko $(OUTPUTDIR) + depmod + modprobe -r GobiNet + modprobe GobiNet + +clean: + rm -rf Makefile usbnet.h + rm -rf *.o *~ core .depend .*.cmd *.ko *.mod.c .tmp_versions Module.* modules.order diff --git a/package/wwan/driver/quectel_Gobinet/src/QMI.c b/package/wwan/driver/quectel_Gobinet/src/QMI.c new file mode 100644 index 000000000..644699b1a --- /dev/null +++ b/package/wwan/driver/quectel_Gobinet/src/QMI.c @@ -0,0 +1,1521 @@ +#ifdef __QUEC_INCLUDE_QMI_C__ +/*=========================================================================== +FILE: + QMI.c + +DESCRIPTION: + Qualcomm QMI driver code + +FUNCTIONS: + Generic QMUX functions + ParseQMUX + FillQMUX + + Generic QMI functions + GetTLV + ValidQMIMessage + GetQMIMessageID + + Fill Buffers with QMI requests + QMICTLGetClientIDReq + QMICTLReleaseClientIDReq + QMICTLReadyReq + QMIWDSSetEventReportReq + QMIWDSGetPKGSRVCStatusReq + QMIDMSGetMEIDReq + QMIWDASetDataFormatReq + QMICTLSetDataFormatReq + QMICTLSyncReq + + Parse data from QMI responses + QMICTLGetClientIDResp + QMICTLReleaseClientIDResp + QMIWDSEventResp + QMIDMSGetMEIDResp + QMIWDASetDataFormatResp + QMICTLSyncResp + +Copyright (c) 2011, Code Aurora Forum. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Code Aurora Forum nor + the names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. +===========================================================================*/ + +//--------------------------------------------------------------------------- +// Include Files +//--------------------------------------------------------------------------- +#include <linux/kernel.h> /* cpu_to_le16()/le16_to_cpu() and friends */ +#include <asm/unaligned.h> /* get_unaligned()/put_unaligned() */ +#include "Structs.h" +#include "QMI.h" + +/*=========================================================================*/ +// Get sizes of buffers needed by QMI requests +/*=========================================================================*/ + +/*=========================================================================== +METHOD: + QMUXHeaderSize (Public Method) + +DESCRIPTION: + Get size of buffer needed for QMUX + +RETURN VALUE: + u16 - size of buffer +===========================================================================*/ +static u16 QMUXHeaderSize( void ) +{ + return sizeof( sQMUX ); +} + +/*=========================================================================== +METHOD: + QMICTLGetClientIDReqSize (Public Method) + +DESCRIPTION: + Get size of buffer needed for QMUX + QMICTLGetClientIDReq + +RETURN VALUE: + u16 - size of buffer +===========================================================================*/ +static u16 QMICTLGetClientIDReqSize( void ) +{ + return sizeof( sQMUX ) + 10; +} + +/*=========================================================================== +METHOD: + QMICTLReleaseClientIDReqSize (Public Method) + +DESCRIPTION: + Get size of buffer needed for QMUX + QMICTLReleaseClientIDReq + +RETURN VALUE: + u16 - size of buffer +===========================================================================*/ +static u16 QMICTLReleaseClientIDReqSize( void ) +{ + return sizeof( sQMUX ) + 11; +} + +/*=========================================================================== +METHOD: + QMICTLReadyReqSize (Public Method) + +DESCRIPTION: + Get size of buffer needed for QMUX + QMICTLReadyReq + +RETURN VALUE: + u16 - size of buffer +===========================================================================*/ +static u16 QMICTLReadyReqSize( void ) +{ + return sizeof( sQMUX ) + 6; +} + +/*=========================================================================== +METHOD: + QMIWDSSetEventReportReqSize (Public Method) + +DESCRIPTION: + Get size of buffer needed for QMUX + QMIWDSSetEventReportReq + +RETURN VALUE: + u16 - size of buffer +===========================================================================*/ +static u16 QMIWDSSetEventReportReqSize( void ) +{ + return sizeof( sQMUX ) + 15; +} + +/*=========================================================================== +METHOD: + QMIWDSGetPKGSRVCStatusReqSize (Public Method) + +DESCRIPTION: + Get size of buffer needed for QMUX + QMIWDSGetPKGSRVCStatusReq + +RETURN VALUE: + u16 - size of buffer +===========================================================================*/ +static u16 QMIWDSGetPKGSRVCStatusReqSize( void ) +{ + return sizeof( sQMUX ) + 7; +} + +/*=========================================================================== 
+METHOD: + QMIDMSGetMEIDReqSize (Public Method) + +DESCRIPTION: + Get size of buffer needed for QMUX + QMIDMSGetMEIDReq + +RETURN VALUE: + u16 - size of buffer +===========================================================================*/ +static u16 QMIDMSGetMEIDReqSize( void ) +{ + return sizeof( sQMUX ) + 7; +} + +struct QMIWDS_ADMIN_SET_DATA_FORMAT_TLV_QOS +{ + u8 TLVType; + u16 TLVLength; + u8 QOSSetting; +} __packed; + +struct QMIWDS_ADMIN_SET_DATA_FORMAT_TLV +{ + u8 TLVType; + u16 TLVLength; + u32 Value; +} __packed; + +struct QMIWDS_ENDPOINT_TLV +{ + u8 TLVType; + u16 TLVLength; + u32 ep_type; + u32 iface_id; +} __packed; + +struct QMIWDS_ADMIN_SET_DATA_FORMAT_REQ_MSG +{ + u8 CtlFlags; // 0: single QMUX Msg; 1: + u16 TransactionId; + u16 Type; + u16 Length; + struct QMIWDS_ADMIN_SET_DATA_FORMAT_TLV_QOS QosDataFormatTlv; + struct QMIWDS_ADMIN_SET_DATA_FORMAT_TLV UnderlyingLinkLayerProtocolTlv; + struct QMIWDS_ADMIN_SET_DATA_FORMAT_TLV UplinkDataAggregationProtocolTlv; + struct QMIWDS_ADMIN_SET_DATA_FORMAT_TLV DownlinkDataAggregationProtocolTlv; + struct QMIWDS_ADMIN_SET_DATA_FORMAT_TLV DownlinkDataAggregationMaxDatagramsTlv; + struct QMIWDS_ADMIN_SET_DATA_FORMAT_TLV DownlinkDataAggregationMaxSizeTlv; + struct QMIWDS_ENDPOINT_TLV epTlv; + struct QMIWDS_ADMIN_SET_DATA_FORMAT_TLV dl_minimum_padding; + struct QMIWDS_ADMIN_SET_DATA_FORMAT_TLV UplinkDataAggregationMaxDatagramsTlv; + struct QMIWDS_ADMIN_SET_DATA_FORMAT_TLV UplinkDataAggregationMaxSizeTlv; +} __packed; + +/*=========================================================================== +METHOD: + QMIWDASetDataFormatReqSize (Public Method) + +DESCRIPTION: + Get size of buffer needed for QMUX + QMIWDASetDataFormatReq + +RETURN VALUE: + u16 - size of buffer +===========================================================================*/ +static u16 QMIWDASetDataFormatReqSize( int qmap_version ) +{ +if (qmap_version) + return sizeof( sQMUX ) + sizeof(struct QMIWDS_ADMIN_SET_DATA_FORMAT_REQ_MSG); +else + return sizeof( sQMUX ) + 18; +} + +/*=========================================================================== +METHOD: + QMICTLSyncReqSize (Public Method) + +DESCRIPTION: + Get size of buffer needed for QMUX + QMICTLSyncReq + +RETURN VALUE: + u16 - size of buffer +===========================================================================*/ +static u16 QMICTLSyncReqSize( void ) +{ + return sizeof( sQMUX ) + 6; +} + +/*=========================================================================*/ +// Generic QMUX functions +/*=========================================================================*/ + +/*=========================================================================== +METHOD: + ParseQMUX (Public Method) + +DESCRIPTION: + Remove QMUX headers from a buffer + +PARAMETERS + pClientID [ O ] - On success, will point to Client ID + pBuffer [ I ] - Full Message passed in + buffSize [ I ] - Size of pBuffer + +RETURN VALUE: + int - Positive for size of QMUX header + Negative errno for error +===========================================================================*/ +static int ParseQMUX( + u16 * pClientID, + void * pBuffer, + u16 buffSize ) +{ + sQMUX * pQMUXHeader; + + if (pBuffer == 0 || buffSize < 12) + { + return -ENOMEM; + } + + // QMUX Header + pQMUXHeader = (sQMUX *)pBuffer; + + if (pQMUXHeader->mTF != 1 + || le16_to_cpu(get_unaligned(&pQMUXHeader->mLength)) != buffSize - 1 + || pQMUXHeader->mCtrlFlag != 0x80 ) + { + return -EINVAL; + } + + // Client ID + *pClientID = (pQMUXHeader->mQMIClientID << 8) + 
pQMUXHeader->mQMIService; + + return sizeof( sQMUX ); +} + +/*=========================================================================== +METHOD: + FillQMUX (Public Method) + +DESCRIPTION: + Fill buffer with QMUX headers + +PARAMETERS + clientID [ I ] - Client ID + pBuffer [ O ] - Buffer to be filled + buffSize [ I ] - Size of pBuffer (must be at least 6) + +RETURN VALUE: + int - 0 for success + Negative errno for error +===========================================================================*/ +static int FillQMUX( + u16 clientID, + void * pBuffer, + u16 buffSize ) +{ + sQMUX * pQMUXHeader; + + if (pBuffer == 0 || buffSize < sizeof( sQMUX )) + { + return -ENOMEM; + } + + // QMUX Header + pQMUXHeader = (sQMUX *)pBuffer; + + pQMUXHeader->mTF = 1; + put_unaligned(cpu_to_le16(buffSize - 1), &pQMUXHeader->mLength); + //DBG("pQMUXHeader->mLength = 0x%x, buffSize - 1 = 0x%x\n",pQMUXHeader->mLength, buffSize - 1); + pQMUXHeader->mCtrlFlag = 0; + + // Service and Client ID + pQMUXHeader->mQMIService = clientID & 0xff; + pQMUXHeader->mQMIClientID = clientID >> 8; + + return 0; +} + +/*=========================================================================*/ +// Generic QMI functions +/*=========================================================================*/ + +/*=========================================================================== +METHOD: + GetTLV (Public Method) + +DESCRIPTION: + Get data buffer of a specified TLV from a QMI message + + QMI Message shall NOT include SDU + +PARAMETERS + pQMIMessage [ I ] - QMI Message buffer + messageLen [ I ] - Size of QMI Message buffer + type [ I ] - Desired Type + pOutDataBuf [ O ] - Buffer to be filled with TLV + bufferLen [ I ] - Size of pOutDataBuf + +RETURN VALUE: + int - Size of TLV for success + Negative errno for error +===========================================================================*/ +static int GetTLV( + void * pQMIMessage, + u16 messageLen, + u8 type, + void * pOutDataBuf, + u16 bufferLen ) +{ + u16 pos; + u16 tlvSize = 0; + u16 cpyCount; + + if (pQMIMessage == 0 || pOutDataBuf == 0) + { + return -ENOMEM; + } + + for (pos = 4; + pos + 3 < messageLen; + pos += tlvSize + 3) + { + tlvSize = le16_to_cpu( get_unaligned(((u16 *)(pQMIMessage + pos + 1) )) ); + if (*(u8 *)(pQMIMessage + pos) == type) + { + if (bufferLen < tlvSize) + { + return -ENOMEM; + } + + for (cpyCount = 0; cpyCount < tlvSize; cpyCount++) + { + *((char*)(pOutDataBuf + cpyCount)) = *((char*)(pQMIMessage + pos + 3 + cpyCount)); + } + + return tlvSize; + } + } + + return -ENOMSG; +} + +/*=========================================================================== +METHOD: + ValidQMIMessage (Public Method) + +DESCRIPTION: + Check mandatory TLV in a QMI message + + QMI Message shall NOT include SDU + +PARAMETERS + pQMIMessage [ I ] - QMI Message buffer + messageLen [ I ] - Size of QMI Message buffer + +RETURN VALUE: + int - 0 for success (no error) + Negative errno for error + Positive for QMI error code +===========================================================================*/ +static int ValidQMIMessage( + void * pQMIMessage, + u16 messageLen ) +{ + char mandTLV[4]; + + if (GetTLV( pQMIMessage, messageLen, 2, &mandTLV[0], 4 ) == 4) + { + // Found TLV + if (*(u16 *)&mandTLV[0] != 0) + { + return le16_to_cpu( get_unaligned(&mandTLV[2]) ); + } + else + { + return 0; + } + } + else + { + return -ENOMSG; + } +} + 
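Before the request builders, it helps to see how the two helpers above combine in practice: ValidQMIMessage checks the mandatory result TLV (type 0x02), and GetTLV then pulls out whichever payload TLV the caller needs. A sketch with a hand-built SDU, assuming it sits in this translation unit so the static helpers are visible; the buffer contents are purely illustrative:

/* Illustrative only: a fabricated response SDU (message ID 0x0022, one TLV). */
static int DemoParseSDU( void )
{
   u8 sdu[] =
   {
      0x22, 0x00,              // message ID 0x0022 (little endian)
      0x07, 0x00,              // total length of all TLVs: 7 bytes
      0x02, 0x04, 0x00,        // TLV type 0x02 (result), length 4
      0x00, 0x00, 0x00, 0x00,  // result 0 = success, error code 0
   };
   u16 payload;

   if (ValidQMIMessage( sdu, sizeof( sdu ) ) != 0)
   {
      return -EFAULT;   // device reported an error in the result TLV
   }

   // A real caller would now extract its payload TLV, e.g. type 0x01;
   // this fabricated SDU has none, so GetTLV returns -ENOMSG here.
   return GetTLV( sdu, sizeof( sdu ), 0x01, &payload, sizeof( payload ) );
}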
/*=========================================================================== +METHOD: + GetQMIMessageID (Public Method) + +DESCRIPTION: + Get the message ID of a QMI message + + QMI Message shall NOT include SDU + +PARAMETERS + pQMIMessage [ I ] - QMI Message buffer + messageLen [ I ] - Size of QMI Message buffer + +RETURN VALUE: + int - Positive for message ID + Negative errno for error +===========================================================================*/ +static int GetQMIMessageID( + void * pQMIMessage, + u16 messageLen ) +{ + if (messageLen < 2) + { + return -ENODATA; + } + else + { + return le16_to_cpu( get_unaligned((u16 *)pQMIMessage) ); + } +} + +/*=========================================================================*/ +// Fill Buffers with QMI requests +/*=========================================================================*/ + +/*=========================================================================== +METHOD: + QMICTLGetClientIDReq (Public Method) + +DESCRIPTION: + Fill buffer with QMI CTL Get Client ID Request + +PARAMETERS + pBuffer [ O ] - Buffer to be filled + buffSize [ I ] - Size of pBuffer + transactionID [ I ] - Transaction ID + serviceType [ I ] - Service type requested + +RETURN VALUE: + int - Positive for resulting size of pBuffer + Negative errno for error +===========================================================================*/ +static int QMICTLGetClientIDReq( + void * pBuffer, + u16 buffSize, + u8 transactionID, + u8 serviceType ) +{ + if (pBuffer == 0 || buffSize < QMICTLGetClientIDReqSize() ) + { + return -ENOMEM; + } + + // QMI CTL GET CLIENT ID + // Request + *(u8 *)(pBuffer + sizeof( sQMUX ))= 0x00; + // Transaction ID + *(u8 *)(pBuffer + sizeof( sQMUX ) + 1) = transactionID; + // Message ID + put_unaligned(cpu_to_le16(0x0022), (u16 *)(pBuffer + sizeof( sQMUX ) + 2)); + // Size of TLV's + put_unaligned(cpu_to_le16(0x0004), (u16 *)(pBuffer + sizeof( sQMUX ) + 4)); + // QMI Service Type + *(u8 *)(pBuffer + sizeof( sQMUX ) + 6) = 0x01; + // Size + put_unaligned(cpu_to_le16(0x0001), (u16 *)(pBuffer + sizeof( sQMUX ) + 7)); + // QMI svc type + *(u8 *)(pBuffer + sizeof( sQMUX ) + 9) = serviceType; + + // success + return sizeof( sQMUX ) + 10; +} + +/*=========================================================================== +METHOD: + QMICTLReleaseClientIDReq (Public Method) + +DESCRIPTION: + Fill buffer with QMI CTL Release Client ID Request + +PARAMETERS + pBuffer [ O ] - Buffer to be filled + buffSize [ I ] - Size of pBuffer + transactionID [ I ] - Transaction ID + clientID [ I ] - Client ID to be released + +RETURN VALUE: + int - Positive for resulting size of pBuffer + Negative errno for error +===========================================================================*/ +static int QMICTLReleaseClientIDReq( + void * pBuffer, + u16 buffSize, + u8 transactionID, + u16 clientID ) +{ + if (pBuffer == 0 || buffSize < QMICTLReleaseClientIDReqSize() ) + { + return -ENOMEM; + } + + DBG( "buffSize: 0x%x, transactionID: 0x%x, clientID: 0x%x,\n", + buffSize, transactionID, clientID ); + + // QMI CTL RELEASE CLIENT ID REQ + // Request + *(u8 *)(pBuffer + sizeof( sQMUX )) = 0x00; + // Transaction ID + *(u8 *)(pBuffer + sizeof( sQMUX ) + 1 ) = transactionID; + // Message ID + put_unaligned( cpu_to_le16(0x0023), (u16 *)(pBuffer + sizeof( sQMUX ) + 2) ); + // Size of TLV's + put_unaligned( cpu_to_le16(0x0005), (u16 *)(pBuffer + sizeof( sQMUX ) + 4) ); + // Release client ID + *(u8 *)(pBuffer + sizeof( sQMUX ) + 6) = 0x01; + // Size + put_unaligned( cpu_to_le16(0x0002), (u16 *)(pBuffer + sizeof( sQMUX ) + 7)); + // QMI svc type / Client ID + put_unaligned(cpu_to_le16(clientID), (u16 *)(pBuffer + sizeof( sQMUX ) + 9)); + + // success + return sizeof( sQMUX ) + 11; +} + 
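For reference, the complete 16-byte wire image that QMICTLGetClientIDReq produces, once FillQMUX has stamped the header, looks as follows; the example assumes transaction ID 0x01 and a request for a WDS client (serviceType 0x01):

/*
 * 01 0f 00 00 00 00   QMUX: TF=1, length=15 (buffSize-1), ctrl flag 0x00,
 *                     service 0 (QMICTL), client 0
 * 00                  CTL flags: request
 * 01                  transaction ID (1 byte for QMI CTL)
 * 22 00               message ID 0x0022, Get Client ID
 * 04 00               total length of TLVs: 4
 * 01 01 00 01         TLV type 0x01, length 1, value 0x01 = QMIWDS
 */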
/*=========================================================================== +METHOD: + QMICTLReadyReq (Public Method) + +DESCRIPTION: + Fill buffer with QMI CTL Get Version Info Request + +PARAMETERS + pBuffer [ O ] - Buffer to be filled + buffSize [ I ] - Size of pBuffer + transactionID [ I ] - Transaction ID + +RETURN VALUE: + int - Positive for resulting size of pBuffer + Negative errno for error +===========================================================================*/ +static int QMICTLReadyReq( + void * pBuffer, + u16 buffSize, + u8 transactionID ) +{ + if (pBuffer == 0 || buffSize < QMICTLReadyReqSize() ) + { + return -ENOMEM; + } + + DBG("buffSize: 0x%x, transactionID: 0x%x\n", buffSize, transactionID); + + // QMI CTL GET VERSION INFO REQ + // Request + *(u8 *)(pBuffer + sizeof( sQMUX )) = 0x00; + // Transaction ID + *(u8 *)(pBuffer + sizeof( sQMUX ) + 1) = transactionID; + // Message ID + put_unaligned( cpu_to_le16(0x0021), (u16 *)(pBuffer + sizeof( sQMUX ) + 2) ); + // Size of TLV's + put_unaligned( cpu_to_le16(0x0000), (u16 *)(pBuffer + sizeof( sQMUX ) + 4) ); + + // success + return sizeof( sQMUX ) + 6; +} + +/*=========================================================================== +METHOD: + QMIWDSSetEventReportReq (Public Method) + +DESCRIPTION: + Fill buffer with QMI WDS Set Event Report Request + +PARAMETERS + pBuffer [ O ] - Buffer to be filled + buffSize [ I ] - Size of pBuffer + transactionID [ I ] - Transaction ID + +RETURN VALUE: + int - Positive for resulting size of pBuffer + Negative errno for error +===========================================================================*/ +static int QMIWDSSetEventReportReq( + void * pBuffer, + u16 buffSize, + u16 transactionID ) +{ + if (pBuffer == 0 || buffSize < QMIWDSSetEventReportReqSize() ) + { + return -ENOMEM; + } + + // QMI WDS SET EVENT REPORT REQ + // Request + *(u8 *)(pBuffer + sizeof( sQMUX )) = 0x00; + // Transaction ID + put_unaligned( cpu_to_le16(transactionID), (u16 *)(pBuffer + sizeof( sQMUX ) + 1)); + // Message ID + put_unaligned( cpu_to_le16(0x0001), (u16 *)(pBuffer + sizeof( sQMUX ) + 3)); + // Size of TLV's + put_unaligned(cpu_to_le16(0x0008), (u16 *)(pBuffer + sizeof( sQMUX ) + 5)); + // Report channel rate TLV + *(u8 *)(pBuffer + sizeof( sQMUX ) + 7) = 0x11; + // Size + put_unaligned( cpu_to_le16(0x0005), (u16 *)(pBuffer + sizeof( sQMUX ) + 8)); + // Stats period + *(u8 *)(pBuffer + sizeof( sQMUX ) + 10) = 0x01; + // Stats mask + put_unaligned( cpu_to_le32(0x000000ff), (u32 *)(pBuffer + sizeof( sQMUX ) + 11) ); + + // success + return sizeof( sQMUX ) + 15; +} + +/*=========================================================================== +METHOD: + QMIWDSGetPKGSRVCStatusReq (Public Method) + +DESCRIPTION: + Fill buffer with QMI WDS Get PKG SRVC Status Request + +PARAMETERS + pBuffer [ O ] - Buffer to be filled + buffSize [ I ] - Size of pBuffer + transactionID [ I ] - Transaction ID + +RETURN VALUE: + int - Positive for resulting size of pBuffer + Negative errno for error +===========================================================================*/ +static int QMIWDSGetPKGSRVCStatusReq( + void * pBuffer, + u16 buffSize, + u16 transactionID ) +{ + if (pBuffer == 0 || buffSize < QMIWDSGetPKGSRVCStatusReqSize() ) + { + return -ENOMEM; + } + + // QMI WDS Get PKG SRVC Status REQ + // Request + *(u8 *)(pBuffer + sizeof( sQMUX )) = 0x00; + // Transaction ID + put_unaligned(cpu_to_le16(transactionID), (u16 
*)(pBuffer + sizeof( sQMUX ) + 1)); + // Message ID + put_unaligned(cpu_to_le16(0x0022), (u16 *)(pBuffer + sizeof( sQMUX ) + 3)); + // Size of TLV's + put_unaligned(cpu_to_le16(0x0000), (u16 *)(pBuffer + sizeof( sQMUX ) + 5)); + + // success + return sizeof( sQMUX ) + 7; +} + +#if 0 +static u16 QMIWDSSetQMUXBindMuxDataPortSize( void ) +{ + return sizeof( sQMUX ) + 29; +} + +static u16 QMIWDSSetQMUXBindMuxDataPortReq( + void * pBuffer, + u16 buffSize, + u8 MuxId, + u16 transactionID ) +{ + if (pBuffer == 0 || buffSize < QMIWDSSetQMUXBindMuxDataPortSize() ) + { + return -ENOMEM; + } + + // QMI WDS Set QMUX Bind Mux Data Port REQ + // Request + *(u8 *)(pBuffer + sizeof( sQMUX )) = 0x00; + // Transaction ID + put_unaligned(cpu_to_le16(transactionID), (u16 *)(pBuffer + sizeof( sQMUX ) + 1)); + // Message ID + put_unaligned(cpu_to_le16(0x00a2), (u16 *)(pBuffer + sizeof( sQMUX ) + 3)); + // Size of TLV's + put_unaligned(cpu_to_le16(0x0016), (u16 *)(pBuffer + sizeof( sQMUX ) + 5)); + + *(u8 *)(pBuffer + sizeof( sQMUX ) + 7) = 0x10; + put_unaligned(cpu_to_le16(0x08), (u16 *)(pBuffer + sizeof( sQMUX ) + 8)); + put_unaligned(cpu_to_le32(0x02), (u32 *)(pBuffer + sizeof( sQMUX ) + 10)); // ep_type + put_unaligned(cpu_to_le32(0x04), (u32 *)(pBuffer + sizeof( sQMUX ) + 14)); // iface_id + + *(u8 *)(pBuffer + sizeof( sQMUX ) + 18) = 0x11; + put_unaligned(cpu_to_le16(0x01), (u16 *)(pBuffer + sizeof( sQMUX ) + 19)); + *(u8 *)(pBuffer + sizeof( sQMUX ) + 21) = MuxId; // MuxId + + *(u8 *)(pBuffer + sizeof( sQMUX ) + 22) = 0x13; + put_unaligned(cpu_to_le16(0x04), (u16 *)(pBuffer + sizeof( sQMUX ) + 23)); + put_unaligned(cpu_to_le32(0x01), (u32 *)(pBuffer + sizeof( sQMUX ) + 25)); + + // success + return sizeof( sQMUX ) + 29; +} +#endif + +/*=========================================================================== +METHOD: + QMIDMSGetMEIDReq (Public Method) + +DESCRIPTION: + Fill buffer with QMI DMS Get Serial Numbers Request + +PARAMETERS + pBuffer [ O ] - Buffer to be filled + buffSize [ I ] - Size of pBuffer + transactionID [ I ] - Transaction ID + +RETURN VALUE: + int - Positive for resulting size of pBuffer + Negative errno for error +===========================================================================*/ +static int QMIDMSGetMEIDReq( + void * pBuffer, + u16 buffSize, + u16 transactionID ) +{ + if (pBuffer == 0 || buffSize < QMIDMSGetMEIDReqSize() ) + { + return -ENOMEM; + } + + // QMI DMS GET SERIAL NUMBERS REQ + // Request + *(u8 *)(pBuffer + sizeof( sQMUX )) = 0x00; + // Transaction ID + put_unaligned( cpu_to_le16(transactionID), (u16 *)(pBuffer + sizeof( sQMUX ) + 1) ); + // Message ID + put_unaligned( cpu_to_le16(0x0025), (u16 *)(pBuffer + sizeof( sQMUX ) + 3) ); + // Size of TLV's + put_unaligned( cpu_to_le16(0x0000), (u16 *)(pBuffer + sizeof( sQMUX ) + 5)); + + // success + return sizeof( sQMUX ) + 7; +} + +/*=========================================================================== +METHOD: + QMIWDASetDataFormatReq (Public Method) + +DESCRIPTION: + Fill buffer with QMI WDA Set Data Format Request + +PARAMETERS + pBuffer [ O ] - Buffer to be filled + buffSize [ I ] - Size of pBuffer + transactionID [ I ] - Transaction ID + +RETURN VALUE: + int - Positive for resulting size of pBuffer + Negative errno for error +===========================================================================*/ +static int QMIWDASetDataFormatReq( + void * pBuffer, + u16 buffSize, + bool bRawIPMode, int qmap_version, u32 rx_size, + u16 transactionID ) +{ +if (qmap_version) { + struct 
QMIWDS_ADMIN_SET_DATA_FORMAT_REQ_MSG *pMUXMsg = (struct QMIWDS_ADMIN_SET_DATA_FORMAT_REQ_MSG *)(pBuffer + sizeof( sQMUX )); + + pMUXMsg->CtlFlags = 0x00; + put_unaligned( cpu_to_le16(transactionID), &pMUXMsg->TransactionId); + put_unaligned( cpu_to_le16(0x0020), &pMUXMsg->Type); + put_unaligned( cpu_to_le16(sizeof( struct QMIWDS_ADMIN_SET_DATA_FORMAT_REQ_MSG) - 7), &pMUXMsg->Length); + + //Indicates whether the Quality of Service (QoS) data format is used by the client. + pMUXMsg->QosDataFormatTlv.TLVType = 0x10; + pMUXMsg->QosDataFormatTlv.TLVLength = cpu_to_le16(0x0001); + pMUXMsg->QosDataFormatTlv.QOSSetting = 0; /* no-QOS header */ +//Underlying Link Layer Protocol + pMUXMsg->UnderlyingLinkLayerProtocolTlv.TLVType = 0x11; + pMUXMsg->UnderlyingLinkLayerProtocolTlv.TLVLength = cpu_to_le16(4); + pMUXMsg->UnderlyingLinkLayerProtocolTlv.Value = cpu_to_le32(0x02); /* Set RawIP mode */ +//Uplink (UL) data aggregation protocol to be used for uplink data transfer. + pMUXMsg->UplinkDataAggregationProtocolTlv.TLVType = 0x12; + pMUXMsg->UplinkDataAggregationProtocolTlv.TLVLength = cpu_to_le16(4); + pMUXMsg->UplinkDataAggregationProtocolTlv.Value = cpu_to_le32(qmap_version); //UL QMAP is enabled +//Downlink (DL) data aggregation protocol to be used for downlink data transfer + pMUXMsg->DownlinkDataAggregationProtocolTlv.TLVType = 0x13; + pMUXMsg->DownlinkDataAggregationProtocolTlv.TLVLength = cpu_to_le16(4); + pMUXMsg->DownlinkDataAggregationProtocolTlv.Value = cpu_to_le32(qmap_version); //DL QMAP is enabled +//Maximum number of datagrams in a single aggregated packet on downlink + pMUXMsg->DownlinkDataAggregationMaxDatagramsTlv.TLVType = 0x15; + pMUXMsg->DownlinkDataAggregationMaxDatagramsTlv.TLVLength = cpu_to_le16(4); + pMUXMsg->DownlinkDataAggregationMaxDatagramsTlv.Value = cpu_to_le32(rx_size/1024); +//Maximum size in bytes of a single aggregated packet allowed on downlink + pMUXMsg->DownlinkDataAggregationMaxSizeTlv.TLVType = 0x16; + pMUXMsg->DownlinkDataAggregationMaxSizeTlv.TLVLength = cpu_to_le16(4); + pMUXMsg->DownlinkDataAggregationMaxSizeTlv.Value = cpu_to_le32(rx_size); +//Peripheral End Point ID + pMUXMsg->epTlv.TLVType = 0x17; + pMUXMsg->epTlv.TLVLength = cpu_to_le16(8); + pMUXMsg->epTlv.ep_type = cpu_to_le32(0x02); // DATA_EP_TYPE_BAM_DMUX + pMUXMsg->epTlv.iface_id = cpu_to_le32(0x04); +//Specifies the minimum padding bytes to be added in between aggregated downlink QMAP packets. 
+ pMUXMsg->dl_minimum_padding.TLVType = 0x19; + pMUXMsg->dl_minimum_padding.TLVLength = cpu_to_le16(4); + pMUXMsg->dl_minimum_padding.Value = cpu_to_le32(0); +//Maximum number of datagrams in a single aggregated packet on uplink + pMUXMsg->UplinkDataAggregationMaxDatagramsTlv.TLVType = 27; + pMUXMsg->UplinkDataAggregationMaxDatagramsTlv.TLVLength = cpu_to_le16(4); + pMUXMsg->UplinkDataAggregationMaxDatagramsTlv.Value = cpu_to_le32(11); +//Maximum size in bytes of a single aggregated packet allowed on uplink + pMUXMsg->UplinkDataAggregationMaxSizeTlv.TLVType = 28; + pMUXMsg->UplinkDataAggregationMaxSizeTlv.TLVLength = cpu_to_le16(4); + pMUXMsg->UplinkDataAggregationMaxSizeTlv.Value = cpu_to_le32(8*1024); +} +else { + if (pBuffer == 0 || buffSize < QMIWDASetDataFormatReqSize(qmap_version) ) + { + return -ENOMEM; + } + + // QMI WDA SET DATA FORMAT REQ + // Request + *(u8 *)(pBuffer + sizeof( sQMUX )) = 0x00; + + // Transaction ID + put_unaligned( cpu_to_le16(transactionID), (u16 *)(pBuffer + sizeof( sQMUX ) + 1) ); + + // Message ID + put_unaligned( cpu_to_le16(0x0020), (u16 *)(pBuffer + sizeof( sQMUX ) + 3) ); + + // Size of TLV's + put_unaligned( cpu_to_le16(0x000b), (u16 *)(pBuffer + sizeof( sQMUX ) + 5)); + + /* TLVType QOS Data Format 1 byte */ + *(u8 *)(pBuffer + sizeof( sQMUX ) + 7) = 0x10; // type data format + + /* TLVLength 2 bytes - see spec */ + put_unaligned( cpu_to_le16(0x0001), (u16 *)(pBuffer + sizeof( sQMUX ) + 8)); + + /* DataFormat: 0-default; 1-QoS hdr present 2 bytes */ +#ifdef QOS_MODE + *(u8 *)(pBuffer + sizeof( sQMUX ) + 10) = 1; /* QOS header */ +#else + *(u8 *)(pBuffer + sizeof( sQMUX ) + 10) = 0; /* no-QOS header */ +#endif + + /* TLVType Link-Layer Protocol (Optional) 1 byte */ + *(u8 *)(pBuffer + sizeof( sQMUX ) + 11) = 0x11; + + /* TLVLength 2 bytes */ + put_unaligned( cpu_to_le16(0x0004), (u16 *)(pBuffer + sizeof( sQMUX ) + 12)); + + /* LinkProt: 0x1 - ETH; 0x2 - rawIP 4 bytes */ +if (bRawIPMode) { //#ifdef DATA_MODE_RP + /* Set RawIP mode */ + put_unaligned( cpu_to_le32(0x00000002), (u32 *)(pBuffer + sizeof( sQMUX ) + 14)); + DBG("Request RawIP Data Format\n"); +} else { //#else + /* Set Ethernet mode */ + put_unaligned( cpu_to_le32(0x00000001), (u32 *)(pBuffer + sizeof( sQMUX ) + 14)); + DBG("Request Ethernet Data Format\n"); +} //#endif + +} + + // success + return QMIWDASetDataFormatReqSize(qmap_version); +} + +#if 0 +static int QMIWDASetDataQmapReq( + void * pBuffer, + u16 buffSize, + u16 transactionID ) +{ + // QMI WDA SET DATA FORMAT REQ + // Request + *(u8 *)(pBuffer + sizeof( sQMUX )) = 0x00; + + // Transaction ID + put_unaligned( cpu_to_le16(transactionID), (u16 *)(pBuffer + sizeof( sQMUX ) + 1) ); + + // Message ID + put_unaligned( cpu_to_le16(0x002B), (u16 *)(pBuffer + sizeof( sQMUX ) + 3) ); + + // Size of TLV's + put_unaligned( cpu_to_le16(0x0004), (u16 *)(pBuffer + sizeof( sQMUX ) + 5)); + + /* TLVType QMAP In-Band Flow Control 1 byte */ + *(u8 *)(pBuffer + sizeof( sQMUX ) + 7) = 0x10; + put_unaligned( cpu_to_le16(0x0001), (u16 *)(pBuffer + sizeof( sQMUX ) + 8)); + *(u8 *)(pBuffer + sizeof( sQMUX ) + 10) = 0x01; + + // success + return ( sizeof( sQMUX ) + 11); +} +#endif + +#if 0 +/*=========================================================================== +METHOD: + QMICTLSetDataFormatReqSize (Public Method) + +DESCRIPTION: + Get size of buffer needed for QMUX + QMICTLSetDataFormatReq + +RETURN VALUE: + u16 - size of buffer +===========================================================================*/ +static u16 QMICTLSetDataFormatReqSize( void 
) +{ + return sizeof( sQMUX ) + 15; +} + +/*=========================================================================== +METHOD: + QMICTLSetDataFormatReq (Public Method) + +DESCRIPTION: + Fill buffer with QMI CTL Set Data Format Request + +PARAMETERS + pBuffer [ O ] - Buffer to be filled + buffSize [ I ] - Size of pBuffer + transactionID [ I ] - Transaction ID + +RETURN VALUE: + int - Positive for resulting size of pBuffer + Negative errno for error +===========================================================================*/ +static int QMICTLSetDataFormatReq( + void * pBuffer, + u16 buffSize, + u8 transactionID ) +{ + if (pBuffer == 0 || buffSize < QMICTLSetDataFormatReqSize() ) + { + return -ENOMEM; + } + + /* QMI CTL Set Data Format Request */ + /* Request */ + *(u8 *)(pBuffer + sizeof( sQMUX )) = 0x00; // QMICTL_FLAG_REQUEST + + /* Transaction ID 1 byte */ + *(u8 *)(pBuffer + sizeof( sQMUX ) + 1) = transactionID; /* 1 byte as in spec */ + + /* QMICTLType 2 bytes */ + put_unaligned( cpu_to_le16(0x0026), (u16 *)(pBuffer + sizeof( sQMUX ) + 2)); + + /* Length 2 bytes of 2 TLVs each - see spec */ + put_unaligned( cpu_to_le16(0x0009), (u16 *)(pBuffer + sizeof( sQMUX ) + 4)); + + /* TLVType Data Format (Mandatory) 1 byte */ + *(u8 *)(pBuffer + sizeof( sQMUX ) + 6) = 0x01; // type data format + + /* TLVLength 2 bytes - see spec */ + put_unaligned( cpu_to_le16(0x0001), (u16 *)(pBuffer + sizeof( sQMUX ) + 7)); + + /* DataFormat: 0-default; 1-QoS hdr present 2 bytes */ +#ifdef QOS_MODE + *(u8 *)(pBuffer + sizeof( sQMUX ) + 9) = 1; /* QOS header */ +#else + *(u8 *)(pBuffer + sizeof( sQMUX ) + 9) = 0; /* no-QOS header */ +#endif + + /* TLVType Link-Layer Protocol (Optional) 1 byte */ + *(u8 *)(pBuffer + sizeof( sQMUX ) + 10) = TLV_TYPE_LINK_PROTO; + + /* TLVLength 2 bytes */ + put_unaligned( cpu_to_le16(0x0002), (u16 *)(pBuffer + sizeof( sQMUX ) + 11)); + + /* LinkProt: 0x1 - ETH; 0x2 - rawIP 2 bytes */ +#ifdef DATA_MODE_RP + /* Set RawIP mode */ + put_unaligned( cpu_to_le16(0x0002), (u16 *)(pBuffer + sizeof( sQMUX ) + 13)); + DBG("Request RawIP Data Format\n"); +#else + /* Set Ethernet mode */ + put_unaligned( cpu_to_le16(0x0001), (u16 *)(pBuffer + sizeof( sQMUX ) + 13)); + DBG("Request Ethernet Data Format\n"); +#endif + + /* success */ + return sizeof( sQMUX ) + 15; + +} +#endif + +/*=========================================================================== +METHOD: + QMICTLSyncReq (Public Method) + +DESCRIPTION: + Fill buffer with QMI CTL Sync Request + +PARAMETERS + pBuffer [ O ] - Buffer to be filled + buffSize [ I ] - Size of pBuffer + transactionID [ I ] - Transaction ID + +RETURN VALUE: + int - Positive for resulting size of pBuffer + Negative errno for error +===========================================================================*/ +static int QMICTLSyncReq( + void * pBuffer, + u16 buffSize, + u16 transactionID ) +{ + if (pBuffer == 0 || buffSize < QMICTLSyncReqSize() ) + { + return -ENOMEM; + } + + // Request + *(u8 *)(pBuffer + sizeof( sQMUX )) = 0x00; + // Transaction ID + *(u8 *)(pBuffer + sizeof( sQMUX ) + 1) = transactionID; + // Message ID + put_unaligned( cpu_to_le16(0x0027), (u16 *)(pBuffer + sizeof( sQMUX ) + 2) ); + // Size of TLV's + put_unaligned( cpu_to_le16(0x0000), (u16 *)(pBuffer + sizeof( sQMUX ) + 4) ); + + // success + return sizeof( sQMUX ) + 6; +} + +/*=========================================================================*/ +// Parse data from QMI responses +/*=========================================================================*/ + 
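One quirk is shared by every parser below: a QMI CTL SDU carries a 1-byte transaction ID (a 2-byte preamble after the QMUX header), while all other services use a 2-byte transaction ID (a 3-byte preamble), which is why the offset differs from function to function. A hedged sketch of a helper built on ParseQMUX and GetQMIMessageID; the helper itself is hypothetical, not part of this driver:

/* Hypothetical: validate a raw frame and report the QMI message ID inside. */
static int DemoPeekMessageID( void * frame, u16 frameLen )
{
   u16 clientID;
   u8 sduLen;
   int hdrLen = ParseQMUX( &clientID, frame, frameLen );

   if (hdrLen < 0)
   {
      return hdrLen;   // malformed QMUX header
   }

   // CTL: flags byte + 1-byte TID; other services: flags byte + 2-byte TID
   sduLen = ((clientID & 0xff) == QMICTL) ? 2 : 3;

   return GetQMIMessageID( (u8 *)frame + hdrLen + sduLen,
                           frameLen - hdrLen - sduLen );
}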
+/*=========================================================================== +METHOD: + QMICTLGetClientIDResp (Public Method) + +DESCRIPTION: + Parse the QMI CTL Get Client ID Resp + +PARAMETERS + pBuffer [ I ] - Buffer to be parsed + buffSize [ I ] - Size of pBuffer + pClientID [ O ] - Received client ID + +RETURN VALUE: + int - 0 for success + Negative errno for error +===========================================================================*/ +static int QMICTLGetClientIDResp( + void * pBuffer, + u16 buffSize, + u16 * pClientID ) +{ + int result; + + // Ignore QMUX and SDU + // QMI CTL SDU is 2 bytes, not 3 + u8 offset = sizeof( sQMUX ) + 2; + + if (pBuffer == 0 || buffSize < offset) + { + return -ENOMEM; + } + + pBuffer = pBuffer + offset; + buffSize -= offset; + + result = GetQMIMessageID( pBuffer, buffSize ); + if (result != 0x22) + { + return -EFAULT; + } + + result = ValidQMIMessage( pBuffer, buffSize ); + if (result != 0) + { + return -EFAULT; + } + + result = GetTLV( pBuffer, buffSize, 0x01, pClientID, 2 ); + if (result != 2) + { + return -EFAULT; + } + + return 0; +} + +/*=========================================================================== +METHOD: + QMICTLReleaseClientIDResp (Public Method) + +DESCRIPTION: + Verify the QMI CTL Release Client ID Resp is valid + +PARAMETERS + pBuffer [ I ] - Buffer to be parsed + buffSize [ I ] - Size of pBuffer + +RETURN VALUE: + int - 0 for success + Negative errno for error +===========================================================================*/ +static int QMICTLReleaseClientIDResp( + void * pBuffer, + u16 buffSize ) +{ + int result; + + // Ignore QMUX and SDU + // QMI CTL SDU is 2 bytes, not 3 + u8 offset = sizeof( sQMUX ) + 2; + + if (pBuffer == 0 || buffSize < offset) + { + return -ENOMEM; + } + + pBuffer = pBuffer + offset; + buffSize -= offset; + + result = GetQMIMessageID( pBuffer, buffSize ); + if (result != 0x23) + { + return -EFAULT; + } + + result = ValidQMIMessage( pBuffer, buffSize ); + if (result != 0) + { + return -EFAULT; + } + + return 0; +} + +/*=========================================================================== +METHOD: + QMIWDSEventResp (Public Method) + +DESCRIPTION: + Parse the QMI WDS Set Event Report Resp/Indication or + QMI WDS Get PKG SRVC Status Resp/Indication + + Return parameters will only be updated if value was received + +PARAMETERS + pBuffer [ I ] - Buffer to be parsed + buffSize [ I ] - Size of pBuffer + pTXOk [ O ] - Number of transmitted packets without errors + pRXOk [ O ] - Number of received packets without errors + pTXErr [ O ] - Number of transmitted packets with framing errors + pRXErr [ O ] - Number of received packets with framing errors + pTXOfl [ O ] - Number of transmitted packets dropped due to overflow + pRXOfl [ O ] - Number of received packets dropped due to overflow + pTXBytesOk [ O ] - Number of transmitted bytes without errors + pRXBytesOk [ O ] - Number of received bytes without errors + pbLinkState [ O ] - Is the link active? + pbReconfigure [ O ] - Must interface be reconfigured? 
(reset IP address) + +RETURN VALUE: + int - 0 for success + Negative errno for error +===========================================================================*/ +static int QMIWDSEventResp( + void * pBuffer, + u16 buffSize, + u32 * pTXOk, + u32 * pRXOk, + u32 * pTXErr, + u32 * pRXErr, + u32 * pTXOfl, + u32 * pRXOfl, + u64 * pTXBytesOk, + u64 * pRXBytesOk, + bool * pbLinkState, + bool * pbReconfigure ) +{ + int result; + u8 pktStatusRead[2]; + + // Ignore QMUX and SDU + u8 offset = sizeof( sQMUX ) + 3; + + if (pBuffer == 0 + || buffSize < offset + || pTXOk == 0 + || pRXOk == 0 + || pTXErr == 0 + || pRXErr == 0 + || pTXOfl == 0 + || pRXOfl == 0 + || pTXBytesOk == 0 + || pRXBytesOk == 0 + || pbLinkState == 0 + || pbReconfigure == 0 ) + { + return -ENOMEM; + } + + pBuffer = pBuffer + offset; + buffSize -= offset; + + // Note: Indications. No Mandatory TLV required + + result = GetQMIMessageID( pBuffer, buffSize ); + // QMI WDS Set Event Report Resp + if (result == 0x01) + { + // TLV's are not mandatory + GetTLV( pBuffer, buffSize, 0x10, (void*)pTXOk, 4 ); + put_unaligned( le32_to_cpu(*pTXOk), pTXOk); + GetTLV( pBuffer, buffSize, 0x11, (void*)pRXOk, 4 ); + put_unaligned( le32_to_cpu(*pRXOk), pRXOk); + GetTLV( pBuffer, buffSize, 0x12, (void*)pTXErr, 4 ); + put_unaligned( le32_to_cpu(*pTXErr), pTXErr); + GetTLV( pBuffer, buffSize, 0x13, (void*)pRXErr, 4 ); + put_unaligned( le32_to_cpu(*pRXErr), pRXErr); + GetTLV( pBuffer, buffSize, 0x14, (void*)pTXOfl, 4 ); + put_unaligned( le32_to_cpu(*pTXOfl), pTXOfl); + GetTLV( pBuffer, buffSize, 0x15, (void*)pRXOfl, 4 ); + put_unaligned( le32_to_cpu(*pRXOfl), pRXOfl); + GetTLV( pBuffer, buffSize, 0x19, (void*)pTXBytesOk, 8 ); + put_unaligned( le64_to_cpu(*pTXBytesOk), pTXBytesOk); + GetTLV( pBuffer, buffSize, 0x1A, (void*)pRXBytesOk, 8 ); + put_unaligned( le64_to_cpu(*pRXBytesOk), pRXBytesOk); + } + // QMI WDS Get PKG SRVC Status Resp + else if (result == 0x22) + { + result = GetTLV( pBuffer, buffSize, 0x01, &pktStatusRead[0], 2 ); + // 1 or 2 bytes may be received + if (result >= 1) + { + if (pktStatusRead[0] == 0x02) + { + *pbLinkState = true; + } + else + { + *pbLinkState = false; + } + } + if (result == 2) + { + if (pktStatusRead[1] == 0x01) + { + *pbReconfigure = true; + } + else + { + *pbReconfigure = false; + } + } + + if (result < 0) + { + return result; + } + } + else + { + return -EFAULT; + } + + return 0; +} + +/*=========================================================================== +METHOD: + QMIDMSGetMEIDResp (Public Method) + +DESCRIPTION: + Parse the QMI DMS Get Serial Numbers Resp + +PARAMETERS + pBuffer [ I ] - Buffer to be parsed + buffSize [ I ] - Size of pBuffer + pMEID [ O ] - Device MEID + meidSize [ I ] - Size of MEID buffer (at least 14) + +RETURN VALUE: + int - 0 for success + Negative errno for error +===========================================================================*/ +static int QMIDMSGetMEIDResp( + void * pBuffer, + u16 buffSize, + char * pMEID, + int meidSize ) +{ + int result; + + // Ignore QMUX and SDU + u8 offset = sizeof( sQMUX ) + 3; + + if (pBuffer == 0 || buffSize < offset || meidSize < 14) + { + return -ENOMEM; + } + + pBuffer = pBuffer + offset; + buffSize -= offset; + + result = GetQMIMessageID( pBuffer, buffSize ); + if (result != 0x25) + { + return -EFAULT; + } + + result = ValidQMIMessage( pBuffer, buffSize ); + if (result != 0) + { + return -EFAULT; + } + + result = GetTLV( pBuffer, buffSize, 0x12, (void*)pMEID, 14 ); + if (result != 14) + { + return -EFAULT; + } + + return 0; +} + 
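Each builder/parser pair is meant to be used around the driver's synchronous transport. The sketch below walks the MEID exchange end to end; write_sync and read_sync are hypothetical stand-ins for the WriteSync/ReadSync functions implemented in QMIDevice.c, and their signatures here are assumed for illustration only:

/* Hypothetical transport hooks standing in for WriteSync/ReadSync. */
extern int write_sync( void * pBuf, u16 len, u16 clientID );
extern int read_sync( void * pBuf, u16 maxLen, u16 clientID, u16 tid );

static int DemoGetMEID( u16 dmsClientID, char meid[14] )
{
   u8 buf[128];
   int len, status;
   const u16 tid = 1;   // illustrative transaction ID

   len = QMIDMSGetMEIDReq( buf, sizeof( buf ), tid );
   if (len < 0)
   {
      return len;
   }

   // Stamp the QMUX header before handing the buffer to the transport
   status = FillQMUX( dmsClientID, buf, (u16)len );
   if (status != 0)
   {
      return status;
   }

   status = write_sync( buf, (u16)len, dmsClientID );
   if (status < 0)
   {
      return status;
   }

   len = read_sync( buf, sizeof( buf ), dmsClientID, tid );
   if (len < 0)
   {
      return len;
   }

   return QMIDMSGetMEIDResp( buf, (u16)len, meid, 14 );
}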
+/*=========================================================================== +METHOD: + QMIWDASetDataFormatResp (Public Method) + +DESCRIPTION: + Parse the QMI WDA Set Data Format Response + +PARAMETERS + pBuffer [ I ] - Buffer to be parsed + buffSize [ I ] - Size of pBuffer + +RETURN VALUE: + int - 0 for success + Negative errno for error +===========================================================================*/ +static int QMIWDASetDataFormatResp( + void * pBuffer, + u16 buffSize, bool bRawIPMode, int *qmap_version, int *rx_size, int *tx_size, QMAP_SETTING *set) +{ + + int result; + + u8 pktLinkProtocol[4]; + + // Ignore QMUX and SDU + // QMI SDU is 3 bytes + u8 offset = sizeof( sQMUX ) + 3; + + if (pBuffer == 0 || buffSize < offset) + { + return -ENOMEM; + } + + pBuffer = pBuffer + offset; + buffSize -= offset; + + result = GetQMIMessageID( pBuffer, buffSize ); + if (result != 0x20) + { + return -EFAULT; + } + + /* Check response message result TLV */ + result = ValidQMIMessage( pBuffer, buffSize ); + if (result != 0) + { + DBG("EFAULT: Data Format Mode Bad Response\n"); +// return -EFAULT; + return 0; + } + + /* Check response message link protocol */ + result = GetTLV( pBuffer, buffSize, 0x11, + &pktLinkProtocol[0], 4); + if (result != 4) + { + DBG("EFAULT: Wrong TLV format\n"); + return 0; + } + +if (bRawIPMode) { ////#ifdef DATA_MODE_RP + if (pktLinkProtocol[0] != 2) + { + DBG("EFAULT: Data Format Cannot be set to RawIP Mode\n"); + return pktLinkProtocol[0]; + } + DBG("Data Format Set to RawIP\n"); +} else { ////#else + if (pktLinkProtocol[0] != 1) + { + DBG("EFAULT: Data Format Cannot be set to Ethernet Mode\n"); + return pktLinkProtocol[0]; + } + DBG("Data Format Set to Ethernet Mode \n"); +} //#endif + + GetTLV( pBuffer, buffSize, 0x12, qmap_version, 4); + if (le32_to_cpu(*qmap_version)) + GetTLV( pBuffer, buffSize, 0x13, qmap_version, 4); + + GetTLV( pBuffer, buffSize, 0x16, rx_size, 4); + GetTLV( pBuffer, buffSize, 0x18, tx_size, 4); + + if (set) { + GetTLV( pBuffer, buffSize, 0x15, &set->dl_data_aggregation_max_datagrams, 4); + GetTLV( pBuffer, buffSize, 0x16, &set->dl_data_aggregation_max_size, 4); + GetTLV( pBuffer, buffSize, 0x17, &set->ul_data_aggregation_max_datagrams, 4); + GetTLV( pBuffer, buffSize, 0x18, &set->ul_data_aggregation_max_size, 4); + GetTLV( pBuffer, buffSize, 0x1a, &set->dl_minimum_padding, 4); + } + + return pktLinkProtocol[0]; +} + +/*=========================================================================== +METHOD: + QMICTLSyncResp (Public Method) + +DESCRIPTION: + Validate the QMI CTL Sync Response + +PARAMETERS + pBuffer [ I ] - Buffer to be parsed + buffSize [ I ] - Size of pBuffer + +RETURN VALUE: + int - 0 for success + Negative errno for error +===========================================================================*/ +static int QMICTLSyncResp( + void *pBuffer, + u16 buffSize ) +{ + int result; + + // Ignore QMUX (2 bytes for QMI CTL) and SDU + u8 offset = sizeof( sQMUX ) + 2; + + if (pBuffer == 0 || buffSize < offset) + { + return -ENOMEM; + } + + pBuffer = pBuffer + offset; + buffSize -= offset; + + result = GetQMIMessageID( pBuffer, buffSize ); + if (result != 0x27) + { + return -EFAULT; + } + + result = ValidQMIMessage( pBuffer, buffSize ); + + return result; +} +#endif diff --git a/package/wwan/driver/quectel_Gobinet/src/QMI.h b/package/wwan/driver/quectel_Gobinet/src/QMI.h new file mode 100644 index 000000000..284bf7998 --- /dev/null +++ b/package/wwan/driver/quectel_Gobinet/src/QMI.h @@ -0,0 +1,337 @@ 
+/*=========================================================================== +FILE: + QMI.h + +DESCRIPTION: + Qualcomm QMI driver header + +FUNCTIONS: + Generic QMUX functions + ParseQMUX + FillQMUX + + Generic QMI functions + GetTLV + ValidQMIMessage + GetQMIMessageID + + Get sizes of buffers needed by QMI requests + QMUXHeaderSize + QMICTLGetClientIDReqSize + QMICTLReleaseClientIDReqSize + QMICTLReadyReqSize + QMIWDSSetEventReportReqSize + QMIWDSGetPKGSRVCStatusReqSize + QMIDMSGetMEIDReqSize + QMICTLSyncReqSize + + Fill Buffers with QMI requests + QMICTLGetClientIDReq + QMICTLReleaseClientIDReq + QMICTLReadyReq + QMIWDSSetEventReportReq + QMIWDSGetPKGSRVCStatusReq + QMIDMSGetMEIDReq + QMICTLSetDataFormatReq + QMICTLSyncReq + + Parse data from QMI responses + QMICTLGetClientIDResp + QMICTLReleaseClientIDResp + QMIWDSEventResp + QMIDMSGetMEIDResp + +Copyright (c) 2011, Code Aurora Forum. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Code Aurora Forum nor + the names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. +===========================================================================*/ + +#pragma once + +/*=========================================================================*/ +// Definitions +/*=========================================================================*/ + +extern int quec_debug; +// DBG macro +#define DBG( format, arg... ) do { \ + if (quec_debug == 1)\ + { \ + printk( KERN_INFO "GobiNet::%s " format, __FUNCTION__, ## arg ); \ + } }while(0) + +#if 0 +#define VDBG( format, arg... ) do { \ + if (debug == 1)\ + { \ + printk( KERN_INFO "GobiNet::%s " format, __FUNCTION__, ## arg ); \ + } } while(0) +#else +#define VDBG( format, arg... ) do { } while(0) +#endif + +#define INFO( format, arg... 
) do { \ + printk( KERN_INFO "GobiNet::%s " format, __FUNCTION__, ## arg ); \ + }while(0) + +// QMI Service Types +#define QMICTL 0 +#define QMIWDS 1 +#define QMIDMS 2 +#define QMINAS 3 +#define QMIUIM 11 +#define QMIWDA 0x1A + +#define u8 unsigned char +#define u16 unsigned short +#define u32 unsigned int +#define u64 unsigned long long + +#define bool u8 +#define true 1 +#define false 0 + +#define ENOMEM 12 +#define EFAULT 14 +#define EINVAL 22 +#ifndef ENOMSG +#define ENOMSG 42 +#endif +#define ENODATA 61 + +#define TLV_TYPE_LINK_PROTO 0x10 + +/*=========================================================================*/ +// Struct sQMUX +// +// Structure that defines a QMUX header +/*=========================================================================*/ +typedef struct sQMUX +{ + /* T\F, always 1 */ + u8 mTF; + + /* Size of message */ + u16 mLength; + + /* Control flag */ + u8 mCtrlFlag; + + /* Service Type */ + u8 mQMIService; + + /* Client ID */ + u8 mQMIClientID; + +}__attribute__((__packed__)) sQMUX; + +#if 0 +/*=========================================================================*/ +// Generic QMUX functions +/*=========================================================================*/ + +// Remove QMUX headers from a buffer +int ParseQMUX( + u16 * pClientID, + void * pBuffer, + u16 buffSize ); + +// Fill buffer with QMUX headers +int FillQMUX( + u16 clientID, + void * pBuffer, + u16 buffSize ); + +/*=========================================================================*/ +// Generic QMI functions +/*=========================================================================*/ + +// Get data buffer of a specified TLV from a QMI message +int GetTLV( + void * pQMIMessage, + u16 messageLen, + u8 type, + void * pOutDataBuf, + u16 bufferLen ); + +// Check mandatory TLV in a QMI message +int ValidQMIMessage( + void * pQMIMessage, + u16 messageLen ); + +// Get the message ID of a QMI message +int GetQMIMessageID( + void * pQMIMessage, + u16 messageLen ); + +/*=========================================================================*/ +// Get sizes of buffers needed by QMI requests +/*=========================================================================*/ + +// Get size of buffer needed for QMUX +u16 QMUXHeaderSize( void ); + +// Get size of buffer needed for QMUX + QMICTLGetClientIDReq +u16 QMICTLGetClientIDReqSize( void ); + +// Get size of buffer needed for QMUX + QMICTLReleaseClientIDReq +u16 QMICTLReleaseClientIDReqSize( void ); + +// Get size of buffer needed for QMUX + QMICTLReadyReq +u16 QMICTLReadyReqSize( void ); + +// Get size of buffer needed for QMUX + QMIWDSSetEventReportReq +u16 QMIWDSSetEventReportReqSize( void ); + +// Get size of buffer needed for QMUX + QMIWDSGetPKGSRVCStatusReq +u16 QMIWDSGetPKGSRVCStatusReqSize( void ); + +u16 QMIWDSSetQMUXBindMuxDataPortSize( void ); + +// Get size of buffer needed for QMUX + QMIDMSGetMEIDReq +u16 QMIDMSGetMEIDReqSize( void ); + +// Get size of buffer needed for QMUX + QMIWDASetDataFormatReq +u16 QMIWDASetDataFormatReqSize( int qmap_mode ); + +// Get size of buffer needed for QMUX + QMICTLSyncReq +u16 QMICTLSyncReqSize( void ); + +/*=========================================================================*/ +// Fill Buffers with QMI requests +/*=========================================================================*/ + +// Fill buffer with QMI CTL Get Client ID Request +int QMICTLGetClientIDReq( + void * pBuffer, + u16 buffSize, + u8 transactionID, + u8 serviceType ); + +// Fill buffer with QMI CTL Release Client ID 
Request
+int QMICTLReleaseClientIDReq(
+   void * pBuffer,
+   u16 buffSize,
+   u8 transactionID,
+   u16 clientID );
+
+// Fill buffer with QMI CTL Get Version Info Request
+int QMICTLReadyReq(
+   void * pBuffer,
+   u16 buffSize,
+   u8 transactionID );
+
+// Fill buffer with QMI WDS Set Event Report Request
+int QMIWDSSetEventReportReq(
+   void * pBuffer,
+   u16 buffSize,
+   u16 transactionID );
+
+// Fill buffer with QMI WDS Get PKG SRVC Status Request
+int QMIWDSGetPKGSRVCStatusReq(
+   void * pBuffer,
+   u16 buffSize,
+   u16 transactionID );
+
+u16 QMIWDSSetQMUXBindMuxDataPortReq(
+   void * pBuffer,
+   u16 buffSize,
+   u8 MuxId,
+   u16 transactionID );
+
+// Fill buffer with QMI DMS Get Serial Numbers Request
+int QMIDMSGetMEIDReq(
+   void * pBuffer,
+   u16 buffSize,
+   u16 transactionID );
+
+// Fill buffer with QMI WDA Set Data Format Request
+int QMIWDASetDataFormatReq(
+   void * pBuffer,
+   u16 buffSize,
+   bool bRawIPMode, int qmap_mode, u32 rx_size,
+   u16 transactionID );
+
+#if 0
+int QMIWDASetDataQmapReq(
+   void * pBuffer,
+   u16 buffSize,
+   u16 transactionID );
+#endif
+
+int QMICTLSyncReq(
+   void * pBuffer,
+   u16 buffSize,
+   u16 transactionID );
+
+/*=========================================================================*/
+// Parse data from QMI responses
+/*=========================================================================*/
+
+// Parse the QMI CTL Get Client ID Resp
+int QMICTLGetClientIDResp(
+   void * pBuffer,
+   u16 buffSize,
+   u16 * pClientID );
+
+// Verify the QMI CTL Release Client ID Resp is valid
+int QMICTLReleaseClientIDResp(
+   void * pBuffer,
+   u16 buffSize );
+
+// Parse the QMI WDS Set Event Report Resp/Indication or
+// QMI WDS Get PKG SRVC Status Resp/Indication
+int QMIWDSEventResp(
+   void * pBuffer,
+   u16 buffSize,
+   u32 * pTXOk,
+   u32 * pRXOk,
+   u32 * pTXErr,
+   u32 * pRXErr,
+   u32 * pTXOfl,
+   u32 * pRXOfl,
+   u64 * pTXBytesOk,
+   u64 * pRXBytesOk,
+   bool * pbLinkState,
+   bool * pbReconfigure );
+
+// Parse the QMI DMS Get Serial Numbers Resp
+int QMIDMSGetMEIDResp(
+   void * pBuffer,
+   u16 buffSize,
+   char * pMEID,
+   int meidSize );
+
+// Parse the QMI WDA Set Data Format Resp
+int QMIWDASetDataFormatResp(
+   void * pBuffer,
+   u16 buffSize, bool bRawIPMode, int *qmap_enabled, int *rx_size, int *tx_size);
+
+// Parse the QMI CTL Sync Response
+int QMICTLSyncResp(
+   void *pBuffer,
+   u16 buffSize );
+#endif
diff --git a/package/wwan/driver/quectel_Gobinet/src/QMIDevice.c b/package/wwan/driver/quectel_Gobinet/src/QMIDevice.c
new file mode 100644
index 000000000..5d907f50a
--- /dev/null
+++ b/package/wwan/driver/quectel_Gobinet/src/QMIDevice.c
@@ -0,0 +1,4363 @@
+/*===========================================================================
+FILE:
+   QMIDevice.c
+
+DESCRIPTION:
+   Functions related to the QMI interface device
+
+FUNCTIONS:
+   Generic functions
+      IsDeviceValid
+      PrintHex
+      GobiSetDownReason
+      GobiClearDownReason
+      GobiTestDownReason
+
+   Driver level asynchronous read functions
+      ResubmitIntURB
+      ReadCallback
+      IntCallback
+      StartRead
+      KillRead
+
+   Internal read/write functions
+      ReadAsync
+      UpSem
+      ReadSync
+      WriteSyncCallback
+      WriteSync
+
+   Internal memory management functions
+      GetClientID
+      ReleaseClientID
+      FindClientMem
+      AddToReadMemList
+      PopFromReadMemList
+      AddToNotifyList
+      NotifyAndPopNotifyList
+      AddToURBList
+      PopFromURBList
+
+   Internal userspace wrapper functions
+      UserspaceunlockedIOCTL
+
+   Userspace wrappers
+      UserspaceOpen
+      UserspaceIOCTL
+      UserspaceClose
+      UserspaceRead
+      UserspaceWrite
+      UserspacePoll
+
+   Initializer and destructor
+      RegisterQMIDevice
+      DeregisterQMIDevice
+
+   Driver level client management
+      QMIReady
+      QMIWDSCallback
+      SetupQMIWDSCallback
+      QMIDMSGetMEID
+
+Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in the
+      documentation and/or other materials provided with the distribution.
+    * Neither the name of Code Aurora Forum nor
+      the names of its contributors may be used to endorse or promote
+      products derived from this software without specific prior written
+      permission.
+
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+===========================================================================*/
+
+//---------------------------------------------------------------------------
+// Include Files
+//---------------------------------------------------------------------------
+#include <asm/unaligned.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+
+//-----------------------------------------------------------------------------
+// Definitions
+//-----------------------------------------------------------------------------
+
+#define __QUEC_INCLUDE_QMI_C__
+#include "QMI.c"
+#define __QUECTEL_INTER__
+#include "QMIDevice.h"
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 2,6,22 ))
+static int s_interval;
+#endif
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 2,6,14 ))
+#include <linux/devfs_fs_kernel.h>
+static char devfs_name[32];
+static int device_create(struct class *class, struct device *parent, dev_t devt, const char *fmt, ...)
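+/* Compatibility shim: kernels at or below 2.6.14 lack device_create() and
+ * device_destroy(), so they are emulated here with the older class_device
+ * and devfs interfaces. Note the single static devfs_name buffer means only
+ * one such device node is tracked at a time on those kernels. */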
+{ + va_list vargs; + struct class_device *class_dev; + int err; + + va_start(vargs, fmt); + vsnprintf(devfs_name, sizeof(devfs_name), fmt, vargs); + va_end(vargs); + + class_dev = class_device_create(class, devt, parent, "%s", devfs_name); + if (IS_ERR(class_dev)) { + err = PTR_ERR(class_dev); + goto out; + } + + err = devfs_mk_cdev(devt, S_IFCHR|S_IRUSR|S_IWUSR|S_IRGRP, devfs_name); + if (err) { + class_device_destroy(class, devt); + goto out; + } + + return 0; + +out: + return err; +} + +static void device_destroy(struct class *class, dev_t devt) +{ + class_device_destroy(class, devt); + devfs_remove(devfs_name); +} +#endif + +#ifdef CONFIG_PM +// Prototype to GobiNetSuspend function +int QuecGobiNetSuspend( + struct usb_interface * pIntf, + pm_message_t powerEvent ); +#endif /* CONFIG_PM */ + +// IOCTL to generate a client ID for this service type +#define IOCTL_QMI_GET_SERVICE_FILE 0x8BE0 + 1 + +// IOCTL to get the VIDPID of the device +#define IOCTL_QMI_GET_DEVICE_VIDPID 0x8BE0 + 2 + +// IOCTL to get the MEID of the device +#define IOCTL_QMI_GET_DEVICE_MEID 0x8BE0 + 3 + +#define IOCTL_QMI_RELEASE_SERVICE_FILE_IOCTL (0x8BE0 + 4) + +// CDC GET_ENCAPSULATED_RESPONSE packet +#define CDC_GET_ENCAPSULATED_RESPONSE_LE 0x01A1ll +#define CDC_GET_ENCAPSULATED_RESPONSE_BE 0xA101000000000000ll +/* The following masks filter the common part of the encapsulated response + * packet value for Gobi and QMI devices, ie. ignore usb interface number + */ +#define CDC_RSP_MASK_BE 0xFFFFFFFF00FFFFFFll +#define CDC_RSP_MASK_LE 0xFFFFFFE0FFFFFFFFll + +static const int i = 1; +#define is_bigendian() ( (*(char*)&i) == 0 ) +#define CDC_GET_ENCAPSULATED_RESPONSE(pcdcrsp, pmask)\ +{\ + *pcdcrsp = is_bigendian() ? CDC_GET_ENCAPSULATED_RESPONSE_BE \ + : CDC_GET_ENCAPSULATED_RESPONSE_LE ; \ + *pmask = is_bigendian() ? CDC_RSP_MASK_BE \ + : CDC_RSP_MASK_LE; \ +} + +// CDC CONNECTION_SPEED_CHANGE indication packet +#define CDC_CONNECTION_SPEED_CHANGE_LE 0x2AA1ll +#define CDC_CONNECTION_SPEED_CHANGE_BE 0xA12A000000000000ll +/* The following masks filter the common part of the connection speed change + * packet value for Gobi and QMI devices + */ +#define CDC_CONNSPD_MASK_BE 0xFFFFFFFFFFFF7FFFll +#define CDC_CONNSPD_MASK_LE 0XFFF7FFFFFFFFFFFFll +#define CDC_GET_CONNECTION_SPEED_CHANGE(pcdccscp, pmask)\ +{\ + *pcdccscp = is_bigendian() ? CDC_CONNECTION_SPEED_CHANGE_BE \ + : CDC_CONNECTION_SPEED_CHANGE_LE ; \ + *pmask = is_bigendian() ? 
CDC_CONNSPD_MASK_BE \
+                              : CDC_CONNSPD_MASK_LE; \
+}
+
+#define SET_CONTROL_LINE_STATE_REQUEST_TYPE 0x21
+#define SET_CONTROL_LINE_STATE_REQUEST      0x22
+#define CONTROL_DTR                         0x01
+#define CONTROL_RTS                         0x02
+
+/*=========================================================================*/
+// UserspaceQMIFops
+//    QMI device's userspace file operations
+/*=========================================================================*/
+static struct file_operations UserspaceQMIFops =
+{
+   .owner     = THIS_MODULE,
+   .read      = UserspaceRead,
+   .write     = UserspaceWrite,
+#ifdef CONFIG_COMPAT
+   .compat_ioctl = UserspaceunlockedIOCTL,
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION( 2,6,36 ))
+   .unlocked_ioctl = UserspaceunlockedIOCTL,
+#else
+   .ioctl     = UserspaceIOCTL,
+#endif
+   .open      = UserspaceOpen,
+#ifdef quectel_no_for_each_process
+   .release   = UserspaceClose,
+#else
+   .flush     = UserspaceClose,
+#endif
+   .poll      = UserspacePoll,
+};
+
+/*=========================================================================*/
+// Generic functions
+/*=========================================================================*/
+static u8 QMIXactionIDGet( sGobiUSBNet *pDev)
+{
+   u8 transactionID;
+
+   if( 0 == (transactionID = atomic_add_return( 1, &pDev->mQMIDev.mQMICTLTransactionID)) )
+   {
+      transactionID = atomic_add_return( 1, &pDev->mQMIDev.mQMICTLTransactionID );
+   }
+
+#if 1 // Free unused QMI responses now; otherwise, once this transactionID is
+      // re-used, stale data would be mistaken for the response to the new
+      // QMI request that carries the same transactionID.
+   if (transactionID) {
+      unsigned long flags;
+      void * pReadBuffer;
+      u16 readBufferSize;
+
+      spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );
+      while (PopFromReadMemList( pDev,
+                                 QMICTL,
+                                 transactionID,
+                                 &pReadBuffer,
+                                 &readBufferSize ) == true)
+      {
+         kfree( pReadBuffer );
+      }
+      spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+   }
+#endif
+
+   return transactionID;
+}
+
+static struct usb_endpoint_descriptor *GetEndpoint(
+    struct usb_interface *pintf,
+    int type,
+    int dir )
+{
+   int i;
+   struct usb_host_interface *iface = pintf->cur_altsetting;
+   struct usb_endpoint_descriptor *pendp;
+
+   for( i = 0; i < iface->desc.bNumEndpoints; i++)
+   {
+      pendp = &iface->endpoint[i].desc;
+      if( ((pendp->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == dir)
+          &&
+          (usb_endpoint_type(pendp) == type) )
+      {
+         return pendp;
+      }
+   }
+
+   return NULL;
+}
+
+/*===========================================================================
+METHOD:
+   IsDeviceValid (Public Method)
+
+DESCRIPTION:
+   Basic test to see if device memory is valid
+
+PARAMETERS:
+   pDev     [ I ] - Device specific memory
+
+RETURN VALUE:
+   bool
+===========================================================================*/
+static bool IsDeviceValid( sGobiUSBNet * pDev )
+{
+   if (pDev == NULL)
+   {
+      return false;
+   }
+
+   if (pDev->mbQMIValid == false)
+   {
+      return false;
+   }
+
+   return true;
+}
+
+/*===========================================================================
+METHOD:
+   PrintHex (Public Method)
+
+DESCRIPTION:
+   Print Hex data, for debug purposes
+
+PARAMETERS:
+   pBuffer      [ I ] - Data buffer
+   bufSize      [ I ] - Size of data buffer
+
+RETURN VALUE:
+   None
+===========================================================================*/
+void QuecPrintHex(
+   void *      pBuffer,
+   u16         bufSize )
+{
+   char * pPrintBuf;
+   u16 pos;
+   int status;
+
+   if (quec_debug != 1)
+   {
+      return;
+   }
+
+   pPrintBuf = kmalloc( bufSize * 3 + 1, GFP_ATOMIC );
+   if (pPrintBuf == NULL)
+   {
+      DBG( "Unable to allocate buffer\n" );
+      return;
+   }
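+   // Each input byte expands to three characters ("XX "); the extra byte
+   // holds the NUL terminator that snprintf() writes on every iteration.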
memset( pPrintBuf, 0 , bufSize * 3 + 1 ); + + for (pos = 0; pos < bufSize; pos++) + { + status = snprintf( (pPrintBuf + (pos * 3)), + 4, + "%02X ", + *(u8 *)(pBuffer + pos) ); + if (status != 3) + { + DBG( "snprintf error %d\n", status ); + kfree( pPrintBuf ); + return; + } + } + + DBG( " : %s\n", pPrintBuf ); + + kfree( pPrintBuf ); + pPrintBuf = NULL; + return; +} + +/*=========================================================================== +METHOD: + GobiSetDownReason (Public Method) + +DESCRIPTION: + Sets mDownReason and turns carrier off + +PARAMETERS + pDev [ I ] - Device specific memory + reason [ I ] - Reason device is down + +RETURN VALUE: + None +===========================================================================*/ +void QuecGobiSetDownReason( + sGobiUSBNet * pDev, + u8 reason ) +{ + DBG("%s reason=%d, mDownReason=%x\n", __func__, reason, (unsigned)pDev->mDownReason); + +#ifdef QUECTEL_WWAN_QMAP + if (reason == NO_NDIS_CONNECTION) + return; +#endif + + set_bit( reason, &pDev->mDownReason ); + + netif_carrier_off( pDev->mpNetDev->net ); +} + +/*=========================================================================== +METHOD: + GobiClearDownReason (Public Method) + +DESCRIPTION: + Clear mDownReason and may turn carrier on + +PARAMETERS + pDev [ I ] - Device specific memory + reason [ I ] - Reason device is no longer down + +RETURN VALUE: + None +===========================================================================*/ +void QuecGobiClearDownReason( + sGobiUSBNet * pDev, + u8 reason ) +{ + clear_bit( reason, &pDev->mDownReason ); + + DBG("%s reason=%d, mDownReason=%x\n", __func__, reason, (unsigned)pDev->mDownReason); +#if 0 //(LINUX_VERSION_CODE >= KERNEL_VERSION( 3,11,0 )) + netif_carrier_on( pDev->mpNetDev->net ); +#else + if (pDev->mDownReason == 0) + { +#ifdef QUECTEL_WWAN_QMAP + if (pDev->qmap_mode && !pDev->link_state) + ; + else +#endif + netif_carrier_on( pDev->mpNetDev->net ); + } +#endif +} + +/*=========================================================================== +METHOD: + GobiTestDownReason (Public Method) + +DESCRIPTION: + Test mDownReason and returns whether reason is set + +PARAMETERS + pDev [ I ] - Device specific memory + reason [ I ] - Reason device is down + +RETURN VALUE: + bool +===========================================================================*/ +bool QuecGobiTestDownReason( + sGobiUSBNet * pDev, + u8 reason ) +{ + return test_bit( reason, &pDev->mDownReason ); +} + +/*=========================================================================*/ +// Driver level asynchronous read functions +/*=========================================================================*/ + +/*=========================================================================== +METHOD: + ResubmitIntURB (Public Method) + +DESCRIPTION: + Resubmit interrupt URB, re-using same values + +PARAMETERS + pIntURB [ I ] - Interrupt URB + +RETURN VALUE: + int - 0 for success + negative errno for failure +===========================================================================*/ +static int ResubmitIntURB( struct urb * pIntURB ) +{ + int status; + int interval; + + // Sanity test + if ( (pIntURB == NULL) + || (pIntURB->dev == NULL) ) + { + return -EINVAL; + } + + // Interval needs reset after every URB completion +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,22 )) + interval = max((int)(pIntURB->ep->desc.bInterval), + (pIntURB->dev->speed == USB_SPEED_HIGH) ? 
7 : 3); +#else + interval = s_interval; +#endif + + // Reschedule interrupt URB + usb_fill_int_urb( pIntURB, + pIntURB->dev, + pIntURB->pipe, + pIntURB->transfer_buffer, + pIntURB->transfer_buffer_length, + pIntURB->complete, + pIntURB->context, + interval ); + status = usb_submit_urb( pIntURB, GFP_ATOMIC ); + if (status != 0) + { + DBG( "Error re-submitting Int URB %d\n", status ); + } + + return status; +} + + +#ifdef QUECTEL_QMI_MERGE +static int MergeRecQmiMsg( sQMIDev * pQMIDev, struct urb * pReadURB ) +{ + sQMIMsgHeader * mHeader; + sQMIMsgPacket * mPacket; + + DBG( "%s called \n", __func__ ); + mPacket = pQMIDev->mpQmiMsgPacket; + + if(pReadURB->actual_length < sizeof(sQMIMsgHeader)) + { + return -1; + } + + mHeader = (sQMIMsgHeader *)pReadURB->transfer_buffer; + if(le16_to_cpu(mHeader->idenity) != MERGE_PACKET_IDENTITY || le16_to_cpu(mHeader->version) != MERGE_PACKET_VERSION || le16_to_cpu(mHeader->cur_len) > le16_to_cpu(mHeader->total_len)) + return -1; + + if(le16_to_cpu(mHeader->cur_len) == le16_to_cpu(mHeader->total_len)) { + mPacket->len = le16_to_cpu(mHeader->total_len); + memcpy(pReadURB->transfer_buffer, pReadURB->transfer_buffer + sizeof(sQMIMsgHeader), mPacket->len); + pReadURB->actual_length = mPacket->len; + mPacket->len = 0; + + return 0; + } + + memcpy(mPacket->buf + mPacket->len, pReadURB->transfer_buffer + sizeof(sQMIMsgHeader), le16_to_cpu(mHeader->cur_len)); + mPacket->len += le16_to_cpu(mHeader->cur_len); + + if (le16_to_cpu(mHeader->cur_len) < MERGE_PACKET_MAX_PAYLOAD_SIZE || mPacket->len >= le16_to_cpu(mHeader->total_len)) { + memcpy(pReadURB->transfer_buffer, mPacket->buf, mPacket->len); + pReadURB->actual_length = mPacket->len; + mPacket->len = 0; + return 0; + } + + return -1; +} +#endif + +/*=========================================================================== +METHOD: + ReadCallback (Public Method) + +DESCRIPTION: + Put the data in storage and notify anyone waiting for data + +PARAMETERS + pReadURB [ I ] - URB this callback is run for + +RETURN VALUE: + None +===========================================================================*/ +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,18 )) +static void ReadCallback( struct urb * pReadURB ) +#else +static void ReadCallback(struct urb *pReadURB, struct pt_regs *regs) +#endif +{ + int result; + u16 clientID; + sClientMemList * pClientMem; + void * pData; + void * pDataCopy; + u16 dataSize; + sGobiUSBNet * pDev; + unsigned long flags; + u16 transactionID; + + if (pReadURB == NULL) + { + DBG( "bad read URB\n" ); + return; + } + + pDev = pReadURB->context; + if (IsDeviceValid( pDev ) == false) + { + DBG( "Invalid device!\n" ); + return; + } + +#ifdef READ_QMI_URB_ERROR + del_timer(&pDev->mQMIDev.mReadUrbTimer); + if ((pReadURB->status == -ECONNRESET) && (pReadURB->actual_length > 0)) + pReadURB->status = 0; +#endif + + if (pReadURB->status != 0) + { + DBG( "Read status = %d\n", pReadURB->status ); + + // Resubmit the interrupt URB + ResubmitIntURB( pDev->mQMIDev.mpIntURB ); + + return; + } + DBG( "Read %d bytes\n", pReadURB->actual_length ); + +#ifdef QUECTEL_QMI_MERGE + if(MergeRecQmiMsg(&pDev->mQMIDev, pReadURB)) + { + DBG( "not a full packet, read again\n"); + // Resubmit the interrupt URB + ResubmitIntURB( pDev->mQMIDev.mpIntURB ); + return; + } +#endif + + pData = pReadURB->transfer_buffer; + dataSize = pReadURB->actual_length; + + PrintHex( pData, dataSize ); + +#ifdef READ_QMI_URB_ERROR + if (dataSize < (le16_to_cpu(get_unaligned((u16*)(pData + 1))) + 1)) { + dataSize = 
(le16_to_cpu(get_unaligned((u16*)(pData + 1))) + 1); + memset(pReadURB->transfer_buffer + pReadURB->actual_length, 0x00, dataSize - pReadURB->actual_length); + INFO( "Read %d / %d bytes\n", pReadURB->actual_length, dataSize); + } +#endif + + result = ParseQMUX( &clientID, + pData, + dataSize ); + if (result < 0) + { + DBG( "Read error parsing QMUX %d\n", result ); + + // Resubmit the interrupt URB + ResubmitIntURB( pDev->mQMIDev.mpIntURB ); + + return; + } + + // Grab transaction ID + + // Data large enough? + if (dataSize < result + 3) + { + DBG( "Data buffer too small to parse\n" ); + + // Resubmit the interrupt URB + ResubmitIntURB( pDev->mQMIDev.mpIntURB ); + + return; + } + + // Transaction ID size is 1 for QMICTL, 2 for others + if (clientID == QMICTL) + { + transactionID = *(u8*)(pData + result + 1); + } + else + { + transactionID = le16_to_cpu( get_unaligned((u16*)(pData + result + 1)) ); + } + + // Critical section + spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags ); + + // Find memory storage for this service and Client ID + // Not using FindClientMem because it can't handle broadcasts + pClientMem = pDev->mQMIDev.mpClientMemList; + + while (pClientMem != NULL) + { + if (pClientMem->mClientID == clientID + || (pClientMem->mClientID | 0xff00) == clientID) + { + // Make copy of pData + pDataCopy = kmalloc( dataSize, GFP_ATOMIC ); + if (pDataCopy == NULL) + { + DBG( "Error allocating client data memory\n" ); + + // End critical section + spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags ); + + // Resubmit the interrupt URB + ResubmitIntURB( pDev->mQMIDev.mpIntURB ); + + return; + } + + memcpy( pDataCopy, pData, dataSize ); + + if (AddToReadMemList( pDev, + pClientMem->mClientID, + transactionID, + pDataCopy, + dataSize ) == false) + { + DBG( "Error allocating pReadMemListEntry " + "read will be discarded\n" ); + kfree( pDataCopy ); + + // End critical section + spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags ); + + // Resubmit the interrupt URB + ResubmitIntURB( pDev->mQMIDev.mpIntURB ); + + return; + } + + // Success + VDBG( "Creating new readListEntry for client 0x%04X, TID %x\n", + clientID, + transactionID ); + + // Notify this client data exists + NotifyAndPopNotifyList( pDev, + pClientMem->mClientID, + transactionID ); + + // Possibly notify poll() that data exists + wake_up_interruptible_sync( &pClientMem->mWaitQueue ); + + // Not a broadcast + if (clientID >> 8 != 0xff) + { + break; + } + } + + // Next element + pClientMem = pClientMem->mpNext; + } + + // End critical section + spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags ); + + // Resubmit the interrupt URB + ResubmitIntURB( pDev->mQMIDev.mpIntURB ); +} + +/*=========================================================================== +METHOD: + IntCallback (Public Method) + +DESCRIPTION: + Data is available, fire off a read URB + +PARAMETERS + pIntURB [ I ] - URB this callback is run for + +RETURN VALUE: + None +===========================================================================*/ +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,18 )) +static void IntCallback( struct urb * pIntURB ) +{ +#else +static void IntCallback(struct urb *pIntURB, struct pt_regs *regs) +{ +#endif + int status; + struct usb_cdc_notification *dr; + + sGobiUSBNet * pDev = (sGobiUSBNet *)pIntURB->context; + dr = (struct usb_cdc_notification *)pDev->mQMIDev.mpIntBuffer; + + if (IsDeviceValid( pDev ) == false) + { + DBG( "Invalid device!\n" ); + return; + } + + // Verify this was a normal interrupt + if 
(pIntURB->status != 0) + { + DBG( "IntCallback: Int status = %d\n", pIntURB->status ); + + // Ignore EOVERFLOW errors + if (pIntURB->status != -EOVERFLOW) + { + // Read 'thread' dies here + return; + } + } + else + { + //TODO cast transfer_buffer to struct usb_cdc_notification + + VDBG( "IntCallback: Encapsulated Response = 0x%llx\n", + (*(u64*)pIntURB->transfer_buffer)); + + switch (dr->bNotificationType) { + case USB_CDC_NOTIFY_RESPONSE_AVAILABLE: //0x01 + { + // Time to read + usb_fill_control_urb( pDev->mQMIDev.mpReadURB, + pDev->mpNetDev->udev, + usb_rcvctrlpipe( pDev->mpNetDev->udev, 0 ), + (unsigned char *)pDev->mQMIDev.mpReadSetupPacket, + pDev->mQMIDev.mpReadBuffer, + DEFAULT_READ_URB_LENGTH, + ReadCallback, + pDev ); + #ifdef READ_QMI_URB_ERROR + mod_timer( &pDev->mQMIDev.mReadUrbTimer, jiffies + msecs_to_jiffies(300) ); + #endif + status = usb_submit_urb( pDev->mQMIDev.mpReadURB, GFP_ATOMIC ); + if (status != 0) + { + DBG("Error submitting Read URB %d\n", status); + // Resubmit the interrupt urb + ResubmitIntURB(pIntURB); + return; + } + + // Int URB will be resubmitted during ReadCallback + return; + } + case USB_CDC_NOTIFY_SPEED_CHANGE: //0x2a + { + DBG( "IntCallback: Connection Speed Change = 0x%llx\n", + (*(u64*)pIntURB->transfer_buffer)); + + // if upstream or downstream is 0, stop traffic. Otherwise resume it + if ((*(u32*)(pIntURB->transfer_buffer + 8) == 0) + || (*(u32*)(pIntURB->transfer_buffer + 12) == 0)) + { + GobiSetDownReason( pDev, CDC_CONNECTION_SPEED ); + DBG( "traffic stopping due to CONNECTION_SPEED_CHANGE\n" ); + } + else + { + GobiClearDownReason( pDev, CDC_CONNECTION_SPEED ); + DBG( "resuming traffic due to CONNECTION_SPEED_CHANGE\n" ); + } + } + break; + default: + { + DBG( "ignoring invalid interrupt in packet\n" ); + PrintHex( pIntURB->transfer_buffer, pIntURB->actual_length ); + } + } + + // Resubmit the interrupt urb + ResubmitIntURB( pIntURB ); + + return; + } +} + +#ifdef READ_QMI_URB_ERROR +static void ReadUrbTimerFunc( struct urb * pReadURB ) +{ + int result; + + INFO( "%s called (%ld).\n", __func__, jiffies ); + + if ((pReadURB != NULL) && (pReadURB->status == -EINPROGRESS)) + { + // Asynchronously unlink URB. On success, -EINPROGRESS will be returned, + // URB status will be set to -ECONNRESET, and ReadCallback() executed + result = usb_unlink_urb( pReadURB ); + INFO( "%s called usb_unlink_urb, result = %d\n", __func__, result); + } +} +#endif + +/*=========================================================================== +METHOD: + StartRead (Public Method) + +DESCRIPTION: + Start continuous read "thread" (callback driven) + + Note: In case of error, KillRead() should be run + to remove urbs and clean up memory. 
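+
+   The "thread" here is a chain of URB completions rather than a kernel
+   thread: the interrupt URB completes into IntCallback(), which submits the
+   control read URB; ReadCallback() then stores the QMI message for the
+   owning client and resubmits the interrupt URB via ResubmitIntURB(),
+   restarting the cycle.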
+ +PARAMETERS: + pDev [ I ] - Device specific memory + +RETURN VALUE: + int - 0 for success + negative errno for failure +===========================================================================*/ +int QuecStartRead( sGobiUSBNet * pDev ) +{ + int interval; + struct usb_endpoint_descriptor *pendp; + + if (IsDeviceValid( pDev ) == false) + { + DBG( "Invalid device!\n" ); + return -ENXIO; + } + + // Allocate URB buffers + pDev->mQMIDev.mpReadURB = usb_alloc_urb( 0, GFP_KERNEL ); + if (pDev->mQMIDev.mpReadURB == NULL) + { + DBG( "Error allocating read urb\n" ); + return -ENOMEM; + } + +#ifdef READ_QMI_URB_ERROR + setup_timer( &pDev->mQMIDev.mReadUrbTimer, (void*)ReadUrbTimerFunc, (unsigned long)pDev->mQMIDev.mpReadURB ); +#endif + + pDev->mQMIDev.mpIntURB = usb_alloc_urb( 0, GFP_KERNEL ); + if (pDev->mQMIDev.mpIntURB == NULL) + { + DBG( "Error allocating int urb\n" ); + usb_free_urb( pDev->mQMIDev.mpReadURB ); + pDev->mQMIDev.mpReadURB = NULL; + return -ENOMEM; + } + + // Create data buffers + pDev->mQMIDev.mpReadBuffer = kmalloc( DEFAULT_READ_URB_LENGTH, GFP_KERNEL ); + if (pDev->mQMIDev.mpReadBuffer == NULL) + { + DBG( "Error allocating read buffer\n" ); + usb_free_urb( pDev->mQMIDev.mpIntURB ); + pDev->mQMIDev.mpIntURB = NULL; + usb_free_urb( pDev->mQMIDev.mpReadURB ); + pDev->mQMIDev.mpReadURB = NULL; + return -ENOMEM; + } + + pDev->mQMIDev.mpIntBuffer = kmalloc( 64, GFP_KERNEL ); + if (pDev->mQMIDev.mpIntBuffer == NULL) + { + DBG( "Error allocating int buffer\n" ); + kfree( pDev->mQMIDev.mpReadBuffer ); + pDev->mQMIDev.mpReadBuffer = NULL; + usb_free_urb( pDev->mQMIDev.mpIntURB ); + pDev->mQMIDev.mpIntURB = NULL; + usb_free_urb( pDev->mQMIDev.mpReadURB ); + pDev->mQMIDev.mpReadURB = NULL; + return -ENOMEM; + } + + pDev->mQMIDev.mpReadSetupPacket = kmalloc( sizeof( sURBSetupPacket ), + GFP_KERNEL ); + if (pDev->mQMIDev.mpReadSetupPacket == NULL) + { + DBG( "Error allocating setup packet buffer\n" ); + kfree( pDev->mQMIDev.mpIntBuffer ); + pDev->mQMIDev.mpIntBuffer = NULL; + kfree( pDev->mQMIDev.mpReadBuffer ); + pDev->mQMIDev.mpReadBuffer = NULL; + usb_free_urb( pDev->mQMIDev.mpIntURB ); + pDev->mQMIDev.mpIntURB = NULL; + usb_free_urb( pDev->mQMIDev.mpReadURB ); + pDev->mQMIDev.mpReadURB = NULL; + return -ENOMEM; + } + + // CDC Get Encapsulated Response packet + pDev->mQMIDev.mpReadSetupPacket->mRequestType = 0xA1; + pDev->mQMIDev.mpReadSetupPacket->mRequestCode = 1; + pDev->mQMIDev.mpReadSetupPacket->mValue = 0; + pDev->mQMIDev.mpReadSetupPacket->mIndex = + cpu_to_le16(pDev->mpIntf->cur_altsetting->desc.bInterfaceNumber); /* interface number */ + pDev->mQMIDev.mpReadSetupPacket->mLength = cpu_to_le16(DEFAULT_READ_URB_LENGTH); + + pendp = GetEndpoint(pDev->mpIntf, USB_ENDPOINT_XFER_INT, USB_DIR_IN); + if (pendp == NULL) + { + DBG( "Invalid interrupt endpoint!\n" ); + kfree(pDev->mQMIDev.mpReadSetupPacket); + pDev->mQMIDev.mpReadSetupPacket = NULL; + kfree( pDev->mQMIDev.mpIntBuffer ); + pDev->mQMIDev.mpIntBuffer = NULL; + kfree( pDev->mQMIDev.mpReadBuffer ); + pDev->mQMIDev.mpReadBuffer = NULL; + usb_free_urb( pDev->mQMIDev.mpIntURB ); + pDev->mQMIDev.mpIntURB = NULL; + usb_free_urb( pDev->mQMIDev.mpReadURB ); + pDev->mQMIDev.mpReadURB = NULL; + return -ENXIO; + } + +#ifdef QUECTEL_QMI_MERGE + pDev->mQMIDev.mpQmiMsgPacket = kmalloc( sizeof(sQMIMsgPacket), GFP_KERNEL ); + if (pDev->mQMIDev.mpQmiMsgPacket == NULL) + { + DBG( "Error allocating qmi msg merge packet buffer!\n" ); + kfree(pDev->mQMIDev.mpReadSetupPacket); + pDev->mQMIDev.mpReadSetupPacket = NULL; + kfree( 
pDev->mQMIDev.mpIntBuffer );
+      pDev->mQMIDev.mpIntBuffer = NULL;
+      kfree( pDev->mQMIDev.mpReadBuffer );
+      pDev->mQMIDev.mpReadBuffer = NULL;
+      usb_free_urb( pDev->mQMIDev.mpIntURB );
+      pDev->mQMIDev.mpIntURB = NULL;
+      usb_free_urb( pDev->mQMIDev.mpReadURB );
+      pDev->mQMIDev.mpReadURB = NULL;
+      return -ENOMEM;
+   }
+#endif
+
+   // Interval needs reset after every URB completion
+   interval = max((int)(pendp->bInterval),
+                  (pDev->mpNetDev->udev->speed == USB_SPEED_HIGH) ? 7 : 3);
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 2,6,22 ))
+   s_interval = interval;
+#endif
+
+   // Schedule interrupt URB
+   usb_fill_int_urb( pDev->mQMIDev.mpIntURB,
+                     pDev->mpNetDev->udev,
+                     /* QMI interrupt endpoint for the following
+                      * interface configuration: DM, NMEA, MDM, NET
+                      */
+                     usb_rcvintpipe( pDev->mpNetDev->udev,
+                                     pendp->bEndpointAddress),
+                     pDev->mQMIDev.mpIntBuffer,
+                     min((int)le16_to_cpu(pendp->wMaxPacketSize), 64),
+                     IntCallback,
+                     pDev,
+                     interval );
+   return usb_submit_urb( pDev->mQMIDev.mpIntURB, GFP_KERNEL );
+}
+
+/*===========================================================================
+METHOD:
+   KillRead (Public Method)
+
+DESCRIPTION:
+   Kill continuous read "thread"
+
+PARAMETERS:
+   pDev     [ I ] - Device specific memory
+
+RETURN VALUE:
+   None
+===========================================================================*/
+void QuecKillRead( sGobiUSBNet * pDev )
+{
+   // Stop reading
+   if (pDev->mQMIDev.mpReadURB != NULL)
+   {
+      DBG( "Killing read URB\n" );
+      usb_kill_urb( pDev->mQMIDev.mpReadURB );
+   }
+
+   if (pDev->mQMIDev.mpIntURB != NULL)
+   {
+      DBG( "Killing int URB\n" );
+      usb_kill_urb( pDev->mQMIDev.mpIntURB );
+   }
+
+   // Release buffers
+   kfree( pDev->mQMIDev.mpReadSetupPacket );
+   pDev->mQMIDev.mpReadSetupPacket = NULL;
+   kfree( pDev->mQMIDev.mpReadBuffer );
+   pDev->mQMIDev.mpReadBuffer = NULL;
+   kfree( pDev->mQMIDev.mpIntBuffer );
+   pDev->mQMIDev.mpIntBuffer = NULL;
+
+   // Release URB's
+   usb_free_urb( pDev->mQMIDev.mpReadURB );
+   pDev->mQMIDev.mpReadURB = NULL;
+   usb_free_urb( pDev->mQMIDev.mpIntURB );
+   pDev->mQMIDev.mpIntURB = NULL;
+
+#ifdef QUECTEL_QMI_MERGE
+   kfree( pDev->mQMIDev.mpQmiMsgPacket );
+   pDev->mQMIDev.mpQmiMsgPacket = NULL;
+#endif
+}
+
+/*=========================================================================*/
+// Internal read/write functions
+/*=========================================================================*/
+
+/*===========================================================================
+METHOD:
+   ReadAsync (Public Method)
+
+DESCRIPTION:
+   Start asynchronous read
+   NOTE: Reading client's data store, not device
+
+PARAMETERS:
+   pDev              [ I ] - Device specific memory
+   clientID          [ I ] - Requester's client ID
+   transactionID     [ I ] - Transaction ID or 0 for any
+   pCallback         [ I ] - Callback to be executed when data is available
+   pData             [ I ] - Data buffer that will be passed (unmodified)
+                             to callback
+
+RETURN VALUE:
+   int - 0 for success
+         negative errno for failure
+===========================================================================*/
+static int ReadAsync(
+   sGobiUSBNet *    pDev,
+   u16              clientID,
+   u16              transactionID,
+   void             (*pCallback)(sGobiUSBNet*, u16, void *),
+   void *           pData )
+{
+   sClientMemList * pClientMem;
+   sReadMemList ** ppReadMemList;
+
+   unsigned long flags;
+
+   if (IsDeviceValid( pDev ) == false)
+   {
+      DBG( "Invalid device!\n" );
+      return -ENXIO;
+   }
+
+   // Critical section
+   spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );
+
+   // Find memory storage for this client ID
+   pClientMem = FindClientMem( pDev, clientID );
+   if
(pClientMem == NULL) + { + DBG( "Could not find matching client ID 0x%04X\n", + clientID ); + + // End critical section + spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags ); + return -ENXIO; + } + + ppReadMemList = &(pClientMem->mpList); + + // Does data already exist? + while (*ppReadMemList != NULL) + { + // Is this element our data? + if (transactionID == 0 + || transactionID == (*ppReadMemList)->mTransactionID) + { + // End critical section + spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags ); + + // Run our own callback + pCallback( pDev, clientID, pData ); + + return 0; + } + + // Next + ppReadMemList = &(*ppReadMemList)->mpNext; + } + + // Data not found, add ourself to list of waiters + if (AddToNotifyList( pDev, + clientID, + transactionID, + pCallback, + pData ) == false) + { + DBG( "Unable to register for notification\n" ); + } + + // End critical section + spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags ); + + // Success + return 0; +} + +/*=========================================================================== +METHOD: + UpSem (Public Method) + +DESCRIPTION: + Notification function for synchronous read + +PARAMETERS: + pDev [ I ] - Device specific memory + clientID [ I ] - Requester's client ID + pData [ I ] - Buffer that holds semaphore to be up()-ed + +RETURN VALUE: + None +===========================================================================*/ +#define QUEC_SEM_MAGIC 0x12345678 +struct QuecSem { + struct semaphore readSem; + int magic; +}; + +static void UpSem( + sGobiUSBNet * pDev, + u16 clientID, + void * pData ) +{ + struct QuecSem *pSem = (struct QuecSem *)pData; + + VDBG( "0x%04X\n", clientID ); + + if (pSem->magic == QUEC_SEM_MAGIC) + up( &(pSem->readSem) ); + else + kfree(pSem); + return; +} + +/*=========================================================================== +METHOD: + ReadSync (Public Method) + +DESCRIPTION: + Start synchronous read + NOTE: Reading client's data store, not device + +PARAMETERS: + pDev [ I ] - Device specific memory + ppOutBuffer [I/O] - On success, will be filled with a + pointer to read buffer + clientID [ I ] - Requester's client ID + transactionID [ I ] - Transaction ID or 0 for any + +RETURN VALUE: + int - size of data read for success + negative errno for failure +===========================================================================*/ +static int ReadSync( + sGobiUSBNet * pDev, + void ** ppOutBuffer, + u16 clientID, + u16 transactionID ) +{ + int result; + sClientMemList * pClientMem; + sNotifyList ** ppNotifyList, * pDelNotifyListEntry; + struct QuecSem readSem; + void * pData; + unsigned long flags; + u16 dataSize; + + if (IsDeviceValid( pDev ) == false) + { + DBG( "Invalid device!\n" ); + return -ENXIO; + } + + // Critical section + spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags ); + + // Find memory storage for this Client ID + pClientMem = FindClientMem( pDev, clientID ); + if (pClientMem == NULL) + { + DBG( "Could not find matching client ID 0x%04X\n", + clientID ); + + // End critical section + spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags ); + return -ENXIO; + } + + // Note: in cases where read is interrupted, + // this will verify client is still valid + while (PopFromReadMemList( pDev, + clientID, + transactionID, + &pData, + &dataSize ) == false) + { + // Data does not yet exist, wait + sema_init( &readSem.readSem, 0 ); + readSem.magic = QUEC_SEM_MAGIC; + + // Add ourself to list of waiters + if (AddToNotifyList( pDev, + clientID, + 
transactionID,
+                           UpSem,
+                           &readSem ) == false)
+      {
+         DBG( "unable to register for notification\n" );
+         spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+         return -EFAULT;
+      }
+
+      // End critical section while we block
+      spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+
+      // Wait for notification
+      result = down_interruptible( &readSem.readSem );
+      //if (result) INFO("down_interruptible = %d\n", result);
+      if (result == -EINTR) {
+         result = down_timeout(&readSem.readSem, msecs_to_jiffies(200));
+         //if (result) INFO("down_timeout = %d\n", result);
+      }
+      if (result != 0)
+      {
+         DBG( "Down Timeout %d\n", result );
+
+         // readSem will fall out of scope,
+         // remove from notify list so it's not referenced
+         spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );
+         ppNotifyList = &(pClientMem->mpReadNotifyList);
+         pDelNotifyListEntry = NULL;
+
+         // Find and delete matching entry
+         while (*ppNotifyList != NULL)
+         {
+            if ((*ppNotifyList)->mpData == &readSem)
+            {
+               pDelNotifyListEntry = *ppNotifyList;
+               *ppNotifyList = (*ppNotifyList)->mpNext;
+               kfree( pDelNotifyListEntry );
+               break;
+            }
+
+            // Next
+            ppNotifyList = &(*ppNotifyList)->mpNext;
+         }
+
+         spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+         return -EINTR;
+      }
+
+      // Verify device is still valid
+      if (IsDeviceValid( pDev ) == false)
+      {
+         DBG( "Invalid device!\n" );
+         return -ENXIO;
+      }
+
+      // Restart critical section and continue loop
+      spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );
+   }
+
+   // End Critical section
+   spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+
+   // Success
+   *ppOutBuffer = pData;
+
+   return dataSize;
+}
+
+/*===========================================================================
+METHOD:
+   WriteSyncCallback (Public Method)
+
+DESCRIPTION:
+   Write callback
+
+PARAMETERS
+   pWriteURB    [ I ] - URB this callback is run for
+
+RETURN VALUE:
+   None
+===========================================================================*/
+#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,18 ))
+static void WriteSyncCallback( struct urb * pWriteURB )
+#else
+static void WriteSyncCallback(struct urb *pWriteURB, struct pt_regs *regs)
+#endif
+{
+   if (pWriteURB == NULL)
+   {
+      DBG( "null urb\n" );
+      return;
+   }
+
+   DBG( "Write status/size %d/%d\n",
+        pWriteURB->status,
+        pWriteURB->actual_length );
+
+   // Notify that write has completed by up()-ing semaphore
+   up( (struct semaphore * )pWriteURB->context );
+
+   return;
+}
+
+/*===========================================================================
+METHOD:
+   WriteSync (Public Method)
+
+DESCRIPTION:
+   Start synchronous write
+
+PARAMETERS:
+   pDev                 [ I ] - Device specific memory
+   pWriteBuffer         [ I ] - Data to be written
+   writeBufferSize      [ I ] - Size of data to be written
+   clientID             [ I ] - Client ID of requester
+
+RETURN VALUE:
+   int - write size (includes QMUX)
+         negative errno for failure
+===========================================================================*/
+static int WriteSync(
+   sGobiUSBNet *          pDev,
+   char *                 pWriteBuffer,
+   int                    writeBufferSize,
+   u16                    clientID )
+{
+   int result;
+   struct semaphore writeSem;
+   struct urb * pWriteURB;
+   sURBSetupPacket *writeSetup;
+   unsigned long flags;
+
+   if (IsDeviceValid( pDev ) == false)
+   {
+      DBG( "Invalid device!\n" );
+      return -ENXIO;
+   }
+
+   pWriteURB = usb_alloc_urb( 0, GFP_KERNEL );
+   if (pWriteURB == NULL)
+   {
+      DBG( "URB mem error\n" );
+      return -ENOMEM;
+   }
+
+   // Fill writeBuffer with QMUX
+   result = FillQMUX( clientID, pWriteBuffer, writeBufferSize );
+   if (result < 0)
+   {
+      usb_free_urb( pWriteURB );
+      return result;
+   }
+
+   // CDC Send Encapsulated Request packet
+   writeSetup = kmalloc(sizeof(sURBSetupPacket), GFP_KERNEL);
+   writeSetup->mRequestType = 0x21;
+   writeSetup->mRequestCode = 0;
+   writeSetup->mValue = 0;
+   writeSetup->mIndex = cpu_to_le16(pDev->mpIntf->cur_altsetting->desc.bInterfaceNumber);
+   writeSetup->mLength = cpu_to_le16(writeBufferSize);
+
+   // Create URB
+   usb_fill_control_urb( pWriteURB,
+                         pDev->mpNetDev->udev,
+                         usb_sndctrlpipe( pDev->mpNetDev->udev, 0 ),
+                         (unsigned char *)writeSetup,
+                         (void*)pWriteBuffer,
+                         writeBufferSize,
+                         NULL,
+                         pDev );
+
+   DBG( "Actual Write:\n" );
+   PrintHex( pWriteBuffer, writeBufferSize );
+
+   sema_init( &writeSem, 0 );
+
+   pWriteURB->complete = WriteSyncCallback;
+   pWriteURB->context = &writeSem;
+
+   // Wake device
+   result = usb_autopm_get_interface( pDev->mpIntf );
+   if (result < 0)
+   {
+      DBG( "unable to resume interface: %d\n", result );
+
+      // Likely caused by device going from autosuspend -> full suspend
+      if (result == -EPERM)
+      {
+#ifdef CONFIG_PM
+#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,33 ))
+#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,18 ))
+         pDev->mpNetDev->udev->auto_pm = 0;
+#endif
+#endif
+         QuecGobiNetSuspend( pDev->mpIntf, PMSG_SUSPEND );
+#endif /* CONFIG_PM */
+      }
+      usb_free_urb( pWriteURB );
+      kfree(writeSetup);
+
+      return result;
+   }
+
+   // Critical section
+   spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );
+
+   if (AddToURBList( pDev, clientID, pWriteURB ) == false)
+   {
+      usb_free_urb( pWriteURB );
+      kfree(writeSetup);
+
+      // End critical section
+      spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+      usb_autopm_put_interface( pDev->mpIntf );
+      return -EINVAL;
+   }
+
+   spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+   result = usb_submit_urb( pWriteURB, GFP_KERNEL );
+   spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );
+
+   if (result < 0)
+   {
+      DBG( "submit URB error %d\n", result );
+
+      // Get URB back so we can destroy it
+      if (PopFromURBList( pDev, clientID ) != pWriteURB)
+      {
+         // This shouldn't happen
+         DBG( "Didn't get write URB back\n" );
+         // avoid ReleaseClientID() freeing it again (URB was not popped from the list)
+      }
+      else
+      {
+         usb_free_urb( pWriteURB );
+         kfree(writeSetup);
+      }
+
+      // End critical section
+      spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+      usb_autopm_put_interface( pDev->mpIntf );
+      return result;
+   }
+
+   // End critical section while we block
+   spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+
+   // Wait for write to finish
+   if (1 != 0) //(interruptible != 0)
+   {
+      // Allow user interrupts
+      result = down_interruptible( &writeSem );
+      //if (result) INFO("down_interruptible = %d\n", result);
+      if (result == -EINTR) {
+         result = down_timeout(&writeSem, msecs_to_jiffies(200));
+         //if (result) INFO("down_timeout = %d\n", result);
+      }
+   }
+   else
+   {
+      // Ignore user interrupts
+      result = 0;
+      down( &writeSem );
+   }
+
+   // Write is done, release device
+   usb_autopm_put_interface( pDev->mpIntf );
+
+   // Verify device is still valid
+   if (IsDeviceValid( pDev ) == false)
+   {
+      DBG( "Invalid device!\n" );
+
+      usb_kill_urb( pWriteURB );
+#if 0 // avoid ReleaseClientID() freeing it again (URB was not popped from the list)
+      usb_free_urb( pWriteURB );
+      kfree(writeSetup);
+#endif
+      return -ENXIO;
+   }
+
+   // Restart critical section
+   spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );
+
+   // Get URB back so we can destroy it
+   if (PopFromURBList( pDev, clientID ) != pWriteURB)
+   {
+      // This shouldn't happen
+      DBG( "Didn't get write URB back\n" );
+
+      // End critical section
+      spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+      usb_kill_urb( pWriteURB );
+#if 0 // avoid ReleaseClientID() freeing it again (PopFromURBList failed)
+      usb_free_urb( pWriteURB );
+      kfree(writeSetup);
+#endif
+      return -EINVAL;
+   }
+
+   // End critical section
+   spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+
+   if (result == 0)
+   {
+      // Write is finished
+      if (pWriteURB->status == 0)
+      {
+         // Return number of bytes that were supposed to have been written,
+         // not size of QMI request
+         result = writeBufferSize;
+      }
+      else
+      {
+         DBG( "bad status = %d\n", pWriteURB->status );
+
+         // Return error value
+         result = pWriteURB->status;
+      }
+   }
+   else
+   {
+      // We have been forcibly interrupted
+      DBG( "Interrupted %d !!!\n", result );
+      DBG( "Device may be in bad state and need reset !!!\n" );
+
+      // URB has not finished
+      usb_kill_urb( pWriteURB );
+   }
+
+   usb_free_urb( pWriteURB );
+   kfree(writeSetup);
+
+   return result;
+}
+
+/*=========================================================================*/
+// Internal memory management functions
+/*=========================================================================*/
+
+/*===========================================================================
+METHOD:
+   GetClientID (Public Method)
+
+DESCRIPTION:
+   Request a QMI client for the input service type and initialize memory
+   structure
+
+PARAMETERS:
+   pDev           [ I ] - Device specific memory
+   serviceType    [ I ] - Desired QMI service type
+
+RETURN VALUE:
+   int - Client ID for success (positive)
+         Negative errno for error
+===========================================================================*/
+static int GetClientID(
+   sGobiUSBNet *    pDev,
+   u8               serviceType )
+{
+   u16 clientID;
+   sClientMemList ** ppClientMem;
+   int result;
+   void * pWriteBuffer;
+   u16 writeBufferSize;
+   void * pReadBuffer;
+   u16 readBufferSize;
+   unsigned long flags;
+   u8 transactionID;
+
+   if (IsDeviceValid( pDev ) == false)
+   {
+      DBG( "Invalid device!\n" );
+      return -ENXIO;
+   }
+
+   // Run QMI request to be assigned a Client ID
+   if (serviceType != 0)
+   {
+      writeBufferSize = QMICTLGetClientIDReqSize();
+      pWriteBuffer = kmalloc( writeBufferSize, GFP_KERNEL );
+      if (pWriteBuffer == NULL)
+      {
+         return -ENOMEM;
+      }
+
+      transactionID = QMIXactionIDGet( pDev );
+
+      result = QMICTLGetClientIDReq( pWriteBuffer,
+                                     writeBufferSize,
+                                     transactionID,
+                                     serviceType );
+      if (result < 0)
+      {
+         kfree( pWriteBuffer );
+         return result;
+      }
+
+
+      result = WriteSync( pDev,
+                          pWriteBuffer,
+                          writeBufferSize,
+                          QMICTL );
+      kfree( pWriteBuffer );
+
+      if (result < 0)
+      {
+         return result;
+      }
+
+      result = ReadSync( pDev,
+                         &pReadBuffer,
+                         QMICTL,
+                         transactionID );
+      if (result < 0)
+      {
+         DBG( "bad read data %d\n", result );
+         return result;
+      }
+      readBufferSize = result;
+
+      result = QMICTLGetClientIDResp( pReadBuffer,
+                                      readBufferSize,
+                                      &clientID );
+
+      /* Upon return from QMICTLGetClientIDResp, clientID
+       * low address contains the Service Number (SN), and
+       * clientID high address contains Client Number (CN).
+       * For the ReadCallback to function correctly, we swap
+       * the SN and CN on a Big Endian architecture.
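+       * That swap is exactly what le16_to_cpu() performs, so the
+       * conversion below is a no-op on little-endian hosts.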
+       */
+      clientID = le16_to_cpu(clientID);
+
+      kfree( pReadBuffer );
+
+      if (result < 0)
+      {
+         return result;
+      }
+   }
+   else
+   {
+      // QMI CTL will always have client ID 0
+      clientID = 0;
+   }
+
+   // Critical section
+   spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );
+
+   // Verify client is not already allocated
+   if (FindClientMem( pDev, clientID ) != NULL)
+   {
+      DBG( "Client memory already exists\n" );
+
+      // End Critical section
+      spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+      return -ETOOMANYREFS;
+   }
+
+   // Go to last entry in client mem list
+   ppClientMem = &pDev->mQMIDev.mpClientMemList;
+   while (*ppClientMem != NULL)
+   {
+      ppClientMem = &(*ppClientMem)->mpNext;
+   }
+
+   // Create locations for read to place data into
+   *ppClientMem = kmalloc( sizeof( sClientMemList ), GFP_ATOMIC );
+   if (*ppClientMem == NULL)
+   {
+      DBG( "Error allocating read list\n" );
+
+      // End critical section
+      spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+      return -ENOMEM;
+   }
+
+   (*ppClientMem)->mClientID = clientID;
+   (*ppClientMem)->mpList = NULL;
+   (*ppClientMem)->mpReadNotifyList = NULL;
+   (*ppClientMem)->mpURBList = NULL;
+   (*ppClientMem)->mpNext = NULL;
+
+   // Initialize workqueue for poll()
+   init_waitqueue_head( &(*ppClientMem)->mWaitQueue );
+
+   // End Critical section
+   spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+
+   return (int)( (*ppClientMem)->mClientID );
+}
+
+/*===========================================================================
+METHOD:
+   ReleaseClientID (Public Method)
+
+DESCRIPTION:
+   Release QMI client and free memory
+
+PARAMETERS:
+   pDev        [ I ] - Device specific memory
+   clientID    [ I ] - Requester's client ID
+
+RETURN VALUE:
+   None
+===========================================================================*/
+static void ReleaseClientID(
+   sGobiUSBNet *    pDev,
+   u16              clientID )
+{
+   int result;
+   sClientMemList ** ppDelClientMem;
+   sClientMemList * pNextClientMem;
+   struct urb * pDelURB;
+   void * pDelData;
+   u16 dataSize;
+   void * pWriteBuffer;
+   u16 writeBufferSize;
+   void * pReadBuffer;
+   u16 readBufferSize;
+   unsigned long flags;
+   u8 transactionID;
+
+   // Is the device still valid?
+ if (IsDeviceValid( pDev ) == false) + { + DBG( "invalid device\n" ); + return; + } + + DBG( "releasing 0x%04X\n", clientID ); + + // Run QMI ReleaseClientID if this isn't QMICTL + if (clientID != QMICTL && pDev->mpNetDev->udev->state) + { + // Note: all errors are non fatal, as we always want to delete + // client memory in latter part of function + + writeBufferSize = QMICTLReleaseClientIDReqSize(); + pWriteBuffer = kmalloc( writeBufferSize, GFP_KERNEL ); + if (pWriteBuffer == NULL) + { + DBG( "memory error\n" ); + } + else + { + transactionID = QMIXactionIDGet( pDev ); + + result = QMICTLReleaseClientIDReq( pWriteBuffer, + writeBufferSize, + transactionID, + clientID ); + if (result < 0) + { + kfree( pWriteBuffer ); + DBG( "error %d filling req buffer\n", result ); + } + else + { + result = WriteSync( pDev, + pWriteBuffer, + writeBufferSize, + QMICTL ); + kfree( pWriteBuffer ); + + if (result < 0) + { + DBG( "bad write status %d\n", result ); + } + else + { + result = ReadSync( pDev, + &pReadBuffer, + QMICTL, + transactionID ); + if (result < 0) + { + DBG( "bad read status %d\n", result ); + } + else + { + readBufferSize = result; + + result = QMICTLReleaseClientIDResp( pReadBuffer, + readBufferSize ); + kfree( pReadBuffer ); + + if (result < 0) + { + DBG( "error %d parsing response\n", result ); + } + } + } + } + } + } + + // Cleaning up client memory + + // Critical section + spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags ); + + // Can't use FindClientMem, I need to keep pointer of previous + ppDelClientMem = &pDev->mQMIDev.mpClientMemList; + while (*ppDelClientMem != NULL) + { + if ((*ppDelClientMem)->mClientID == clientID) + { + pNextClientMem = (*ppDelClientMem)->mpNext; + + // Notify all clients + while (NotifyAndPopNotifyList( pDev, + clientID, + 0 ) == true ); + + // Kill and free all URB's + pDelURB = PopFromURBList( pDev, clientID ); + while (pDelURB != NULL) + { + spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags ); + usb_kill_urb( pDelURB ); + usb_free_urb( pDelURB ); + spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags ); + pDelURB = PopFromURBList( pDev, clientID ); + } + + // Free any unread data + while (PopFromReadMemList( pDev, + clientID, + 0, + &pDelData, + &dataSize ) == true ) + { + kfree( pDelData ); + } + + // Delete client Mem + if (!waitqueue_active( &(*ppDelClientMem)->mWaitQueue)) + kfree( *ppDelClientMem ); + else + INFO("memory leak!\n"); + + // Overwrite the pointer that was to this client mem + *ppDelClientMem = pNextClientMem; + } + else + { + // I now point to (a pointer of ((the node I was at)'s mpNext)) + ppDelClientMem = &(*ppDelClientMem)->mpNext; + } + } + + // End Critical section + spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags ); + + return; +} + +/*=========================================================================== +METHOD: + FindClientMem (Public Method) + +DESCRIPTION: + Find this client's memory + + Caller MUST have lock on mClientMemLock + +PARAMETERS: + pDev [ I ] - Device specific memory + clientID [ I ] - Requester's client ID + +RETURN VALUE: + sClientMemList - Pointer to requested sClientMemList for success + NULL for error +===========================================================================*/ +static sClientMemList * FindClientMem( + sGobiUSBNet * pDev, + u16 clientID ) +{ + sClientMemList * pClientMem; + + if (IsDeviceValid( pDev ) == false) + { + DBG( "Invalid device\n" ); + return NULL; + } + +#ifdef CONFIG_SMP + // Verify Lock + if (spin_is_locked( 
&pDev->mQMIDev.mClientMemLock ) == 0)
+   {
+      DBG( "unlocked\n" );
+      BUG();
+   }
+#endif
+
+   pClientMem = pDev->mQMIDev.mpClientMemList;
+   while (pClientMem != NULL)
+   {
+      if (pClientMem->mClientID == clientID)
+      {
+         // Success
+         VDBG("Found client's 0x%x memory\n", clientID);
+         return pClientMem;
+      }
+
+      pClientMem = pClientMem->mpNext;
+   }
+
+   DBG( "Could not find client mem 0x%04X\n", clientID );
+   return NULL;
+}
+
+/*===========================================================================
+METHOD:
+   AddToReadMemList (Public Method)
+
+DESCRIPTION:
+   Add Data to this client's ReadMem list
+
+   Caller MUST have lock on mClientMemLock
+
+PARAMETERS:
+   pDev              [ I ] - Device specific memory
+   clientID          [ I ] - Requester's client ID
+   transactionID     [ I ] - Transaction ID or 0 for any
+   pData             [ I ] - Data to add
+   dataSize          [ I ] - Size of data to add
+
+RETURN VALUE:
+   bool
+===========================================================================*/
+static bool AddToReadMemList(
+   sGobiUSBNet *    pDev,
+   u16              clientID,
+   u16              transactionID,
+   void *           pData,
+   u16              dataSize )
+{
+   sClientMemList * pClientMem;
+   sReadMemList ** ppThisReadMemList;
+
+#ifdef CONFIG_SMP
+   // Verify Lock
+   if (spin_is_locked( &pDev->mQMIDev.mClientMemLock ) == 0)
+   {
+      DBG( "unlocked\n" );
+      BUG();
+   }
+#endif
+
+   // Get this client's memory location
+   pClientMem = FindClientMem( pDev, clientID );
+   if (pClientMem == NULL)
+   {
+      DBG( "Could not find this client's memory 0x%04X\n",
+           clientID );
+
+      return false;
+   }
+
+   // Go to last ReadMemList entry
+   ppThisReadMemList = &pClientMem->mpList;
+   while (*ppThisReadMemList != NULL)
+   {
+      ppThisReadMemList = &(*ppThisReadMemList)->mpNext;
+   }
+
+   *ppThisReadMemList = kmalloc( sizeof( sReadMemList ), GFP_ATOMIC );
+   if (*ppThisReadMemList == NULL)
+   {
+      DBG( "Mem error\n" );
+
+      return false;
+   }
+
+   (*ppThisReadMemList)->mpNext = NULL;
+   (*ppThisReadMemList)->mpData = pData;
+   (*ppThisReadMemList)->mDataSize = dataSize;
+   (*ppThisReadMemList)->mTransactionID = transactionID;
+
+   return true;
+}
+
+/*===========================================================================
+METHOD:
+   PopFromReadMemList (Public Method)
+
+DESCRIPTION:
+   Remove data from this client's ReadMem list if it matches
+   the specified transaction ID.
+
+   Caller MUST have lock on mClientMemLock
+
+PARAMETERS:
+   pDev              [ I ] - Device specific memory
+   clientID          [ I ] - Requester's client ID
+   transactionID     [ I ] - Transaction ID or 0 for any
+   ppData            [I/O] - On success, will be filled with a
+                             pointer to read buffer
+   pDataSize         [I/O] - On success, will be filled with the
+                             read buffer's size
+
+RETURN VALUE:
+   bool
+===========================================================================*/
+static bool PopFromReadMemList(
+   sGobiUSBNet *    pDev,
+   u16              clientID,
+   u16              transactionID,
+   void **          ppData,
+   u16 *            pDataSize )
+{
+   sClientMemList * pClientMem;
+   sReadMemList * pDelReadMemList, ** ppReadMemList;
+
+#ifdef CONFIG_SMP
+   // Verify Lock
+   if (spin_is_locked( &pDev->mQMIDev.mClientMemLock ) == 0)
+   {
+      DBG( "unlocked\n" );
+      BUG();
+   }
+#endif
+
+   // Get this client's memory location
+   pClientMem = FindClientMem( pDev, clientID );
+   if (pClientMem == NULL)
+   {
+      DBG( "Could not find this client's memory 0x%04X\n",
+           clientID );
+
+      return false;
+   }
+
+   ppReadMemList = &(pClientMem->mpList);
+   pDelReadMemList = NULL;
+
+   // Find first message that matches this transaction ID
+   while (*ppReadMemList != NULL)
+   {
+      // Do we care about transaction ID?
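+      // (A transactionID of 0 acts as a wildcard: callers such as ReadSync()
+      // and ReleaseClientID() pass 0 to accept any pending message.)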
+      if (transactionID == 0
+      ||  transactionID == (*ppReadMemList)->mTransactionID )
+      {
+         pDelReadMemList = *ppReadMemList;
+         VDBG( "*ppReadMemList = 0x%p pDelReadMemList = 0x%p\n",
+               *ppReadMemList, pDelReadMemList );
+         break;
+      }
+
+      VDBG( "skipping 0x%04X data TID = %x\n", clientID, (*ppReadMemList)->mTransactionID );
+
+      // Next
+      ppReadMemList = &(*ppReadMemList)->mpNext;
+   }
+   VDBG( "*ppReadMemList = 0x%p pDelReadMemList = 0x%p\n",
+         *ppReadMemList, pDelReadMemList );
+   if (pDelReadMemList != NULL)
+   {
+      *ppReadMemList = (*ppReadMemList)->mpNext;
+
+      // Copy to output
+      *ppData = pDelReadMemList->mpData;
+      *pDataSize = pDelReadMemList->mDataSize;
+      VDBG( "*ppData = 0x%p pDataSize = %u\n",
+            *ppData, *pDataSize );
+
+      // Free memory
+      kfree( pDelReadMemList );
+
+      return true;
+   }
+   else
+   {
+      DBG( "No read memory to pop, Client 0x%04X, TID = %x\n",
+           clientID,
+           transactionID );
+      return false;
+   }
+}
+
+/*===========================================================================
+METHOD:
+   AddToNotifyList (Public Method)
+
+DESCRIPTION:
+   Add Notify entry to this client's notify List
+
+   Caller MUST have lock on mClientMemLock
+
+PARAMETERS:
+   pDev              [ I ] - Device specific memory
+   clientID          [ I ] - Requester's client ID
+   transactionID     [ I ] - Transaction ID or 0 for any
+   pNotifyFunct      [ I ] - Callback function to be run when data is available
+   pData             [ I ] - Data buffer that will be passed (unmodified)
+                             to callback
+
+RETURN VALUE:
+   bool
+===========================================================================*/
+static bool AddToNotifyList(
+   sGobiUSBNet *      pDev,
+   u16                clientID,
+   u16                transactionID,
+   void               (* pNotifyFunct)(sGobiUSBNet *, u16, void *),
+   void *             pData )
+{
+   sClientMemList * pClientMem;
+   sNotifyList ** ppThisNotifyList;
+
+#ifdef CONFIG_SMP
+   // Verify Lock
+   if (spin_is_locked( &pDev->mQMIDev.mClientMemLock ) == 0)
+   {
+      DBG( "unlocked\n" );
+      BUG();
+   }
+#endif
+
+   // Get this client's memory location
+   pClientMem = FindClientMem( pDev, clientID );
+   if (pClientMem == NULL)
+   {
+      DBG( "Could not find this client's memory 0x%04X\n", clientID );
+      return false;
+   }
+
+   // Go to last URBList entry
+   ppThisNotifyList = &pClientMem->mpReadNotifyList;
+   while (*ppThisNotifyList != NULL)
+   {
+      ppThisNotifyList = &(*ppThisNotifyList)->mpNext;
+   }
+
+   *ppThisNotifyList = kmalloc( sizeof( sNotifyList ), GFP_ATOMIC );
+   if (*ppThisNotifyList == NULL)
+   {
+      DBG( "Mem error\n" );
+      return false;
+   }
+
+   (*ppThisNotifyList)->mpNext = NULL;
+   (*ppThisNotifyList)->mpNotifyFunct = pNotifyFunct;
+   (*ppThisNotifyList)->mpData = pData;
+   (*ppThisNotifyList)->mTransactionID = transactionID;
+
+   return true;
+}
+
+/*===========================================================================
+METHOD:
+   NotifyAndPopNotifyList (Public Method)
+
+DESCRIPTION:
+   Remove first Notify entry from this client's notify list
+   and Run function
+
+   Caller MUST have lock on mClientMemLock
+
+PARAMETERS:
+   pDev              [ I ] - Device specific memory
+   clientID          [ I ] - Requester's client ID
+   transactionID     [ I ] - Transaction ID or 0 for any
+
+RETURN VALUE:
+   bool
+===========================================================================*/
+static bool NotifyAndPopNotifyList(
+   sGobiUSBNet *      pDev,
+   u16                clientID,
+   u16                transactionID )
+{
+   sClientMemList * pClientMem;
+   sNotifyList * pDelNotifyList, ** ppNotifyList;
+
+#ifdef CONFIG_SMP
+   // Verify Lock
+   if (spin_is_locked( &pDev->mQMIDev.mClientMemLock ) == 0)
+   {
+      DBG( "unlocked\n" );
+      BUG();
+   }
+#endif
+
+   // Get this client's memory location
pClientMem = FindClientMem( pDev, clientID ); + if (pClientMem == NULL) + { + DBG( "Could not find this client's memory 0x%04X\n", clientID ); + return false; + } + + ppNotifyList = &(pClientMem->mpReadNotifyList); + pDelNotifyList = NULL; + + // Remove from list + while (*ppNotifyList != NULL) + { + // Do we care about transaction ID? + if (transactionID == 0 + || (*ppNotifyList)->mTransactionID == 0 + || transactionID == (*ppNotifyList)->mTransactionID) + { + pDelNotifyList = *ppNotifyList; + break; + } + + DBG( "skipping data TID = %x\n", (*ppNotifyList)->mTransactionID ); + + // next + ppNotifyList = &(*ppNotifyList)->mpNext; + } + + if (pDelNotifyList != NULL) + { + // Remove element + *ppNotifyList = (*ppNotifyList)->mpNext; + + // Run notification function + if (pDelNotifyList->mpNotifyFunct != NULL) + { + // Unlock for callback + spin_unlock( &pDev->mQMIDev.mClientMemLock ); + + pDelNotifyList->mpNotifyFunct( pDev, + clientID, + pDelNotifyList->mpData ); + + // Restore lock + spin_lock( &pDev->mQMIDev.mClientMemLock ); + } + + // Delete memory + kfree( pDelNotifyList ); + + return true; + } + else + { + DBG( "no one to notify for TID %x\n", transactionID ); + + return false; + } +} + +/*=========================================================================== +METHOD: + AddToURBList (Public Method) + +DESCRIPTION: + Add URB to this client's URB list + + Caller MUST have lock on mClientMemLock + +PARAMETERS: + pDev [ I ] - Device specific memory + clientID [ I ] - Requester's client ID + pURB [ I ] - URB to be added + +RETURN VALUE: + bool +===========================================================================*/ +static bool AddToURBList( + sGobiUSBNet * pDev, + u16 clientID, + struct urb * pURB ) +{ + sClientMemList * pClientMem; + sURBList ** ppThisURBList; + +#ifdef CONFIG_SMP + // Verify Lock + if (spin_is_locked( &pDev->mQMIDev.mClientMemLock ) == 0) + { + DBG( "unlocked\n" ); + BUG(); + } +#endif + + // Get this client's memory location + pClientMem = FindClientMem( pDev, clientID ); + if (pClientMem == NULL) + { + DBG( "Could not find this client's memory 0x%04X\n", clientID ); + return false; + } + + // Go to last URBList entry + ppThisURBList = &pClientMem->mpURBList; + while (*ppThisURBList != NULL) + { + ppThisURBList = &(*ppThisURBList)->mpNext; + } + + *ppThisURBList = kmalloc( sizeof( sURBList ), GFP_ATOMIC ); + if (*ppThisURBList == NULL) + { + DBG( "Mem error\n" ); + return false; + } + + (*ppThisURBList)->mpNext = NULL; + (*ppThisURBList)->mpURB = pURB; + + return true; +} + +/*=========================================================================== +METHOD: + PopFromURBList (Public Method) + +DESCRIPTION: + Remove URB from this client's URB list + + Caller MUST have lock on mClientMemLock + +PARAMETERS: + pDev [ I ] - Device specific memory + clientID [ I ] - Requester's client ID + +RETURN VALUE: + struct urb - Pointer to requested client's URB + NULL for error +===========================================================================*/ +static struct urb * PopFromURBList( + sGobiUSBNet * pDev, + u16 clientID ) +{ + sClientMemList * pClientMem; + sURBList * pDelURBList; + struct urb * pURB; + +#ifdef CONFIG_SMP + // Verify Lock + if (spin_is_locked( &pDev->mQMIDev.mClientMemLock ) == 0) + { + DBG( "unlocked\n" ); + BUG(); + } +#endif + + // Get this client's memory location + pClientMem = FindClientMem( pDev, clientID ); + if (pClientMem == NULL) + { + DBG( "Could not find this client's memory 0x%04X\n", clientID ); + return NULL; + } + + // Remove 
from list
+   if (pClientMem->mpURBList != NULL)
+   {
+      pDelURBList = pClientMem->mpURBList;
+      pClientMem->mpURBList = pClientMem->mpURBList->mpNext;
+
+      // Copy to output
+      pURB = pDelURBList->mpURB;
+
+      // Delete memory
+      kfree( pDelURBList );
+
+      return pURB;
+   }
+   else
+   {
+      DBG( "No URBs to pop\n" );
+
+      return NULL;
+   }
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION( 3,19,0 ))
+#ifndef f_dentry
+#define f_dentry f_path.dentry
+#endif
+#endif
+
+/*=========================================================================*/
+// Internal userspace wrappers
+/*=========================================================================*/
+
+/*===========================================================================
+METHOD:
+   UserspaceunlockedIOCTL (Public Method)
+
+DESCRIPTION:
+   Internal wrapper for Userspace IOCTL interface
+
+PARAMETERS
+   pFilp        [ I ] - userspace file descriptor
+   cmd          [ I ] - IOCTL command
+   arg          [ I ] - IOCTL argument
+
+RETURN VALUE:
+   long - 0 for success
+          Negative errno for failure
+===========================================================================*/
+static long UserspaceunlockedIOCTL(
+   struct file *     pFilp,
+   unsigned int      cmd,
+   unsigned long     arg )
+{
+   int result;
+   u32 devVIDPID;
+
+   sQMIFilpStorage * pFilpData = (sQMIFilpStorage *)pFilp->private_data;
+
+   if (pFilpData == NULL)
+   {
+      DBG( "Bad file data\n" );
+      return -EBADF;
+   }
+
+   if (IsDeviceValid( pFilpData->mpDev ) == false)
+   {
+      DBG( "Invalid device! Updating f_ops\n" );
+      pFilp->f_op = pFilp->f_dentry->d_inode->i_fop;
+      return -ENXIO;
+   }
+
+   switch (cmd)
+   {
+      case IOCTL_QMI_GET_SERVICE_FILE:
+         DBG( "Setting up QMI for service %lu\n", arg );
+         if ((u8)arg == 0)
+         {
+            DBG( "Cannot use QMICTL from userspace\n" );
+            return -EINVAL;
+         }
+
+         // Connection is already setup
+         if (pFilpData->mClientID != (u16)-1)
+         {
+            DBG( "Close the current connection before opening a new one\n" );
+            return -EBADR;
+         }
+
+         result = GetClientID( pFilpData->mpDev, (u8)arg );
+// It seems QMIWDA only allows one client; if the last quectel-CM did not release it (e.g. it was killed by SIGKILL),
+// we can force-release it here and retry.
+#if 1
+        if (result < 0 && (u8)arg == QMIWDA)
+        {
+           ReleaseClientID( pFilpData->mpDev, QMIWDA | (1 << 8) );
+           result = GetClientID( pFilpData->mpDev, (u8)arg );
+        }
+#endif
+         if (result < 0)
+         {
+            return result;
+         }
+         pFilpData->mClientID = (u16)result;
+         DBG("pFilpData->mClientID = 0x%x\n", pFilpData->mClientID );
+         return 0;
+         break;
+
+
+      case IOCTL_QMI_GET_DEVICE_VIDPID:
+         if (arg == 0)
+         {
+            DBG( "Bad VIDPID buffer\n" );
+            return -EINVAL;
+         }
+
+         // Extra verification
+         if (pFilpData->mpDev->mpNetDev == 0)
+         {
+            DBG( "Bad mpNetDev\n" );
+            return -ENOMEM;
+         }
+         if (pFilpData->mpDev->mpNetDev->udev == 0)
+         {
+            DBG( "Bad udev\n" );
+            return -ENOMEM;
+         }
+
+         devVIDPID = ((le16_to_cpu( pFilpData->mpDev->mpNetDev->udev->descriptor.idVendor ) << 16)
+                     + le16_to_cpu( pFilpData->mpDev->mpNetDev->udev->descriptor.idProduct ) );
+
+         result = copy_to_user( (unsigned int *)arg, &devVIDPID, 4 );
+         if (result != 0)
+         {
+            DBG( "Copy to userspace failure %d\n", result );
+         }
+
+         return result;
+
+         break;
+
+      case IOCTL_QMI_GET_DEVICE_MEID:
+         if (arg == 0)
+         {
+            DBG( "Bad MEID buffer\n" );
+            return -EINVAL;
+         }
+
+         result = copy_to_user( (unsigned int *)arg, &pFilpData->mpDev->mMEID[0], 14 );
+         if (result != 0)
+         {
+            DBG( "Copy to userspace failure %d\n", result );
+         }
+
+         return result;
+
+         break;
+
+      default:
+         return -EBADRQC;
+   }
+}
+
+/*=========================================================================*/
+// Userspace wrappers
+/*=========================================================================*/
+
+/*===========================================================================
+METHOD:
+   UserspaceOpen (Public Method)
+
+DESCRIPTION:
+   Userspace open
+   IOCTL must be called before reads or writes
+
+PARAMETERS
+   pInode       [ I ] - kernel file descriptor
+   pFilp        [ I ] - userspace file descriptor
+
+RETURN VALUE:
+   int - 0 for success
+         Negative errno for failure
+===========================================================================*/
+static int UserspaceOpen(
+   struct inode *         pInode,
+   struct file *          pFilp )
+{
+   sQMIFilpStorage * pFilpData;
+
+   // Obtain device pointer from pInode
+   sQMIDev * pQMIDev = container_of( pInode->i_cdev,
+                                     sQMIDev,
+                                     mCdev );
+   sGobiUSBNet * pDev = container_of( pQMIDev,
+                                      sGobiUSBNet,
+                                      mQMIDev );
+
+   if (pDev->mbMdm9x07)
+   {
+      atomic_inc(&pDev->refcount);
+      if (!pDev->mbQMIReady) {
+         if (wait_for_completion_interruptible_timeout(&pDev->mQMIReadyCompletion, 15*HZ) <= 0) {
+            if (atomic_dec_and_test(&pDev->refcount)) {
+               kfree( pDev );
+            }
+            return -ETIMEDOUT;
+         }
+      }
+      atomic_dec(&pDev->refcount);
+   }
+
+   if (IsDeviceValid( pDev ) == false)
+   {
+      DBG( "Invalid device\n" );
+      return -ENXIO;
+   }
+
+   // Setup data in pFilp->private_data
+   pFilp->private_data = kmalloc( sizeof( sQMIFilpStorage ), GFP_KERNEL );
+   if (pFilp->private_data == NULL)
+   {
+      DBG( "Mem error\n" );
+      return -ENOMEM;
+   }
+
+   pFilpData = (sQMIFilpStorage *)pFilp->private_data;
+   pFilpData->mClientID = (u16)-1;
+   pFilpData->mpDev = pDev;
+   atomic_inc(&pFilpData->mpDev->refcount);
+
+   return 0;
+}
+
+/*===========================================================================
+METHOD:
+   UserspaceIOCTL (Public Method)
+
+DESCRIPTION:
+   Userspace IOCTL functions
+
+PARAMETERS
+   pUnusedInode [ I ] - (unused) kernel file descriptor
+   pFilp        [ I ] - userspace file descriptor
+   cmd          [ I ] - IOCTL command
+   arg          [ I ] - IOCTL argument
+
+RETURN VALUE:
+   int - 0 for success
+         Negative errno for failure
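+
+NOTE:
+   On kernels 2.6.36 and newer this wrapper is not compiled; the unlocked
+   ioctl entry point calls UserspaceunlockedIOCTL() directly (see the
+   LINUX_VERSION_CODE guard below).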
+===========================================================================*/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,36 )) +static int UserspaceIOCTL( + struct inode * pUnusedInode, + struct file * pFilp, + unsigned int cmd, + unsigned long arg ) +{ + // call the internal wrapper function + return (int)UserspaceunlockedIOCTL( pFilp, cmd, arg ); +} +#endif + +#ifdef quectel_no_for_each_process +static int UserspaceClose( + struct inode * pInode, + struct file * pFilp ) +{ + sQMIFilpStorage * pFilpData = (sQMIFilpStorage *)pFilp->private_data; + + if (pFilpData == NULL) + { + DBG( "bad file data\n" ); + return -EBADF; + } + + atomic_dec(&pFilpData->mpDev->refcount); + + if (IsDeviceValid( pFilpData->mpDev ) == false) + { + return -ENXIO; + } + + DBG( "0x%04X\n", pFilpData->mClientID ); + + // Disable pFilpData so they can't keep sending read or write + // should this function hang + // Note: memory pointer is still saved in pFilpData to be deleted later + pFilp->private_data = NULL; + + if (pFilpData->mClientID != (u16)-1) + { + if (pFilpData->mpDev->mbDeregisterQMIDevice) + pFilpData->mClientID = (u16)-1; //DeregisterQMIDevice() will release this ClientID + else + ReleaseClientID( pFilpData->mpDev, + pFilpData->mClientID ); + } + + kfree( pFilpData ); + return 0; +} +#else +/*=========================================================================== +METHOD: + UserspaceClose (Public Method) + +DESCRIPTION: + Userspace close + Release client ID and free memory + +PARAMETERS + pFilp [ I ] - userspace file descriptor + unusedFileTable [ I ] - (unused) file table + +RETURN VALUE: + int - 0 for success + Negative errno for failure +===========================================================================*/ +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,14 )) +int UserspaceClose( + struct file * pFilp, + fl_owner_t unusedFileTable ) +#else +int UserspaceClose( struct file * pFilp ) +#endif +{ + sQMIFilpStorage * pFilpData = (sQMIFilpStorage *)pFilp->private_data; + struct task_struct * pEachTask; + struct fdtable * pFDT; + int count = 0; + int used = 0; + unsigned long flags; + + if (pFilpData == NULL) + { + DBG( "bad file data\n" ); + return -EBADF; + } + + // Fallthough. If f_count == 1 no need to do more checks +#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 2,6,24 )) + if (atomic_read( &pFilp->f_count ) != 1) +#else + if (atomic_long_read( &pFilp->f_count ) != 1) +#endif + { + rcu_read_lock(); + for_each_process( pEachTask ) + { + task_lock(pEachTask); + if (pEachTask == NULL || pEachTask->files == NULL) + { + // Some tasks may not have files (e.g. Xsession) + task_unlock(pEachTask); + continue; + } + spin_lock_irqsave( &pEachTask->files->file_lock, flags ); + task_unlock(pEachTask); //kernel/exit.c:do_exit() -> fs/file.c:exit_files() + pFDT = files_fdtable( pEachTask->files ); + for (count = 0; count < pFDT->max_fds; count++) + { + // Before this function was called, this file was removed + // from our task's file table so if we find it in a file + // table then it is being used by another task + if (pFDT->fd[count] == pFilp) + { + used++; + break; + } + } + spin_unlock_irqrestore( &pEachTask->files->file_lock, flags ); + } + rcu_read_unlock(); + + if (used > 0) + { + DBG( "not closing, as this FD is open by %d other process\n", used ); + return 0; + } + } + + if (IsDeviceValid( pFilpData->mpDev ) == false) + { + DBG( "Invalid device! 
Updating f_ops\n" ); + pFilp->f_op = pFilp->f_dentry->d_inode->i_fop; + return -ENXIO; + } + + DBG( "0x%04X\n", pFilpData->mClientID ); + + // Disable pFilpData so they can't keep sending read or write + // should this function hang + // Note: memory pointer is still saved in pFilpData to be deleted later + pFilp->private_data = NULL; + + if (pFilpData->mClientID != (u16)-1) + { + if (pFilpData->mpDev->mbDeregisterQMIDevice) + pFilpData->mClientID = (u16)-1; //DeregisterQMIDevice() will release this ClientID + else + ReleaseClientID( pFilpData->mpDev, + pFilpData->mClientID ); + } + atomic_dec(&pFilpData->mpDev->refcount); + + kfree( pFilpData ); + return 0; +} +#endif + +/*=========================================================================== +METHOD: + UserspaceRead (Public Method) + +DESCRIPTION: + Userspace read (synchronous) + +PARAMETERS + pFilp [ I ] - userspace file descriptor + pBuf [ I ] - read buffer + size [ I ] - size of read buffer + pUnusedFpos [ I ] - (unused) file position + +RETURN VALUE: + ssize_t - Number of bytes read for success + Negative errno for failure +===========================================================================*/ +static ssize_t UserspaceRead( + struct file * pFilp, + char __user * pBuf, + size_t size, + loff_t * pUnusedFpos ) +{ + int result; + void * pReadData = NULL; + void * pSmallReadData; + sQMIFilpStorage * pFilpData = (sQMIFilpStorage *)pFilp->private_data; + + if (pFilpData == NULL) + { + DBG( "Bad file data\n" ); + return -EBADF; + } + + if (IsDeviceValid( pFilpData->mpDev ) == false) + { + DBG( "Invalid device! Updating f_ops\n" ); + pFilp->f_op = pFilp->f_dentry->d_inode->i_fop; + return -ENXIO; + } + + if (pFilpData->mClientID == (u16)-1) + { + DBG( "Client ID must be set before reading 0x%04X\n", + pFilpData->mClientID ); + return -EBADR; + } + + // Perform synchronous read + result = ReadSync( pFilpData->mpDev, + &pReadData, + pFilpData->mClientID, + 0 ); + if (result <= 0) + { + return result; + } + + // Discard QMUX header + result -= QMUXHeaderSize(); + pSmallReadData = pReadData + QMUXHeaderSize(); + + if (result > size) + { + DBG( "Read data is too large for amount user has requested\n" ); + kfree( pReadData ); + return -EOVERFLOW; + } + + DBG( "pBuf = 0x%p pSmallReadData = 0x%p, result = %d", + pBuf, pSmallReadData, result ); + + if (copy_to_user( pBuf, pSmallReadData, result ) != 0) + { + DBG( "Error copying read data to user\n" ); + result = -EFAULT; + } + + // Reader is responsible for freeing read buffer + kfree( pReadData ); + + return result; +} + +/*=========================================================================== +METHOD: + UserspaceWrite (Public Method) + +DESCRIPTION: + Userspace write (synchronous) + +PARAMETERS + pFilp [ I ] - userspace file descriptor + pBuf [ I ] - write buffer + size [ I ] - size of write buffer + pUnusedFpos [ I ] - (unused) file position + +RETURN VALUE: + ssize_t - Number of bytes read for success + Negative errno for failure +===========================================================================*/ +static ssize_t UserspaceWrite( + struct file * pFilp, + const char __user * pBuf, + size_t size, + loff_t * pUnusedFpos ) +{ + int status; + void * pWriteBuffer; + sQMIFilpStorage * pFilpData = (sQMIFilpStorage *)pFilp->private_data; + + if (pFilpData == NULL) + { + DBG( "Bad file data\n" ); + return -EBADF; + } + + if (IsDeviceValid( pFilpData->mpDev ) == false) + { + DBG( "Invalid device! 
Updating f_ops\n" ); + pFilp->f_op = pFilp->f_dentry->d_inode->i_fop; + return -ENXIO; + } + + if (pFilpData->mClientID == (u16)-1) + { + DBG( "Client ID must be set before writing 0x%04X\n", + pFilpData->mClientID ); + return -EBADR; + } + + // Copy data from user to kernel space + pWriteBuffer = kmalloc( size + QMUXHeaderSize(), GFP_KERNEL ); + if (pWriteBuffer == NULL) + { + return -ENOMEM; + } + status = copy_from_user( pWriteBuffer + QMUXHeaderSize(), pBuf, size ); + if (status != 0) + { + DBG( "Unable to copy data from userspace %d\n", status ); + kfree( pWriteBuffer ); + return status; + } + + status = WriteSync( pFilpData->mpDev, + pWriteBuffer, + size + QMUXHeaderSize(), + pFilpData->mClientID ); + + kfree( pWriteBuffer ); + + // On success, return requested size, not full QMI reqest size + if (status == size + QMUXHeaderSize()) + { + return size; + } + else + { + return status; + } +} + +/*=========================================================================== +METHOD: + UserspacePoll (Public Method) + +DESCRIPTION: + Used to determine if read/write operations are possible without blocking + +PARAMETERS + pFilp [ I ] - userspace file descriptor + pPollTable [I/O] - Wait object to notify the kernel when data + is ready + +RETURN VALUE: + unsigned int - bitmask of what operations can be done immediately +===========================================================================*/ +static unsigned int UserspacePoll( + struct file * pFilp, + struct poll_table_struct * pPollTable ) +{ + sQMIFilpStorage * pFilpData = (sQMIFilpStorage *)pFilp->private_data; + sClientMemList * pClientMem; + unsigned long flags; + + // Always ready to write + unsigned long status = POLLOUT | POLLWRNORM; + + if (pFilpData == NULL) + { + DBG( "Bad file data\n" ); + return POLLERR; + } + + if (IsDeviceValid( pFilpData->mpDev ) == false) + { + DBG( "Invalid device! 
Updating f_ops\n" ); + pFilp->f_op = pFilp->f_dentry->d_inode->i_fop; + return POLLERR; + } + + if (pFilpData->mpDev->mbDeregisterQMIDevice) + { + DBG( "DeregisterQMIDevice ing\n" ); + return POLLHUP | POLLERR; + } + + if (pFilpData->mClientID == (u16)-1) + { + DBG( "Client ID must be set before polling 0x%04X\n", + pFilpData->mClientID ); + return POLLERR; + } + + // Critical section + spin_lock_irqsave( &pFilpData->mpDev->mQMIDev.mClientMemLock, flags ); + + // Get this client's memory location + pClientMem = FindClientMem( pFilpData->mpDev, + pFilpData->mClientID ); + if (pClientMem == NULL) + { + DBG( "Could not find this client's memory 0x%04X\n", + pFilpData->mClientID ); + + spin_unlock_irqrestore( &pFilpData->mpDev->mQMIDev.mClientMemLock, + flags ); + return POLLERR; + } + + poll_wait( pFilp, &pClientMem->mWaitQueue, pPollTable ); + + if (pClientMem->mpList != NULL) + { + status |= POLLIN | POLLRDNORM; + } + + // End critical section + spin_unlock_irqrestore( &pFilpData->mpDev->mQMIDev.mClientMemLock, flags ); + + // Always ready to write + return (status | POLLOUT | POLLWRNORM); +} + +/*=========================================================================*/ +// Initializer and destructor +/*=========================================================================*/ +static int QMICTLSyncProc(sGobiUSBNet *pDev) +{ + void *pWriteBuffer; + void *pReadBuffer; + int result; + u16 writeBufferSize; + u16 readBufferSize; + u8 transactionID; + unsigned long flags; + + if (IsDeviceValid( pDev ) == false) + { + DBG( "Invalid device\n" ); + return -EFAULT; + } + + writeBufferSize= QMICTLSyncReqSize(); + pWriteBuffer = kmalloc( writeBufferSize, GFP_KERNEL ); + if (pWriteBuffer == NULL) + { + return -ENOMEM; + } + + transactionID = QMIXactionIDGet(pDev); + + /* send a QMI_CTL_SYNC_REQ (0x0027) */ + result = QMICTLSyncReq( pWriteBuffer, + writeBufferSize, + transactionID ); + if (result < 0) + { + kfree( pWriteBuffer ); + return result; + } + + result = WriteSync( pDev, + pWriteBuffer, + writeBufferSize, + QMICTL ); + + if (result < 0) + { + kfree( pWriteBuffer ); + return result; + } + + // QMI CTL Sync Response + result = ReadSync( pDev, + &pReadBuffer, + QMICTL, + transactionID ); + if (result < 0) + { + return result; + } + + result = QMICTLSyncResp( pReadBuffer, + (u16)result ); + + kfree( pReadBuffer ); + + if (result < 0) /* need to re-sync */ + { + DBG( "sync response error code %d\n", result ); + /* start timer and wait for the response */ + /* process response */ + return result; + } + +#if 1 //free these ununsed qmi response, or when these transactionID re-used, they will be regarded as qmi response of the qmi request that have same transactionID + // Enter critical section + spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags ); + + // Free any unread data + while (PopFromReadMemList( pDev, QMICTL, 0, &pReadBuffer, &readBufferSize) == true) { + kfree( pReadBuffer ); + } + + // End critical section + spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags ); +#endif + + // Success + return 0; +} + +static int qmi_sync_thread(void *data) { + sGobiUSBNet * pDev = (sGobiUSBNet *)data; + int result = 0; + +#if 1 + // Device is not ready for QMI connections right away + // Wait up to 30 seconds before failing + if (QMIReady( pDev, 30000 ) == false) + { + DBG( "Device unresponsive to QMI\n" ); + goto __qmi_sync_finished; + } + + // Initiate QMI CTL Sync Procedure + DBG( "Sending QMI CTL Sync Request\n" ); + result = QMICTLSyncProc(pDev); + if (result != 0) + { + DBG( "QMI CTL 
Sync Procedure Error\n" );
+      goto __qmi_sync_finished;
+   }
+   else
+   {
+      DBG( "QMI CTL Sync Procedure Successful\n" );
+   }
+
+#if defined(QUECTEL_WWAN_QMAP)
+if (pDev->qmap_mode) {
+   // Setup Data Format
+   result = QMIWDASetDataFormat (pDev, pDev->qmap_mode, &pDev->qmap_size);
+   if (result != 0)
+   {
+      goto __qmi_sync_finished;
+   }
+   pDev->mpNetDev->rx_urb_size = pDev->qmap_size;
+}
+#endif
+
+   // Setup WDS callback
+   result = SetupQMIWDSCallback( pDev );
+   if (result != 0)
+   {
+      goto __qmi_sync_finished;
+   }
+
+   // Fill MEID for device
+   result = QMIDMSGetMEID( pDev );
+   if (result != 0)
+   {
+      goto __qmi_sync_finished;
+   }
+#endif
+
+__qmi_sync_finished:
+   pDev->mbQMIReady = true;
+   complete_all(&pDev->mQMIReadyCompletion);
+   pDev->mbQMISyncIng = false;
+   if (atomic_dec_and_test(&pDev->refcount)) {
+      kfree( pDev );
+   }
+   return result;
+}
+
+/*===========================================================================
+METHOD:
+   RegisterQMIDevice (Public Method)
+
+DESCRIPTION:
+   QMI Device initialization function
+
+PARAMETERS:
+   pDev     [ I ] - Device specific memory
+
+RETURN VALUE:
+   int - 0 for success
+         Negative errno for failure
+===========================================================================*/
+int RegisterQMIDevice( sGobiUSBNet * pDev )
+{
+   int result;
+   int GobiQMIIndex = 0;
+   dev_t devno;
+   char * pDevName;
+
+   if (pDev->mQMIDev.mbCdevIsInitialized == true)
+   {
+      // Should never happen, but always better to check
+      DBG( "device already exists\n" );
+      return -EEXIST;
+   }
+
+   pDev->mbQMIValid = true;
+   pDev->mbDeregisterQMIDevice = false;
+
+   // Set up for QMICTL
+   //    (does not send QMI message, just sets up memory)
+   result = GetClientID( pDev, QMICTL );
+   if (result != 0)
+   {
+      pDev->mbQMIValid = false;
+      return result;
+   }
+   atomic_set( &pDev->mQMIDev.mQMICTLTransactionID, 1 );
+
+   // Start Async reading
+   result = StartRead( pDev );
+   if (result != 0)
+   {
+      pDev->mbQMIValid = false;
+      return result;
+   }
+
+   if (pDev->mbMdm9x07)
+   {
+      usb_control_msg( pDev->mpNetDev->udev,
+                       usb_sndctrlpipe( pDev->mpNetDev->udev, 0 ),
+                       SET_CONTROL_LINE_STATE_REQUEST,
+                       SET_CONTROL_LINE_STATE_REQUEST_TYPE,
+                       CONTROL_DTR,
+                       /* USB interface number to receive control message */
+                       pDev->mpIntf->cur_altsetting->desc.bInterfaceNumber,
+                       NULL,
+                       0,
+                       100 );
+   }
+
+   // For EC21/EC25 the module needs roughly 15 seconds before QMI is ready;
+   // waiting that long in driver probe() would block other drivers from probing.
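+   // The wait is therefore pushed into the qmi_sync_thread kernel thread
+   // started below: probe() returns right away, the thread performs the QMI
+   // sync on its own, and UserspaceOpen() blocks on mQMIReadyCompletion
+   // until the thread signals that QMI is ready.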
+ if (pDev->mbMdm9x07) + { + struct task_struct *qmi_sync_task; + atomic_inc(&pDev->refcount); + init_completion(&pDev->mQMIReadyCompletion); + pDev->mbQMIReady = false; + pDev->mbQMISyncIng = true; + qmi_sync_task = kthread_run(qmi_sync_thread, (void *)pDev, "qmi_sync/%d", pDev->mpNetDev->udev->devnum); + if (IS_ERR(qmi_sync_task)) { + pDev->mbQMISyncIng = false; + atomic_dec(&pDev->refcount); + DBG( "Create qmi_sync_thread fail\n" ); + return PTR_ERR(qmi_sync_task); + } + goto __register_chardev_qccmi; + } + + // Device is not ready for QMI connections right away + // Wait up to 30 seconds before failing + if (QMIReady( pDev, 30000 ) == false) + { + DBG( "Device unresponsive to QMI\n" ); + return -ETIMEDOUT; + } + + // Initiate QMI CTL Sync Procedure + DBG( "Sending QMI CTL Sync Request\n" ); + result = QMICTLSyncProc(pDev); + if (result != 0) + { + DBG( "QMI CTL Sync Procedure Error\n" ); + return result; + } + else + { + DBG( "QMI CTL Sync Procedure Successful\n" ); + } + + // Setup Data Format +#if defined(QUECTEL_WWAN_QMAP) + result = QMIWDASetDataFormat (pDev, pDev->qmap_mode, NULL); +#else + result = QMIWDASetDataFormat (pDev, 0, NULL); +#endif + if (result != 0) + { + return result; + } + + // Setup WDS callback + result = SetupQMIWDSCallback( pDev ); + if (result != 0) + { + return result; + } + + // Fill MEID for device + result = QMIDMSGetMEID( pDev ); + if (result != 0) + { + return result; + } + +__register_chardev_qccmi: + // allocate and fill devno with numbers + result = alloc_chrdev_region( &devno, 0, 1, "qcqmi" ); + if (result < 0) + { + return result; + } + + // Create cdev + cdev_init( &pDev->mQMIDev.mCdev, &UserspaceQMIFops ); + pDev->mQMIDev.mCdev.owner = THIS_MODULE; + pDev->mQMIDev.mCdev.ops = &UserspaceQMIFops; + pDev->mQMIDev.mbCdevIsInitialized = true; + + result = cdev_add( &pDev->mQMIDev.mCdev, devno, 1 ); + if (result != 0) + { + DBG( "error adding cdev\n" ); + return result; + } + + // Match interface number (usb# or eth#) + if (!!(pDevName = strstr( pDev->mpNetDev->net->name, "eth" ))) { + pDevName += strlen( "eth" ); + } else if (!!(pDevName = strstr( pDev->mpNetDev->net->name, "usb" ))) { + pDevName += strlen( "usb" ); +#if 1 //openWRT like use ppp# or lte# + } else if (!!(pDevName = strstr( pDev->mpNetDev->net->name, "ppp" ))) { + pDevName += strlen( "ppp" ); + } else if (!!(pDevName = strstr( pDev->mpNetDev->net->name, "lte" ))) { + pDevName += strlen( "lte" ); +#endif + } else { + DBG( "Bad net name: %s\n", pDev->mpNetDev->net->name ); + return -ENXIO; + } + GobiQMIIndex = simple_strtoul( pDevName, NULL, 10 ); + if (GobiQMIIndex < 0) + { + DBG( "Bad minor number\n" ); + return -ENXIO; + } + + // Always print this output + printk( KERN_INFO "creating qcqmi%d\n", + GobiQMIIndex ); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION( 2,6,27 )) + // kernel 2.6.27 added a new fourth parameter to device_create + // void * drvdata : the data to be added to the device for callbacks + device_create( pDev->mQMIDev.mpDevClass, + &pDev->mpIntf->dev, + devno, + NULL, + "qcqmi%d", + GobiQMIIndex ); +#else + device_create( pDev->mQMIDev.mpDevClass, + &pDev->mpIntf->dev, + devno, + "qcqmi%d", + GobiQMIIndex ); +#endif + + pDev->mQMIDev.mDevNum = devno; + + // Success + return 0; +} + +/*=========================================================================== +METHOD: + DeregisterQMIDevice (Public Method) + +DESCRIPTION: + QMI Device cleanup function + + NOTE: When this function is run the device is no longer valid + +PARAMETERS: + pDev [ I ] - Device specific memory + 
+RETURN VALUE: + None +===========================================================================*/ +void DeregisterQMIDevice( sGobiUSBNet * pDev ) +{ +#ifndef quectel_no_for_each_process + struct inode * pOpenInode; + struct list_head * pInodeList; + struct task_struct * pEachTask; + struct fdtable * pFDT; + struct file * pFilp; + int count = 0; +#endif + unsigned long flags; + int tries; + int result; + + // Should never happen, but check anyway + if (IsDeviceValid( pDev ) == false) + { + DBG( "wrong device\n" ); + return; + } + + pDev->mbDeregisterQMIDevice = true; + + for (tries = 0; tries < 3000; tries += 10) { + if (pDev->mbQMISyncIng == false) + break; + msleep(10); + } + + if (pDev->mbQMISyncIng) { + DBG( "QMI sync ing\n" ); + } + + // Release all clients + spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags ); + while (pDev->mQMIDev.mpClientMemList != NULL) + { + u16 mClientID = pDev->mQMIDev.mpClientMemList->mClientID; + if (waitqueue_active(&pDev->mQMIDev.mpClientMemList->mWaitQueue)) { + DBG("WaitQueue 0x%04X\n", mClientID); + wake_up_interruptible_sync( &pDev->mQMIDev.mpClientMemList->mWaitQueue ); + spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags ); + msleep(10); + spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags ); + continue; + } + + DBG( "release 0x%04X\n", pDev->mQMIDev.mpClientMemList->mClientID ); + + spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags ); + ReleaseClientID( pDev, mClientID ); + // NOTE: pDev->mQMIDev.mpClientMemList will + // be updated in ReleaseClientID() + spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags ); + } + spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags ); + + // Stop all reads + KillRead( pDev ); + + pDev->mbQMIValid = false; + + if (pDev->mQMIDev.mbCdevIsInitialized == false) + { + return; + } + +#ifndef quectel_no_for_each_process + // Find each open file handle, and manually close it + + // Generally there will only be only one inode, but more are possible + list_for_each( pInodeList, &pDev->mQMIDev.mCdev.list ) + { + // Get the inode + pOpenInode = container_of( pInodeList, struct inode, i_devices ); + if (pOpenInode != NULL && (IS_ERR( pOpenInode ) == false)) + { + // Look for this inode in each task + + rcu_read_lock(); + for_each_process( pEachTask ) + { + task_lock(pEachTask); + if (pEachTask == NULL || pEachTask->files == NULL) + { + // Some tasks may not have files (e.g. Xsession) + task_unlock(pEachTask); + continue; + } + // For each file this task has open, check if it's referencing + // our inode. 
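+               // If it is, the handle is force-closed below so the cdev's
+               // reference count can drop; otherwise the 30 second wait on
+               // the cdev refcount further down would have to time out.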
+               spin_lock_irqsave( &pEachTask->files->file_lock, flags );
+               task_unlock(pEachTask); //kernel/exit.c:do_exit() -> fs/file.c:exit_files()
+               pFDT = files_fdtable( pEachTask->files );
+               for (count = 0; count < pFDT->max_fds; count++)
+               {
+                  pFilp = pFDT->fd[count];
+                  if (pFilp != NULL && pFilp->f_dentry != NULL)
+                  {
+                     if (pFilp->f_dentry->d_inode == pOpenInode)
+                     {
+                        // Close this file handle
+                        rcu_assign_pointer( pFDT->fd[count], NULL );
+                        spin_unlock_irqrestore( &pEachTask->files->file_lock, flags );
+
+                        DBG( "forcing close of open file handle\n" );
+                        filp_close( pFilp, pEachTask->files );
+
+                        spin_lock_irqsave( &pEachTask->files->file_lock, flags );
+                     }
+                  }
+               }
+               spin_unlock_irqrestore( &pEachTask->files->file_lock, flags );
+            }
+            rcu_read_unlock();
+         }
+      }
+#endif
+
+if (pDev->mpNetDev->udev->state) {
+   // Send SetControlLineState request (USB_CDC)
+   result = usb_control_msg( pDev->mpNetDev->udev,
+                             usb_sndctrlpipe( pDev->mpNetDev->udev, 0 ),
+                             SET_CONTROL_LINE_STATE_REQUEST,
+                             SET_CONTROL_LINE_STATE_REQUEST_TYPE,
+                             0, // DTR not present
+                             /* USB interface number to receive control message */
+                             pDev->mpIntf->cur_altsetting->desc.bInterfaceNumber,
+                             NULL,
+                             0,
+                             100 );
+   if (result < 0)
+   {
+      DBG( "Bad SetControlLineState status %d\n", result );
+   }
+}
+
+   // Remove device (so no more calls can be made by users)
+   if (IS_ERR( pDev->mQMIDev.mpDevClass ) == false)
+   {
+      device_destroy( pDev->mQMIDev.mpDevClass,
+                      pDev->mQMIDev.mDevNum );
+   }
+
+   // Hold onto cdev memory location until everyone is through using it.
+   // Timeout after 30 seconds (10 ms interval). Timeout should never happen,
+   // but exists to prevent an infinite loop just in case.
+   for (tries = 0; tries < 30 * 100; tries++)
+   {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION( 4,11,0 ))
+      int ref = atomic_read( &pDev->mQMIDev.mCdev.kobj.kref.refcount );
+#else
+      int ref = kref_read( &pDev->mQMIDev.mCdev.kobj.kref );
+#endif
+      if (ref > 1)
+      {
+         DBG( "cdev in use by %d tasks\n", ref - 1 );
+         if (tries > 10)
+            INFO( "cdev in use by %d tasks\n", ref - 1 );
+         msleep( 10 );
+      }
+      else
+      {
+         break;
+      }
+   }
+
+   cdev_del( &pDev->mQMIDev.mCdev );
+
+   unregister_chrdev_region( pDev->mQMIDev.mDevNum, 1 );
+
+   return;
+}
+
+/*=========================================================================*/
+// Driver level client management
+/*=========================================================================*/
+
+/*===========================================================================
+METHOD:
+   QMIReady (Public Method)
+
+DESCRIPTION:
+   Send QMI CTL GET VERSION INFO REQ and SET DATA FORMAT REQ
+   Wait for response or timeout
+
+PARAMETERS:
+   pDev     [ I ] - Device specific memory
+   timeout  [ I ] - Milliseconds to wait for response
+
+RETURN VALUE:
+   bool
+===========================================================================*/
+static bool QMIReady(
+   sGobiUSBNet *    pDev,
+   u16              timeout )
+{
+   int result;
+   void * pWriteBuffer;
+   u16 writeBufferSize;
+   void * pReadBuffer;
+   u16 readBufferSize;
+   u16 curTime;
+   unsigned long flags;
+   u8 transactionID;
+   u16 interval = 2000;
+
+   if (IsDeviceValid( pDev ) == false)
+   {
+      DBG( "Invalid device\n" );
+      return false;
+   }
+
+   writeBufferSize = QMICTLReadyReqSize();
+   pWriteBuffer = kmalloc( writeBufferSize, GFP_KERNEL );
+   if (pWriteBuffer == NULL)
+   {
+      return false;
+   }
+
+   // down_timeout() has gone in and out of the mainline kernel over the
+   // years. When it is available we wait on the read semaphore with a
+   // timeout; the #else branch below sleeps and polls the semaphore instead.
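+   // Each loop iteration below queues an async read for a fresh transaction
+   // ID (UpSem posts readSem when the response arrives), sends a QMI CTL
+   // version request, then waits up to 'interval' ms for the matching reply.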
+
+   // Send a request every 'interval' ms and see if we get a response
+   for (curTime = 0; curTime < timeout; curTime += interval)
+   {
+      // Start read
+      struct QuecSem *readSem = kmalloc(sizeof(struct QuecSem ), GFP_KERNEL);
+      if (readSem == NULL)
+      {
+         kfree( pWriteBuffer );
+         return false;
+      }
+      readSem->magic = QUEC_SEM_MAGIC;
+      sema_init( &readSem->readSem, 0 );
+
+      transactionID = QMIXactionIDGet( pDev );
+
+      result = ReadAsync( pDev, QMICTL, transactionID, UpSem, readSem );
+      if (result != 0)
+      {
+         kfree( pWriteBuffer );
+         return false;
+      }
+
+      // Fill buffer
+      result = QMICTLReadyReq( pWriteBuffer,
+                               writeBufferSize,
+                               transactionID );
+      if (result < 0)
+      {
+         kfree( pWriteBuffer );
+         return false;
+      }
+
+      // Disregard status. On errors, just try again
+      result = WriteSync( pDev,
+                 pWriteBuffer,
+                 writeBufferSize,
+                 QMICTL );
+
+      if (result < 0) // may be caused by USB disconnect
+      {
+         kfree( pWriteBuffer );
+         return false;
+      }
+
+#if 1
+      if (down_timeout( &readSem->readSem, msecs_to_jiffies(interval) ) == 0)
+#else
+      msleep( interval );
+      if (down_trylock( &readSem->readSem ) == 0)
+#endif
+      {
+         kfree(readSem);
+         // Enter critical section
+         spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );
+
+         // Pop the read data
+         if (PopFromReadMemList( pDev,
+                                 QMICTL,
+                                 transactionID,
+                                 &pReadBuffer,
+                                 &readBufferSize ) == true)
+         {
+            // Success
+
+            // End critical section
+            spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+
+            // We don't care about the result
+            kfree( pReadBuffer );
+
+            break;
+         }
+         else
+         {
+            // Read mismatch/failure, unlock and continue
+            spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+         }
+      }
+      else
+      {
+         readSem->magic = 0;
+         // Enter critical section
+         spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags );
+
+         // Timeout, remove the async read
+         NotifyAndPopNotifyList( pDev, QMICTL, transactionID );
+
+         // End critical section
+         spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags );
+      }
+
+      if (pDev->mbDeregisterQMIDevice)
+      {
+         kfree( pWriteBuffer );
+         return false;
+      }
+   }
+
+   kfree( pWriteBuffer );
+
+   // Did we time out?
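+   // curTime advances by 'interval' per attempt, so reaching 'timeout'
+   // means no attempt ever received a response.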
+ if (curTime >= timeout) + { + return false; + } + + DBG( "QMI Ready after %u milliseconds\n", curTime ); + + // Success + return true; +} + +/*=========================================================================== +METHOD: + QMIWDSCallback (Public Method) + +DESCRIPTION: + QMI WDS callback function + Update net stats or link state + +PARAMETERS: + pDev [ I ] - Device specific memory + clientID [ I ] - Client ID + pData [ I ] - Callback data (unused) + +RETURN VALUE: + None +===========================================================================*/ +static void QMIWDSCallback( + sGobiUSBNet * pDev, + u16 clientID, + void * pData ) +{ + bool bRet; + int result; + void * pReadBuffer; + u16 readBufferSize; + +#if 0 +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,31 )) + struct net_device_stats * pStats = &(pDev->mpNetDev->stats); +#else + struct net_device_stats * pStats = &(pDev->mpNetDev->net->stats); +#endif +#endif + + u32 TXOk = (u32)-1; + u32 RXOk = (u32)-1; + u32 TXErr = (u32)-1; + u32 RXErr = (u32)-1; + u32 TXOfl = (u32)-1; + u32 RXOfl = (u32)-1; + u64 TXBytesOk = (u64)-1; + u64 RXBytesOk = (u64)-1; + bool bLinkState; + bool bReconfigure; + unsigned long flags; + + if (IsDeviceValid( pDev ) == false) + { + DBG( "Invalid device\n" ); + return; + } + + // Critical section + spin_lock_irqsave( &pDev->mQMIDev.mClientMemLock, flags ); + + bRet = PopFromReadMemList( pDev, + clientID, + 0, + &pReadBuffer, + &readBufferSize ); + + // End critical section + spin_unlock_irqrestore( &pDev->mQMIDev.mClientMemLock, flags ); + + if (bRet == false) + { + DBG( "WDS callback failed to get data\n" ); + return; + } + + // Default values + bLinkState = ! GobiTestDownReason( pDev, NO_NDIS_CONNECTION ); + bReconfigure = false; + + result = QMIWDSEventResp( pReadBuffer, + readBufferSize, + &TXOk, + &RXOk, + &TXErr, + &RXErr, + &TXOfl, + &RXOfl, + &TXBytesOk, + &RXBytesOk, + &bLinkState, + &bReconfigure ); + if (result < 0) + { + DBG( "bad WDS packet\n" ); + } + else + { +#if 0 //usbbet.c will do this job + // Fill in new values, ignore max values + if (TXOfl != (u32)-1) + { + pStats->tx_fifo_errors = TXOfl; + } + + if (RXOfl != (u32)-1) + { + pStats->rx_fifo_errors = RXOfl; + } + + if (TXErr != (u32)-1) + { + pStats->tx_errors = TXErr; + } + + if (RXErr != (u32)-1) + { + pStats->rx_errors = RXErr; + } + + if (TXOk != (u32)-1) + { + pStats->tx_packets = TXOk + pStats->tx_errors; + } + + if (RXOk != (u32)-1) + { + pStats->rx_packets = RXOk + pStats->rx_errors; + } + + if (TXBytesOk != (u64)-1) + { + pStats->tx_bytes = TXBytesOk; + } + + if (RXBytesOk != (u64)-1) + { + pStats->rx_bytes = RXBytesOk; + } +#endif + + if (bReconfigure == true) + { + DBG( "Net device link reset\n" ); + GobiSetDownReason( pDev, NO_NDIS_CONNECTION ); + GobiClearDownReason( pDev, NO_NDIS_CONNECTION ); + } + else + { + if (bLinkState == true) + { + if (GobiTestDownReason( pDev, NO_NDIS_CONNECTION )) { + DBG( "Net device link is connected\n" ); + GobiClearDownReason( pDev, NO_NDIS_CONNECTION ); + } + } + else + { + if (!GobiTestDownReason( pDev, NO_NDIS_CONNECTION )) { + DBG( "Net device link is disconnected\n" ); + GobiSetDownReason( pDev, NO_NDIS_CONNECTION ); + } + } + } + } + + kfree( pReadBuffer ); + + // Setup next read + result = ReadAsync( pDev, + clientID, + 0, + QMIWDSCallback, + pData ); + if (result != 0) + { + DBG( "unable to setup next async read\n" ); + } + + return; +} + +/*=========================================================================== +METHOD: + SetupQMIWDSCallback (Public Method) + +DESCRIPTION: + 
Request client and fire off reqests and start async read for + QMI WDS callback + +PARAMETERS: + pDev [ I ] - Device specific memory + +RETURN VALUE: + int - 0 for success + Negative errno for failure +===========================================================================*/ +static int SetupQMIWDSCallback( sGobiUSBNet * pDev ) +{ + int result; + void * pWriteBuffer; + u16 writeBufferSize; + u16 WDSClientID; + + if (IsDeviceValid( pDev ) == false) + { + DBG( "Invalid device\n" ); + return -EFAULT; + } + + result = GetClientID( pDev, QMIWDS ); + if (result < 0) + { + return result; + } + WDSClientID = result; + +#if 0 // add for "AT$QCRMCALL=1,1", be careful: donot enable these codes if use quectel-CM, or cannot obtain IP by udhcpc + if (pDev->mbMdm9x07) + { + void * pReadBuffer; + u16 readBufferSize; + + writeBufferSize = QMIWDSSetQMUXBindMuxDataPortSize(); + pWriteBuffer = kmalloc( writeBufferSize, GFP_KERNEL ); + if (pWriteBuffer == NULL) + { + return -ENOMEM; + } + + result = QMIWDSSetQMUXBindMuxDataPortReq( pWriteBuffer, + writeBufferSize, + 0x81, + 3 ); + if (result < 0) + { + kfree( pWriteBuffer ); + return result; + } + + result = WriteSync( pDev, + pWriteBuffer, + writeBufferSize, + WDSClientID ); + kfree( pWriteBuffer ); + + if (result < 0) + { + return result; + } + + result = ReadSync( pDev, + &pReadBuffer, + WDSClientID, + 3 ); + if (result < 0) + { + return result; + } + readBufferSize = result; + + kfree( pReadBuffer ); + } +#endif + + // QMI WDS Set Event Report + writeBufferSize = QMIWDSSetEventReportReqSize(); + pWriteBuffer = kmalloc( writeBufferSize, GFP_KERNEL ); + if (pWriteBuffer == NULL) + { + return -ENOMEM; + } + + result = QMIWDSSetEventReportReq( pWriteBuffer, + writeBufferSize, + 1 ); + if (result < 0) + { + kfree( pWriteBuffer ); + return result; + } + + result = WriteSync( pDev, + pWriteBuffer, + writeBufferSize, + WDSClientID ); + kfree( pWriteBuffer ); + + if (result < 0) + { + return result; + } + + // QMI WDS Get PKG SRVC Status + writeBufferSize = QMIWDSGetPKGSRVCStatusReqSize(); + pWriteBuffer = kmalloc( writeBufferSize, GFP_KERNEL ); + if (pWriteBuffer == NULL) + { + return -ENOMEM; + } + + result = QMIWDSGetPKGSRVCStatusReq( pWriteBuffer, + writeBufferSize, + 2 ); + if (result < 0) + { + kfree( pWriteBuffer ); + return result; + } + + result = WriteSync( pDev, + pWriteBuffer, + writeBufferSize, + WDSClientID ); + kfree( pWriteBuffer ); + + if (result < 0) + { + return result; + } + + // Setup asnyc read callback + result = ReadAsync( pDev, + WDSClientID, + 0, + QMIWDSCallback, + NULL ); + if (result != 0) + { + DBG( "unable to setup async read\n" ); + return result; + } + + // Send SetControlLineState request (USB_CDC) + // Required for Autoconnect + result = usb_control_msg( pDev->mpNetDev->udev, + usb_sndctrlpipe( pDev->mpNetDev->udev, 0 ), + SET_CONTROL_LINE_STATE_REQUEST, + SET_CONTROL_LINE_STATE_REQUEST_TYPE, + CONTROL_DTR, + /* USB interface number to receive control message */ + pDev->mpIntf->cur_altsetting->desc.bInterfaceNumber, + NULL, + 0, + 100 ); + if (result < 0) + { + DBG( "Bad SetControlLineState status %d\n", result ); + return result; + } + + return 0; +} + +/*=========================================================================== +METHOD: + QMIDMSGetMEID (Public Method) + +DESCRIPTION: + Register DMS client + send MEID req and parse response + Release DMS client + +PARAMETERS: + pDev [ I ] - Device specific memory + +RETURN VALUE: + None +===========================================================================*/ +static 
int QMIDMSGetMEID( sGobiUSBNet * pDev ) +{ + int result; + void * pWriteBuffer; + u16 writeBufferSize; + void * pReadBuffer; + u16 readBufferSize; + u16 DMSClientID; + + if (IsDeviceValid( pDev ) == false) + { + DBG( "Invalid device\n" ); + return -EFAULT; + } + + result = GetClientID( pDev, QMIDMS ); + if (result < 0) + { + return result; + } + DMSClientID = result; + + // QMI DMS Get Serial numbers Req + writeBufferSize = QMIDMSGetMEIDReqSize(); + pWriteBuffer = kmalloc( writeBufferSize, GFP_KERNEL ); + if (pWriteBuffer == NULL) + { + return -ENOMEM; + } + + result = QMIDMSGetMEIDReq( pWriteBuffer, + writeBufferSize, + 1 ); + if (result < 0) + { + kfree( pWriteBuffer ); + return result; + } + + result = WriteSync( pDev, + pWriteBuffer, + writeBufferSize, + DMSClientID ); + kfree( pWriteBuffer ); + + if (result < 0) + { + return result; + } + + // QMI DMS Get Serial numbers Resp + result = ReadSync( pDev, + &pReadBuffer, + DMSClientID, + 1 ); + if (result < 0) + { + return result; + } + readBufferSize = result; + + result = QMIDMSGetMEIDResp( pReadBuffer, + readBufferSize, + &pDev->mMEID[0], + 14 ); + kfree( pReadBuffer ); + + if (result < 0) + { + DBG( "bad get MEID resp\n" ); + + // Non fatal error, device did not return any MEID + // Fill with 0's + memset( &pDev->mMEID[0], '0', 14 ); + } + + ReleaseClientID( pDev, DMSClientID ); + + // Success + return 0; +} + +/*=========================================================================== +METHOD: + QMIWDASetDataFormat (Public Method) + +DESCRIPTION: + Register WDA client + send Data format request and parse response + Release WDA client + +PARAMETERS: + pDev [ I ] - Device specific memory + +RETURN VALUE: + None +===========================================================================*/ +static int QMIWDASetDataFormat( sGobiUSBNet * pDev, int qmap_mode, int *rx_urb_size ) +{ + int result; + void * pWriteBuffer; + u16 writeBufferSize; + void * pReadBuffer; + u16 readBufferSize; + u16 WDAClientID; + + DBG("\n"); + + if (IsDeviceValid( pDev ) == false) + { + DBG( "Invalid device\n" ); + return -EFAULT; + } + + result = GetClientID( pDev, QMIWDA ); + if (result < 0) + { + return result; + } + WDAClientID = result; + + // QMI WDA Set Data Format Request + writeBufferSize = QMIWDASetDataFormatReqSize(qmap_mode); + pWriteBuffer = kmalloc( writeBufferSize, GFP_KERNEL ); + if (pWriteBuffer == NULL) + { + return -ENOMEM; + } + + result = QMIWDASetDataFormatReq( pWriteBuffer, + writeBufferSize, pDev->mbRawIPMode, + qmap_mode ? 
pDev->qmap_version : 0, (31*1024), + 1 ); + + if (result < 0) + { + kfree( pWriteBuffer ); + return result; + } + + result = WriteSync( pDev, + pWriteBuffer, + writeBufferSize, + WDAClientID ); + kfree( pWriteBuffer ); + + if (result < 0) + { + return result; + } + + // QMI DMS Get Serial numbers Resp + result = ReadSync( pDev, + &pReadBuffer, + WDAClientID, + 1 ); + if (result < 0) + { + return result; + } + readBufferSize = result; + +if (qmap_mode && rx_urb_size) { + int qmap_version = 0, rx_size = 0, tx_size = 0; + result = QMIWDASetDataFormatResp( pReadBuffer, + readBufferSize, pDev->mbRawIPMode, &qmap_version, &rx_size, &tx_size, &pDev->qmap_settings); + INFO( "qmap settings qmap_version=%d, rx_size=%d, tx_size=%d\n", + le32_to_cpu(qmap_version), le32_to_cpu(rx_size), le32_to_cpu(tx_size)); + + if (le32_to_cpu(qmap_version)) { +#if defined(QUECTEL_UL_DATA_AGG) + struct ul_agg_ctx *ctx = &pDev->agg_ctx; + + if (le32_to_cpu(pDev->qmap_settings.ul_data_aggregation_max_datagrams) > 1) { + ctx->ul_data_aggregation_max_size = le32_to_cpu(pDev->qmap_settings.ul_data_aggregation_max_size); + ctx->ul_data_aggregation_max_datagrams = le32_to_cpu(pDev->qmap_settings.ul_data_aggregation_max_datagrams); + ctx->dl_minimum_padding = le32_to_cpu(pDev->qmap_settings.dl_minimum_padding); + } + INFO( "qmap settings ul_data_aggregation_max_size=%d, ul_data_aggregation_max_datagrams=%d\n", + ctx->ul_data_aggregation_max_size, ctx->ul_data_aggregation_max_datagrams); + if (ctx->ul_data_aggregation_max_datagrams > 11) + ctx->ul_data_aggregation_max_datagrams = 11; +#endif + *rx_urb_size = le32_to_cpu(rx_size); + } else { + *rx_urb_size = 0; + result = -EFAULT; + } +} else { + int qmap_enabled = 0, rx_size = 0, tx_size = 0; + result = QMIWDASetDataFormatResp( pReadBuffer, + readBufferSize, pDev->mbRawIPMode, &qmap_enabled, &rx_size, &tx_size, NULL); +} + + kfree( pReadBuffer ); + + if (result < 0) + { + DBG( "Data Format Cannot be set\n" ); + } + + ReleaseClientID( pDev, WDAClientID ); + + // Success + return 0; +} + +int QuecQMIWDASetDataFormat( sGobiUSBNet * pDev, int qmap_mode, int *rx_urb_size ) { + return QMIWDASetDataFormat(pDev, qmap_mode, rx_urb_size); +} diff --git a/package/wwan/driver/quectel_Gobinet/src/QMIDevice.h b/package/wwan/driver/quectel_Gobinet/src/QMIDevice.h new file mode 100644 index 000000000..93984fa08 --- /dev/null +++ b/package/wwan/driver/quectel_Gobinet/src/QMIDevice.h @@ -0,0 +1,368 @@ +/*=========================================================================== +FILE: + QMIDevice.h + +DESCRIPTION: + Functions related to the QMI interface device + +FUNCTIONS: + Generic functions + IsDeviceValid + PrintHex + GobiSetDownReason + GobiClearDownReason + GobiTestDownReason + + Driver level asynchronous read functions + ResubmitIntURB + ReadCallback + IntCallback + StartRead + KillRead + + Internal read/write functions + ReadAsync + UpSem + ReadSync + WriteSyncCallback + WriteSync + + Internal memory management functions + GetClientID + ReleaseClientID + FindClientMem + AddToReadMemList + PopFromReadMemList + AddToNotifyList + NotifyAndPopNotifyList + AddToURBList + PopFromURBList + + Internal userspace wrapper functions + UserspaceunlockedIOCTL + + Userspace wrappers + UserspaceOpen + UserspaceIOCTL + UserspaceClose + UserspaceRead + UserspaceWrite + UserspacePoll + + Initializer and destructor + RegisterQMIDevice + DeregisterQMIDevice + + Driver level client management + QMIReady + QMIWDSCallback + SetupQMIWDSCallback + QMIDMSGetMEID + +Copyright (c) 2011, Code Aurora Forum. 
All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Code Aurora Forum nor + the names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. +===========================================================================*/ + +//--------------------------------------------------------------------------- +// Pragmas +//--------------------------------------------------------------------------- +#pragma once + +//--------------------------------------------------------------------------- +// Include Files +//--------------------------------------------------------------------------- +#include "Structs.h" +#include "QMI.h" + +/*=========================================================================*/ +// Generic functions +/*=========================================================================*/ + +#ifdef __QUECTEL_INTER__ + +// Basic test to see if device memory is valid +static bool IsDeviceValid( sGobiUSBNet * pDev ); + +/*=========================================================================*/ +// Driver level asynchronous read functions +/*=========================================================================*/ + +// Resubmit interrupt URB, re-using same values +static int ResubmitIntURB( struct urb * pIntURB ); + +// Read callback +// Put the data in storage and notify anyone waiting for data +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,18 )) +static void ReadCallback( struct urb * pReadURB ); +#else +static void ReadCallback(struct urb *pReadURB, struct pt_regs *regs); +#endif + +// Inturrupt callback +// Data is available, start a read URB +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,18 )) +static void IntCallback( struct urb * pIntURB ); +#else +static void IntCallback(struct urb *pIntURB, struct pt_regs *regs); +#endif + +/*=========================================================================*/ +// Internal read/write functions +/*=========================================================================*/ + +// Start asynchronous read +// Reading client's data store, not device +static int ReadAsync( + sGobiUSBNet * pDev, + u16 clientID, + u16 transactionID, + void (*pCallback)(sGobiUSBNet *, u16, void *), + void * pData ); + +// Notification function for 
synchronous read +static void UpSem( + sGobiUSBNet * pDev, + u16 clientID, + void * pData ); + +// Start synchronous read +// Reading client's data store, not device +static int ReadSync( + sGobiUSBNet * pDev, + void ** ppOutBuffer, + u16 clientID, + u16 transactionID ); + +// Write callback +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,18 )) +static void WriteSyncCallback( struct urb * pWriteURB ); +#else +static void WriteSyncCallback(struct urb *pWriteURB, struct pt_regs *regs); +#endif + +// Start synchronous write +static int WriteSync( + sGobiUSBNet * pDev, + char * pInWriteBuffer, + int size, + u16 clientID ); + +/*=========================================================================*/ +// Internal memory management functions +/*=========================================================================*/ + +// Create client and allocate memory +static int GetClientID( + sGobiUSBNet * pDev, + u8 serviceType ); + +// Release client and free memory +static void ReleaseClientID( + sGobiUSBNet * pDev, + u16 clientID ); + +// Find this client's memory +static sClientMemList * FindClientMem( + sGobiUSBNet * pDev, + u16 clientID ); + +// Add Data to this client's ReadMem list +static bool AddToReadMemList( + sGobiUSBNet * pDev, + u16 clientID, + u16 transactionID, + void * pData, + u16 dataSize ); + +// Remove data from this client's ReadMem list if it matches +// the specified transaction ID. +static bool PopFromReadMemList( + sGobiUSBNet * pDev, + u16 clientID, + u16 transactionID, + void ** ppData, + u16 * pDataSize ); + +// Add Notify entry to this client's notify List +static bool AddToNotifyList( + sGobiUSBNet * pDev, + u16 clientID, + u16 transactionID, + void (* pNotifyFunct)(sGobiUSBNet *, u16, void *), + void * pData ); + +// Remove first Notify entry from this client's notify list +// and Run function +static bool NotifyAndPopNotifyList( + sGobiUSBNet * pDev, + u16 clientID, + u16 transactionID ); + +// Add URB to this client's URB list +static bool AddToURBList( + sGobiUSBNet * pDev, + u16 clientID, + struct urb * pURB ); + +// Remove URB from this client's URB list +static struct urb * PopFromURBList( + sGobiUSBNet * pDev, + u16 clientID ); + +/*=========================================================================*/ +// Internal userspace wrappers +/*=========================================================================*/ + +// Userspace unlocked ioctl +static long UserspaceunlockedIOCTL( + struct file * pFilp, + unsigned int cmd, + unsigned long arg ); + +/*=========================================================================*/ +// Userspace wrappers +/*=========================================================================*/ + +// Userspace open +static int UserspaceOpen( + struct inode * pInode, + struct file * pFilp ); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,36 )) +// Userspace ioctl +static int UserspaceIOCTL( + struct inode * pUnusedInode, + struct file * pFilp, + unsigned int cmd, + unsigned long arg ); +#endif + +// Userspace close +#define quectel_no_for_each_process +#ifdef quectel_no_for_each_process +static int UserspaceClose( + struct inode * pInode, + struct file * pFilp ); +#else +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,14 )) +static int UserspaceClose( + struct file * pFilp, + fl_owner_t unusedFileTable ); +#else +static int UserspaceClose( struct file * pFilp ); +#endif +#endif + +// Userspace read (synchronous) +static ssize_t UserspaceRead( + struct file * pFilp, + char __user * pBuf, + size_t size, + loff_t * pUnusedFpos ); + 
+// Userspace write (synchronous) +static ssize_t UserspaceWrite( + struct file * pFilp, + const char __user * pBuf, + size_t size, + loff_t * pUnusedFpos ); + +static unsigned int UserspacePoll( + struct file * pFilp, + struct poll_table_struct * pPollTable ); + +/*=========================================================================*/ +// Driver level client management +/*=========================================================================*/ + +// Check if QMI is ready for use +static bool QMIReady( + sGobiUSBNet * pDev, + u16 timeout ); + +// QMI WDS callback function +static void QMIWDSCallback( + sGobiUSBNet * pDev, + u16 clientID, + void * pData ); + +// Fire off reqests and start async read for QMI WDS callback +static int SetupQMIWDSCallback( sGobiUSBNet * pDev ); + +// Register client, send req and parse MEID response, release client +static int QMIDMSGetMEID( sGobiUSBNet * pDev ); + +// Register client, send req and parse Data format response, release client +static int QMIWDASetDataFormat( sGobiUSBNet * pDev, int qmap_mode, int *rx_urb_size ); +#endif + +// Print Hex data, for debug purposes +void QuecPrintHex( + void * pBuffer, + u16 bufSize ); + +// Sets mDownReason and turns carrier off +void QuecGobiSetDownReason( + sGobiUSBNet * pDev, + u8 reason ); + +// Clear mDownReason and may turn carrier on +void QuecGobiClearDownReason( + sGobiUSBNet * pDev, + u8 reason ); + +// Tests mDownReason and returns whether reason is set +bool QuecGobiTestDownReason( + sGobiUSBNet * pDev, + u8 reason ); + +// Start continuous read "thread" + int QuecStartRead( sGobiUSBNet * pDev ); + +// Kill continuous read "thread" + void QuecKillRead( sGobiUSBNet * pDev ); + +/*=========================================================================*/ +// Initializer and destructor +/*=========================================================================*/ + +// QMI Device initialization function +int QuecRegisterQMIDevice( sGobiUSBNet * pDev ); + +// QMI Device cleanup function +void QuecDeregisterQMIDevice( sGobiUSBNet * pDev ); + +int QuecQMIWDASetDataFormat( sGobiUSBNet * pDev, int qmap_mode, int *rx_urb_size ); + +#define PrintHex QuecPrintHex +#define GobiSetDownReason QuecGobiSetDownReason +#define GobiClearDownReason QuecGobiClearDownReason +#define GobiTestDownReason QuecGobiTestDownReason +#define StartRead QuecStartRead +#define KillRead QuecKillRead +#define RegisterQMIDevice QuecRegisterQMIDevice +#define DeregisterQMIDevice QuecDeregisterQMIDevice diff --git a/package/wwan/driver/quectel_Gobinet/src/Readme.txt b/package/wwan/driver/quectel_Gobinet/src/Readme.txt new file mode 100644 index 000000000..0df201a89 --- /dev/null +++ b/package/wwan/driver/quectel_Gobinet/src/Readme.txt @@ -0,0 +1,78 @@ +Gobi3000 network driver 2011-07-29-1026 + +This readme covers important information concerning +the Gobi Net driver. + +Table of Contents + +1. What's new in this release +2. Known issues +3. Known platform issues + + +------------------------------------------------------------------------------- + +1. WHAT'S NEW + +This Release (Gobi3000 network driver 2011-07-29-1026) +a. Signal the device to leave low power mode on enumeration +b. Add "txQueueLength" parameter, which will set the Tx Queue Length +c. Send SetControlLineState message during driver/device removal +d. Change to new date-based versioning scheme + +Prior Release (Gobi3000 network driver 1.0.60) 06/29/2011 +a. Add UserspacePoll() function, to support select() +b. Fix possible deadlock on GobiUSBNetTXTimeout() +c. 
diff --git a/package/wwan/driver/quectel_Gobinet/src/Readme.txt b/package/wwan/driver/quectel_Gobinet/src/Readme.txt
new file mode 100644
index 000000000..0df201a89
--- /dev/null
+++ b/package/wwan/driver/quectel_Gobinet/src/Readme.txt
@@ -0,0 +1,78 @@
+Gobi3000 network driver 2011-07-29-1026
+
+This readme covers important information concerning
+the Gobi Net driver.
+
+Table of Contents
+
+1. What's new in this release
+2. Known issues
+3. Known platform issues
+
+
+-------------------------------------------------------------------------------
+
+1. WHAT'S NEW
+
+This Release (Gobi3000 network driver 2011-07-29-1026)
+a. Signal the device to leave low power mode on enumeration
+b. Add "txQueueLength" parameter, which will set the Tx Queue Length
+c. Send SetControlLineState message during driver/device removal
+d. Change to new date-based versioning scheme
+
+Prior Release (Gobi3000 network driver 1.0.60) 06/29/2011
+a. Add UserspacePoll() function, to support select()
+b. Fix possible deadlock on GobiUSBNetTXTimeout()
+c. Fix memory leak on data transmission
+
+Prior Release (Gobi3000 network driver 1.0.50) 05/18/2011
+a. Add support for kernels up to 2.6.38
+b. Add support for dynamic interface binding
+
+Prior Release (Gobi3000 network driver 1.0.40) 02/28/2011
+a. In cases of QMI read errors, discard the error and continue reading.
+b. Add "interruptible" parameter, which may be disabled for debugging purposes.
+
+Prior Release (Gobi3000 network driver 1.0.30) 01/05/2011
+a. Fix rare kernel PANIC if a process terminates while file handle close
+   or device removal is in progress.
+
+Prior Release (Gobi3000 network driver 1.0.20) 11/01/2010
+a. Fix possible kernel WARNING if device removed before QCWWANDisconnect().
+b. Fix multiple memory leaks in error cases.
+
+Prior Release (Gobi3000 network driver 1.0.10) 09/17/2010
+a. Initial release
+
+-------------------------------------------------------------------------------
+
+2. KNOWN ISSUES
+
+No known issues.
+
+-------------------------------------------------------------------------------
+
+3. KNOWN PLATFORM ISSUES
+
+a. Enabling autosuspend:
+   Autosuspend is supported by the Gobi3000 module and its drivers,
+   but by default it is not enabled by the open source kernel. As such,
+   the Gobi3000 module will not enter autosuspend unless the
+   user specifically turns on autosuspend with the command:
+      echo auto > /sys/bus/usb/devices/.../power/level
+b. Ksoftirq using 100% CPU:
+   There is a known issue with the open source usbnet driver that can
+   result in infinite software interrupts. The fix for this is to test
+   (in the usbnet_bh() function) if the usb_device can submit URBs before
+   attempting to submit the response URB buffers.
+c. NetworkManager does not recognize connection after resume:
+   After resuming from sleep/hibernate, NetworkManager may not recognize new
+   network connections by the Gobi device. This is a system issue not specific
+   to the Gobi device, which may result in DHCP not being run and the default
+   route not being updated. One way to fix this is to simply restart the
+   NetworkManager service.
+
+-------------------------------------------------------------------------------
+
+
+
diff --git a/package/wwan/driver/quectel_Gobinet/src/ReleaseNote.txt b/package/wwan/driver/quectel_Gobinet/src/ReleaseNote.txt
new file mode 100644
index 000000000..c3015e844
--- /dev/null
+++ b/package/wwan/driver/quectel_Gobinet/src/ReleaseNote.txt
@@ -0,0 +1,166 @@
+Release Notes
+
+[V1.6.3]
+Date: 9/26/2021
+enhancement:
+ 1. change version to 1.6.3
+fix:
+
+[V1.6.2.16]
+Date: 9/17/2021
+enhancement:
+fix:
+ 1. add sdx6x platform support
+
+[V1.6.2.15]
+Date: 3/23/2021
+enhancement:
+fix:
+ 1. add sdx12 platform support
+
+[V1.6.2.14]
+Date: 3/18/2021
+enhancement:
+fix:
+ 1. fix KASAN use-after-free reported during modem reboot stress tests
+ 2. wait for qmi_sync_thread() to finish in DeregisterQMIDevice(); otherwise USB may disconnect while the driver is still in qmi_sync_thread()
+
+[V1.6.2.13]
+Date: 12/31/2020
+enhancement:
+fix:
+ 1. fix quectel-CM open error when the driver is still in qmi_sync_thread() but the SoC enters sleep.
+
+[V1.6.2.12]
+Date: 12/31/2020
+enhancement:
+fix:
+ 1. for multi-pdn-call, fix being unable to ping after USB resumes from suspend.
+
+[V1.6.2.11]
+Date: 11/7/2020
+enhancement:
+ 1. support QUECTEL_QMI_MERGE: on some SOCs the control endpoint can only read up to 64 bytes of QMI at a time,
+    so QMI messages larger than 64 bytes are read in several chunks and merged.
+fix:
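+
+    (Added illustration, not part of the original notes: a minimal sketch of
+    the merge step, assuming the sQMIMsgHeader/sQMIMsgPacket layout declared
+    in Structs.h, where the 56-byte max payload plus the 8-byte header equals
+    the 64-byte endpoint limit; the driver's actual logic may differ.)
+
+       static int QmiMergeFragment( sQMIMsgPacket *pPkt, const sQMIMsgHeader *pHdr, u16 fragLen )
+       {
+          u16 payload = fragLen - sizeof(sQMIMsgHeader);
+
+          /* only fragments tagged with the merge identity/version are merged */
+          if (pHdr->idenity != MERGE_PACKET_IDENTITY || pHdr->version != MERGE_PACKET_VERSION)
+             return -EINVAL;
+          if (pPkt->len + payload > sizeof(pPkt->buf))
+             return -EOVERFLOW;
+
+          /* append this fragment's payload after the header */
+          memcpy( pPkt->buf + pPkt->len, pHdr + 1, payload );
+          pPkt->len += payload;
+
+          /* complete once total_len bytes have been accumulated */
+          return (pPkt->len >= pHdr->total_len) ? 0 : -EAGAIN;
+       }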
+
+[V1.6.2.10]
+Date: 9/15/2020
+enhancement:
+fix:
+ 1. for X55, fix panic on kernel V2.6 ~ V3.2
+
+[V1.6.2.9]
+Date: 7/24/2020
+enhancement:
+fix:
+ 1. for X55, fix errors on Big Endian SOCs.
+
+[V1.6.2.8]
+Date: 7/2/2020
+enhancement:
+ 1. support QMAPV5, UL AGG (ported from qmi_wwan_q)
+fix:
+ 1. fix errors on kernel V2.6.
+
+[V1.6.2.7]
+Date: 6/9/2020
+enhancement:
+fix:
+ 1. when sending a QMI CTL request, clear any pending QMI CTL response whose TID is the same
+
+[V1.6.2.6]
+Date: 5/19/2020
+enhancement:
+ 1. support bridge mode for multi-pdn-call
+fix:
+
+[V1.6.2.5]
+Date: 4/26/2020
+enhancement:
+ 1. fix netcard name to usbX (was ethX)
+fix:
+
+......
+
+[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.5.0]
+Date: 2018/04/17
+enhancement:
+1. support EG20&RG500
+2. fix rx_urb_size to 1520; do not change it according to the MTU
+
+[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.4.3]
+Date: 2018/04/16
+enhancement:
+1. increase QMAP's rx_urb_size to 32KB
+
+[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.4.2]
+Date: 2018/04/03
+bug fix:
+1. fix QMI client not being released when quectel-CM is killed by kill -9
+
+[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.4.1]
+Date: 2018/02/20
+bug fix:
+1. fix a compiler error on kernels later than 4.11
+
+[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.4.0]
+Date: 2018/12/17
+bug fix:
+1. fix a USB DMA error when built as GobiNet.ko on kernels later than 4.15
+
+[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.3.8]
+[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.3.7]
+Date: 2018/09/25
+enhancement:
+1. check skb length in tx_fixup functions.
+2. when QMAP is enabled, set FLAG_RX_ASSEMBLE to avoid 'RX errors' in ifconfig
+
+[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.3.6]
+Date: 2018/09/11
+enhancement:
+1. support EG12 EM12
+2. optimize QMAP source code
+3. fix compile errors and warnings on kernel version 4.15
+
+[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.3.5]
+Date: 2018/05/12
+enhancement:
+1. provide two methods to enable the QMAP function.
+  1.1 set module parameter 'qmap_mode' to X(1~4) to enable QMAP.
+  1.2 ifconfig usb0 down, then 'echo X > /sys/class/usbX/qmap_mode' to enable QMAP
+  for the above two methods, X(1) is used to enable 'IP Aggregation' and X(2~4) to enable 'IP Mux'
+2. support bridge mode; also provide two methods to enable bridge mode.
+  2.1 set module parameter 'bridge_mode' to 1 to enable bridge mode.
+  2.2 'echo 1 > /sys/class/usbX/bridge_mode' to enable bridge mode.
+  bridge mode setup:
+  brctl addbr br0; brctl addif br0 eth0; brctl addif br0 usb0; ./quectel-CM; ifconfig br0 up; ifconfig eth0 up
+  then connect eth0 to a PC with an Ethernet cable, and run a DHCP tool on the PC to obtain the public IP address.
+
+  'WCDMA&LTE_QConnectManager_Linux&Android_V1.1.40' or a later version is required to use QMAP and bridge mode.
+
+[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.3.4]
+Date: 2018/05/07
+enhancement:
+1. support using 'AT$QCRMCALL=1,1' to set up a data call.
+   when using 'AT$QCRMCALL=1,1', the module parameter 'qcrmcall_mode' must be set to 1,
+   and the GobiNet driver will not tx/rx QMI.
+
+[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.3.3]
+Date: 2018/04/04
+optimization:
+1. optimize QMAP source code
+
+[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.3.2]
+Date: 2018/03/23
+enhancement:
+1. support Qualcomm Mux and Aggregation Protocol (QMAP)
+  1.1 IP Mux: the GobiNet driver registers multiple netcards, one netcard corresponding to one PDP,
+      and will tx/rx multiple IP packets (possibly belonging to different PDPs) in one URB.
+  1.2 IP Aggregation: the GobiNet driver will rx multiple IP packets in one URB, which theoretically
+      increases throughput by reducing the number of USB interrupts.
+      The max rx URB size of MDM9x07 is 4KB; the max rx URB size of MDM9x40&SDX20 is 16KB.
+
+[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.3.1]
+Date: 2017/11/20
+enhancement:
+1. support BG96
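+
+(Added illustration, not part of the original notes: the IP Mux / IP
+Aggregation modes of V1.3.2 above are assumed to frame every datagram in an
+aggregated URB with the usual 4-byte QMAP header, roughly:
+
+   struct qmap_hdr {     /* one per datagram inside an rx/tx URB */
+      u8  cd_rsvd_pad;   /* bit7: command/data flag, low bits: pad length */
+      u8  mux_id;        /* selects the netcard, i.e. the PDP context */
+      u16 pkt_len;       /* big-endian datagram length incl. padding */
+   } __attribute__((__packed__));
+
+so demuxing an rx URB is a loop of: read the header, take ntohs(pkt_len),
+deliver the payload to the netcard selected by mux_id (presumably
+mpQmapNetDev[mux_id - QUECTEL_QMAP_MUX_ID] given Structs.h), and advance.)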
diff --git a/package/wwan/driver/quectel_Gobinet/src/Structs.h b/package/wwan/driver/quectel_Gobinet/src/Structs.h
new file mode 100644
index 000000000..d5a78e7af
--- /dev/null
+++ b/package/wwan/driver/quectel_Gobinet/src/Structs.h
@@ -0,0 +1,529 @@
+/*===========================================================================
+FILE:
+   Structs.h
+
+DESCRIPTION:
+   Declaration of structures used by the Qualcomm Linux USB Network driver
+
+FUNCTIONS:
+   none
+
+Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in the
+      documentation and/or other materials provided with the distribution.
+    * Neither the name of Code Aurora Forum nor
+      the names of its contributors may be used to endorse or promote
+      products derived from this software without specific prior written
+      permission.
+
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+===========================================================================*/
+
+//---------------------------------------------------------------------------
+// Pragmas
+//---------------------------------------------------------------------------
+#pragma once
+
+//---------------------------------------------------------------------------
+// Include Files
+//---------------------------------------------------------------------------
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define QUECTEL_WWAN_QMAP 4 //MAX is 7
+#ifdef QUECTEL_WWAN_QMAP
+#define QUECTEL_QMAP_MUX_ID 0x81
+#endif
+
+//#define QUECTEL_QMI_MERGE
+
+#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+#define QUECTEL_BRIDGE_MODE
+#endif
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 2,6,21 ))
+static inline void skb_reset_mac_header(struct sk_buff *skb)
+{
+   skb->mac.raw = skb->data;
+}
+#endif
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 2,6,22 ))
+#define bool u8
+#ifndef URB_FREE_BUFFER
+#define URB_FREE_BUFFER_BY_SELF //usb_free_urb() will not free the transfer buffer; the caller must free it
+#define URB_FREE_BUFFER 0x0100 /* Free transfer buffer with the URB */
+#endif
+
+/**
+ * usb_endpoint_type - get the endpoint's transfer type
+ * @epd: endpoint to be checked
+ *
+ * Returns one of USB_ENDPOINT_XFER_{CONTROL, ISOC, BULK, INT} according
+ * to @epd's transfer type.
+ */
+static inline int usb_endpoint_type(const struct usb_endpoint_descriptor *epd)
+{
+   return epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+}
+#endif
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 2,6,18 ))
+/**
+ * usb_endpoint_dir_in - check if the endpoint has IN direction
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint is of type IN, otherwise it returns false.
+ */
+static inline int usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd)
+{
+   return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN);
+}
+
+/**
+ * usb_endpoint_dir_out - check if the endpoint has OUT direction
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint is of type OUT, otherwise it returns false.
+ */
+static inline int usb_endpoint_dir_out(
+   const struct usb_endpoint_descriptor *epd)
+{
+   return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT);
+}
+
+/**
+ * usb_endpoint_xfer_int - check if the endpoint has interrupt transfer type
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint is of type interrupt, otherwise it returns
+ * false.
+ */
+static inline int usb_endpoint_xfer_int(
+   const struct usb_endpoint_descriptor *epd)
+{
+   return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+      USB_ENDPOINT_XFER_INT);
+}
+
+static inline int usb_autopm_set_interface(struct usb_interface *intf)
+{ return 0; }
+
+static inline int usb_autopm_get_interface(struct usb_interface *intf)
+{ return 0; }
+
+static inline int usb_autopm_get_interface_async(struct usb_interface *intf)
+{ return 0; }
+
+static inline void usb_autopm_put_interface(struct usb_interface *intf)
+{ }
+static inline void usb_autopm_put_interface_async(struct usb_interface *intf)
+{ }
+static inline void usb_autopm_enable(struct usb_interface *intf)
+{ }
+static inline void usb_autopm_disable(struct usb_interface *intf)
+{ }
+static inline void usb_mark_last_busy(struct usb_device *udev)
+{ }
+#endif
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 2,6,24 ))
+   #include "usbnet.h"
+#else
+   #include
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,25 ))
+   #include
+#else
+   #include
+#endif
+
+// Forward declaration (the structs below refer back to it); defined later below
+struct sGobiUSBNet;
+
+
+#if defined(QUECTEL_WWAN_QMAP)
+#define QUECTEL_UL_DATA_AGG 1
+
+#if defined(QUECTEL_UL_DATA_AGG)
+struct ul_agg_ctx {
+   /* QMIWDS_ADMIN_SET_DATA_FORMAT_RESP TLV_0x17 and TLV_0x18 */
+   uint ul_data_aggregation_max_datagrams; //UplinkDataAggregationMaxDatagramsTlv
+   uint ul_data_aggregation_max_size; //UplinkDataAggregationMaxSizeTlv
+   uint dl_minimum_padding;
+};
+#endif
+#endif
+
+/*=========================================================================*/
+// Struct sReadMemList
+//
+// Structure that defines an entry in a Read Memory linked list
+/*=========================================================================*/
+typedef struct sReadMemList
+{
+   /* Data buffer */
+   void * mpData;
+
+   /* Transaction ID */
+   u16 mTransactionID;
+
+   /* Size of data buffer */
+   u16 mDataSize;
+
+   /* Next entry in linked list */
+   struct sReadMemList * mpNext;
+
+} sReadMemList;
+
+/*=========================================================================*/
+// Struct sNotifyList
+//
+// Structure that defines an entry in a Notification linked list
+/*=========================================================================*/
+typedef struct sNotifyList
+{
+   /* Function to be run when data becomes available */
+   void (* mpNotifyFunct)(struct sGobiUSBNet *, u16, void *);
+
+   /* Transaction ID */
+   u16 mTransactionID;
+
+   /* Data to provide as parameter to mpNotifyFunct */
+   void * mpData;
+
+   /* Next entry in linked list */
+   struct sNotifyList * mpNext;
+
+} sNotifyList;
+
+/*=========================================================================*/
+// Struct sURBList
+//
+// Structure that defines an entry in a URB linked list
+/*=========================================================================*/
+typedef struct sURBList
+{
+   /* The current URB */
+   struct urb * mpURB;
+
+   /* Next entry in linked list */
+   struct sURBList * mpNext;
+
+} sURBList;
+
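+/*=========================================================================*/
+// Illustration (added commentary, not part of the original file): the three
+// lists above are simple singly-linked lists walked under sQMIDev's
+// mClientMemLock. A pop by transaction ID, in the style of the
+// PopFromReadMemList() declared in QMIDevice.h, looks roughly like:
+//
+//    sReadMemList ** ppNode = &pClientMem->mpList;
+//    while (*ppNode != NULL)
+//    {
+//       if ((*ppNode)->mTransactionID == transactionID)
+//       {
+//          sReadMemList * pDel = *ppNode;
+//          *ppNode = pDel->mpNext;        // unlink the entry
+//          *ppData = pDel->mpData;        // hand the buffer to the caller
+//          *pDataSize = pDel->mDataSize;
+//          kfree( pDel );
+//          return true;
+//       }
+//       ppNode = &(*ppNode)->mpNext;
+//    }
+//    return false;
+/*=========================================================================*/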
+/*=========================================================================*/
+// Struct sClientMemList
+//
+// Structure that defines an entry in a Client Memory linked list
+//    Stores data specific to a Service Type and Client ID
+/*=========================================================================*/
+typedef struct sClientMemList
+{
+   /* Client ID for this Client */
+   u16 mClientID;
+
+   /* Linked list of Read entries */
+   /*    Stores data read from device before sending to client */
+   sReadMemList * mpList;
+
+   /* Linked list of Notification entries */
+   /*    Stores notification functions to be run as data becomes
+         available or the device is removed */
+   sNotifyList * mpReadNotifyList;
+
+   /* Linked list of URB entries */
+   /*    Stores pointers to outstanding URBs which need to be canceled
+         when the client is deregistered or the device is removed */
+   sURBList * mpURBList;
+
+   /* Next entry in linked list */
+   struct sClientMemList * mpNext;
+
+   /* Wait queue object for poll() */
+   wait_queue_head_t mWaitQueue;
+
+} sClientMemList;
+
+/*=========================================================================*/
+// Struct sURBSetupPacket
+//
+// Structure that defines a USB Setup packet for Control URBs
+//    Taken from USB CDC specifications
+/*=========================================================================*/
+typedef struct sURBSetupPacket
+{
+   /* Request type */
+   u8 mRequestType;
+
+   /* Request code */
+   u8 mRequestCode;
+
+   /* Value */
+   u16 mValue;
+
+   /* Index */
+   u16 mIndex;
+
+   /* Length of Control URB */
+   u16 mLength;
+
+} sURBSetupPacket;
+
+// Common value for sURBSetupPacket.mLength
+#define DEFAULT_READ_URB_LENGTH 0x1000
+
+#ifdef QUECTEL_QMI_MERGE
+#define MERGE_PACKET_IDENTITY 0x2c7c
+#define MERGE_PACKET_VERSION 0x0001
+#define MERGE_PACKET_MAX_PAYLOAD_SIZE 56
+typedef struct sQMIMsgHeader {
+   u16 idenity;
+   u16 version;
+   u16 cur_len;
+   u16 total_len;
+} sQMIMsgHeader;
+
+typedef struct sQMIMsgPacket {
+   sQMIMsgHeader header;
+   u16 len;
+   char buf[DEFAULT_READ_URB_LENGTH];
+} sQMIMsgPacket;
+#endif
+
+#ifdef CONFIG_PM
+#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
+/*=========================================================================*/
+// Struct sAutoPM
+//
+// Structure used to manage AutoPM thread which determines whether the
+// device is in use or may enter autosuspend. Also submits net
+// transmissions asynchronously.
+/*=========================================================================*/
+typedef struct sAutoPM
+{
+   /* Thread for atomic autopm function */
+   struct task_struct * mpThread;
+
+   /* Signal for completion when it's time for the thread to work */
+   struct completion mThreadDoWork;
+
+   /* Time to exit? */
+   bool mbExit;
+
+   /* List of URB's queued to be sent to the device */
+   sURBList * mpURBList;
+
+   /* URB list lock (for adding and removing elements) */
+   spinlock_t mURBListLock;
+
+   /* Length of the URB list */
+   atomic_t mURBListLen;
+
+   /* Active URB */
+   struct urb * mpActiveURB;
+
+   /* Active URB lock (for adding and removing elements) */
+   spinlock_t mActiveURBLock;
+
+   /* Duplicate pointer to USB device interface */
+   struct usb_interface * mpIntf;
+
+} sAutoPM;
+#endif
+#endif /* CONFIG_PM */
+
+/*=========================================================================*/
+// Struct sQMIDev
+//
+// Structure that defines the data for the QMI device
+/*=========================================================================*/
+typedef struct sQMIDev
+{
+   /* Device number */
+   dev_t mDevNum;
+
+   /* Device class */
+   struct class * mpDevClass;
+
+   /* cdev struct */
+   struct cdev mCdev;
+
+   /* is mCdev initialized? */
+   bool mbCdevIsInitialized;
+
+   /* Pointer to read URB */
+   struct urb * mpReadURB;
+
+//#define READ_QMI_URB_ERROR
+#ifdef READ_QMI_URB_ERROR
+   struct timer_list mReadUrbTimer;
+#endif
+
+#ifdef QUECTEL_QMI_MERGE
+   sQMIMsgPacket * mpQmiMsgPacket;
+#endif
+
+   /* Read setup packet */
+   sURBSetupPacket * mpReadSetupPacket;
+
+   /* Read buffer attached to current read URB */
+   void * mpReadBuffer;
+
+   /* Interrupt URB */
+   /*    Used to asynchronously notify when read data is available */
+   struct urb * mpIntURB;
+
+   /* Buffer used by Interrupt URB */
+   void * mpIntBuffer;
+
+   /* Pointer to memory linked list for all clients */
+   sClientMemList * mpClientMemList;
+
+   /* Spinlock for client Memory entries */
+   spinlock_t mClientMemLock;
+
+   /* Transaction ID associated with QMICTL "client" */
+   atomic_t mQMICTLTransactionID;
+
+} sQMIDev;
+
+typedef struct {
+   u32 qmap_enabled;
+   u32 dl_data_aggregation_max_datagrams;
+   u32 dl_data_aggregation_max_size;
+   u32 ul_data_aggregation_max_datagrams;
+   u32 ul_data_aggregation_max_size;
+   u32 dl_minimum_padding;
+} QMAP_SETTING;
+
+/*=========================================================================*/
+// Struct sGobiUSBNet
+//
+// Structure that defines the data associated with the Qualcomm USB device
+/*=========================================================================*/
+typedef struct sGobiUSBNet
+{
+   atomic_t refcount;
+
+   /* Net device structure */
+   struct usbnet * mpNetDev;
+#ifdef QUECTEL_WWAN_QMAP
+   unsigned link_state;
+   int qmap_mode;
+   int qmap_size;
+   int qmap_version;
+   struct net_device *mpQmapNetDev[QUECTEL_WWAN_QMAP];
+   struct tasklet_struct txq;
+
+   QMAP_SETTING qmap_settings;
+#if defined(QUECTEL_UL_DATA_AGG)
+   struct ul_agg_ctx agg_ctx;
+#endif
+
+#ifdef QUECTEL_BRIDGE_MODE
+   int m_qmap_bridge_mode[QUECTEL_WWAN_QMAP];
+#endif
+#endif
+
+#if 1 //def DATA_MODE_RP
+   bool mbMdm9x07;
+   bool mbMdm9x06; //for BG96
+   /* QMI "device" works in IP Mode or ETH Mode */
+   bool mbRawIPMode;
+#ifdef QUECTEL_BRIDGE_MODE
+   int m_bridge_mode;
+   uint m_bridge_ipv4;
+   unsigned char mHostMAC[6];
+#endif
+   int m_qcrmcall_mode;
+#endif
+
+   struct completion mQMIReadyCompletion;
+   bool mbQMIReady;
+   bool mbProbeDone;
+   bool mbQMISyncIng;
+
+   /* Usb device interface */
+   struct usb_interface * mpIntf;
+
+   /* Pointers to usbnet_open and usbnet_stop functions */
+   int (* mpUSBNetOpen)(struct net_device *);
+   int (* mpUSBNetStop)(struct net_device *);
+
+   /* Reason(s) why interface is down */
+   /* Used by Gobi*DownReason */
+   unsigned long mDownReason;
+#define NO_NDIS_CONNECTION    0
+#define CDC_CONNECTION_SPEED  1
+#define DRIVER_SUSPENDED      2
+#define NET_IFACE_STOPPED     3
+
+   /* QMI "device" status */
+   bool mbQMIValid;
+
+   bool mbDeregisterQMIDevice;
+
+   /* QMI "device" memory */
+   sQMIDev mQMIDev;
+
+   /* Device MEID */
+   char mMEID[14];
+   struct hrtimer timer;
+   struct tasklet_struct bh;
+   unsigned long
+      pending_num : 8,
+      pending_size : 16;
+   struct sk_buff *pending_pool[16];
+
+#ifdef CONFIG_PM
+#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
+   /* AutoPM thread */
+   sAutoPM mAutoPM;
+#endif
+#endif /* CONFIG_PM */
+} sGobiUSBNet;
+
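+/*
+ * Illustration (added commentary, not part of the original file): the
+ * sQMIFilpStorage defined just below is what UserspaceOpen() in QMIDevice.c
+ * is expected to hang off each file handle. A sketch, not the driver's
+ * verbatim code:
+ *
+ *    sQMIFilpStorage * pFilpData = kmalloc( sizeof(sQMIFilpStorage), GFP_KERNEL );
+ *    pFilpData->mClientID = (u16)-1;   // real client bound later via ioctl
+ *    pFilpData->mpDev = pDev;
+ *    pFilp->private_data = pFilpData;
+ */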
+/*=========================================================================*/
+// Struct sQMIFilpStorage
+//
+// Structure that defines the storage each file handle contains
+//    Relates the file handle to a client
+/*=========================================================================*/
+typedef struct sQMIFilpStorage
+{
+   /* Client ID */
+   u16 mClientID;
+
+   /* Device pointer */
+   sGobiUSBNet * mpDev;
+
+} sQMIFilpStorage;
+
diff --git a/package/wwan/driver/quectel_MHI/Makefile b/package/wwan/driver/quectel_MHI/Makefile
new file mode 100755
index 000000000..bf6f718a0
--- /dev/null
+++ b/package/wwan/driver/quectel_MHI/Makefile
@@ -0,0 +1,47 @@
+#
+# Copyright (C) 2015 OpenWrt.org
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information.
+#
+
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=pcie_mhi
+PKG_VERSION:=3.2
+PKG_RELEASE:=1
+
+include $(INCLUDE_DIR)/kernel.mk
+include $(INCLUDE_DIR)/package.mk
+
+define KernelPackage/pcie_mhi
+  SUBMENU:=PCIE Support
+  TITLE:=Kernel pcie driver for MHI device
+  DEPENDS:=+pciids +pciutils +quectel-CM-5G
+  FILES:=$(PKG_BUILD_DIR)/pcie_mhi.ko
+  AUTOLOAD:=$(call AutoLoad,90,pcie_mhi)
+endef
+
+define KernelPackage/pcie_mhi/description
+  Kernel module for registering a custom pciemhi platform device.
+endef
+
+MAKE_OPTS:= \
+	ARCH="$(LINUX_KARCH)" \
+	CROSS_COMPILE="$(TARGET_CROSS)" \
+	CXXFLAGS="$(TARGET_CXXFLAGS)" \
+	M="$(PKG_BUILD_DIR)" \
+	$(EXTRA_KCONFIG)
+
+define Build/Prepare
+	mkdir -p $(PKG_BUILD_DIR)
+	$(CP) ./src/* $(PKG_BUILD_DIR)/
+endef
+
+define Build/Compile
+	$(MAKE) -C "$(LINUX_DIR)" \
+		$(MAKE_OPTS) \
+		modules
+endef
+
+$(eval $(call KernelPackage,pcie_mhi))
diff --git a/package/wwan/driver/quectel_MHI/src/Makefile b/package/wwan/driver/quectel_MHI/src/Makefile
new file mode 100644
index 000000000..1b2c1f585
--- /dev/null
+++ b/package/wwan/driver/quectel_MHI/src/Makefile
@@ -0,0 +1,34 @@
+#ccflags-y += -g
+obj-m += pcie_mhi.o
+pcie_mhi-objs := core/mhi_init.o core/mhi_main.o core/mhi_pm.o core/mhi_boot.o core/mhi_dtr.o controllers/mhi_qti.o
+pcie_mhi-objs += devices/mhi_uci.o
+
+ifeq (1,1)
+pcie_mhi-objs += devices/mhi_netdev_quectel.o
+else
+pcie_mhi-objs += devices/mhi_netdev.o
+pcie_mhi-objs += devices/rmnet_handler.o
+endif
+
+PWD := $(shell pwd)
+ifeq ($(ARCH),)
+ARCH := $(shell uname -m)
+endif
+ifeq ($(CROSS_COMPILE),)
+CROSS_COMPILE :=
+endif
+ifeq ($(KDIR),)
+KDIR := /lib/modules/$(shell uname -r)/build
+endif
+
+pcie_mhi: clean
+	$(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) modules
+	#cp pcie_mhi.ko /tftpboot/
+
+clean:
+	$(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) clean
+	find . -name '*.o.ur-safe' | xargs rm -f
+
+install: pcie_mhi
+	sudo cp pcie_mhi.ko /lib/modules/${shell uname -r}/kernel/drivers/pci/
+	sudo depmod
diff --git a/package/wwan/driver/quectel_MHI/src/README b/package/wwan/driver/quectel_MHI/src/README
new file mode 100644
index 000000000..2bb6ff0a9
--- /dev/null
+++ b/package/wwan/driver/quectel_MHI/src/README
@@ -0,0 +1,36 @@
+1. port the pcie_mhi driver as follows
+
+$ git diff drivers/Makefile
+diff --git a/drivers/Makefile b/drivers/Makefile
+index 77fbc52..e45837e 100644
+--- a/drivers/Makefile
++++ b/drivers/Makefile
+@@ -184,3 +184,4 @@ obj-$(CONFIG_FPGA)		+= fpga/
+ obj-$(CONFIG_FSI)		+= fsi/
+ obj-$(CONFIG_TEE)		+= tee/
+ obj-$(CONFIG_MULTIPLEXER)	+= mux/
++obj-y				+= pcie_mhi/
+
+$ tree drivers/pcie_mhi/ -L 1
+drivers/pcie_mhi/
+   controllers
+   core
+   devices
+   Makefile
+
+2. check that the RG500 attaches to the pcie_mhi driver successfully
+
+root@OpenWrt:/# lspci
+00:00.0 Class 0604: 17cb:0302
+01:00.0 Class ff00: 17cb:0306
+
+root@OpenWrt:~# dmesg | grep mhi
+[ 138.483252] mhi_init Quectel_Linux_PCIE_MHI_Driver_V1.3.0.6
+[ 138.492350] mhi_pci_probe pci_dev->name = 0000:01:00.0, domain=0, bus=1, slot=0, vendor=17CB, device=0306
+
+3. for usage, see the following logs
+
+log/QXDM_OVER_PCIE.txt
+log/AT_OVER_PCIE.txt
+log/MBIM_OVER_PCIE.txt
+log/QMI_OVER_PCIE.txt
diff --git a/package/wwan/driver/quectel_MHI/src/ReleaseNote.txt b/package/wwan/driver/quectel_MHI/src/ReleaseNote.txt
new file mode 100644
index 000000000..d923b98db
--- /dev/null
+++ b/package/wwan/driver/quectel_MHI/src/ReleaseNote.txt
@@ -0,0 +1,103 @@
+Release Notes
+
+[V1.3.4]
+Date: 12/8/2022
+enhancement:
+ 1. only allow autosuspend to be enabled when the module is in MHI_EE_AMSS
+ 2. show PCIe link speed and width at driver probe
+ 3. check PCIe link status at driver probe by reading the PCIe VID and PID;
+    if the PCIe link is down, return -EIO
+ 4. support RM520 (1eac:1004)
+ 5. support qmap command packet
+fix:
+ 1. fix TX queue being wrongly stopped during uplink TPUT tests
+ 2. fix module failing to boot up after QFirehose (very small probability)
+ 3. mhi uci: add a mutex lock for concurrent reads/writes
+
+[V1.3.3]
+Date: 30/6/2022
+enhancement:
+ 1. remove an unnecessary kmalloc during QFirehose
+ 2. support mhi monitor (like usbmon), usage: cat /sys/kernel/debug/mhi_q/0306_00\:01.00/mhimon
+ 3. set ring size of event 0 to 256 (from 1024), required by x6x
+ 4. support PCIE local network card mhi_swip0 (chan 46/47), default disabled
+ 5. port the IPQ5018 MHI rate control code from spf11.5
+ 6. set pcie rmnet download max qmap packet size to 15KB (same as the IPQ MHI driver)
+ 7. support setting a different MAC address for each rmnet net card
+ 8. when mhi netdev fails to allocate memory, use delayed work instead of work
+ 9. optimize code for the case 'when the driver loads, the modem is still in MHI_EE_PTHRU'
+fix:
+ 1. fix unsynchronized access to rp/wp when mhi_queue_xxx and mhi_process_xxx_ring run on different CPUs
+ 2. set the DMA mask at driver probe; some SOCs like rpi_4 need it
+
+[V1.3.2]
+Date: 12/16/2021
+enhancement:
+ 1. support Linux kernel V5.14
+ 2. mhi_netdev_quectel.c: do not print logs in softirq context
+
+[V1.3.1]
+Date: 9/26/2021
+enhancement:
+fix:
+
+[V1.3.0.19]
+Date: 9/18/2021
+enhancement:
+ 1. support sdx62 (17cb:0308)
+ 2. support IPQ5018's NSS
+ 3. use 'qsdk/qca/src/data-kernel/drivers/rmnet-nss/rmnet_nss.c' instead of the bundled rmnet_nss.c;
+    pcie_mhi.ko must then be loaded after rmnet_nss.ko
+ 4. allow the BHI IRQ to be other than 0 (for ipq5018)
+fix:
+
+[V1.3.0.18]
+Date: 4/14/2021
+enhancement:
+ 1. support multiple MBIM calls, usage:
+    # insmod pcie_mhi.ko mhi_mbim_enabeld=1 qmap_mode=4
+    # quectel-mbim-proxy -d /dev/mhi_MBIM &
+    # quectel-CM -n X
+fix:
+
+[V1.3.0.17]
+Date: 3/11/2021
+enhancement:
+fix:
+ 1. fix very high CPU load during TPUT tests when there is only one MSI interrupt
+ 2. fix an error on the latest X24 modem
+
+[V1.3.0.16]
+Date: 11/18/2020
+enhancement:
+fix:
+ 1. increase ring size to 32; for in-bound channels, if one ring is full, the modem will not generate MSI interrupts for any channel
+
+[V1.3.0.15]
+Date: 10/30/2020
+enhancement:
+ 1. support multiple modems, named /dev/mhi_X
+fix:
+ 1. fix a compile error on kernel v5.8
+
+[V1.3.0.14]
+Date: 10/9/2020
+enhancement:
+ 1. support EM120&EM160
+fix:
+ 1. fix a compile error on kernel v5.6
+ 2. support runtime suspend
+
+[V1.3.0.13]
+Date: 9/7/2020
+enhancement:
+ 1. support EM120&EM160
+fix:
+ 1. fix an error on X55 + PCIE2.0 (e.g. IPQ4019)
+ 2. support runtime suspend
+
+[V1.3.0.12]
+Date: 7/7/2020
+enhancement:
+ 1. support creating only one netcard (enabled by the macro MHI_NETDEV_ONE_CARD_MODE),
+fix:
\ No newline at end of file
diff --git a/package/wwan/driver/quectel_MHI/src/controllers/Kconfig b/package/wwan/driver/quectel_MHI/src/controllers/Kconfig
new file mode 100644
index 000000000..e18b38b25
--- /dev/null
+++ b/package/wwan/driver/quectel_MHI/src/controllers/Kconfig
@@ -0,0 +1,13 @@
+menu "MHI controllers"
+
+config MHI_QTI
+	tristate "MHI QTI"
+	depends on MHI_BUS
+	help
+	  If you say yes to this option, MHI bus support for QTI modem chipsets
+	  will be enabled. QTI PCIe based modems use MHI as the communication
+	  protocol. The MHI control driver is the bus master for such modems. As
+	  the bus master driver, it oversees power management operations such as
+	  suspend, resume, and powering the device on and off.
+
+endmenu
diff --git a/package/wwan/driver/quectel_MHI/src/controllers/Makefile b/package/wwan/driver/quectel_MHI/src/controllers/Makefile
new file mode 100644
index 000000000..ab9ec55fb
--- /dev/null
+++ b/package/wwan/driver/quectel_MHI/src/controllers/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MHI_QTI) += mhi_qti.o mhi_arch_qti.o
diff --git a/package/wwan/driver/quectel_MHI/src/controllers/mhi_arch_qti.c b/package/wwan/driver/quectel_MHI/src/controllers/mhi_arch_qti.c
new file mode 100644
index 000000000..de19d94c0
--- /dev/null
+++ b/package/wwan/driver/quectel_MHI/src/controllers/mhi_arch_qti.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.*/
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "../core/mhi.h"
+#include "mhi_qti.h"
+
+struct arch_info {
+   struct mhi_dev *mhi_dev;
+   struct msm_bus_scale_pdata *msm_bus_pdata;
+   u32 bus_client;
+   struct pci_saved_state *pcie_state;
+   struct pci_saved_state *ref_pcie_state;
+   struct dma_iommu_mapping *mapping;
+};
+
+struct mhi_bl_info {
+   struct mhi_device *mhi_device;
+   async_cookie_t cookie;
+   void *ipc_log;
+};
+
+/* ipc log markings */
+#define DLOG "Dev->Host: "
+#define HLOG "Host: "
+
+#ifdef CONFIG_MHI_DEBUG
+
+#define MHI_IPC_LOG_PAGES (100)
+enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_LVL_VERBOSE;
+
+#else
+
+#define MHI_IPC_LOG_PAGES (10)
+enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_LVL_ERROR;
+
+#endif
+
+static int mhi_arch_set_bus_request(struct mhi_controller *mhi_cntrl, int index)
+{
+   struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+   struct arch_info *arch_info = mhi_dev->arch_info;
+
+   MHI_LOG("Setting bus request to index %d\n", index);
+
+   if (arch_info->bus_client)
+      return msm_bus_scale_client_update_request(
+                     arch_info->bus_client,
+                     index);
+
+   /* default return success */
+   return 0;
+}
+
+static void mhi_bl_dl_cb(struct mhi_device *mhi_dev,
+          struct mhi_result *mhi_result)
+{
+   struct mhi_bl_info *mhi_bl_info = mhi_device_get_devdata(mhi_dev);
+   char *buf = mhi_result->buf_addr;
+
+   /* force a null at last character */
+   buf[mhi_result->bytes_xferd - 1] = 0;
+
+   ipc_log_string(mhi_bl_info->ipc_log, "%s %s", DLOG, buf);
+}
+
+static void mhi_bl_dummy_cb(struct mhi_device *mhi_dev,
+             struct mhi_result *mhi_result)
+{
+}
+
+static void mhi_bl_remove(struct mhi_device *mhi_dev)
+{
+   struct mhi_bl_info *mhi_bl_info = mhi_device_get_devdata(mhi_dev);
+
+   ipc_log_string(mhi_bl_info->ipc_log, HLOG "Received Remove notif.\n");
+
+   /* wait for boot monitor to exit */
+   async_synchronize_cookie(mhi_bl_info->cookie + 1);
+}
+
+static void
mhi_bl_boot_monitor(void *data, async_cookie_t cookie) +{ + struct mhi_bl_info *mhi_bl_info = data; + struct mhi_device *mhi_device = mhi_bl_info->mhi_device; + struct mhi_controller *mhi_cntrl = mhi_device->mhi_cntrl; + /* 15 sec timeout for booting device */ + const u32 timeout = msecs_to_jiffies(15000); + + /* wait for device to enter boot stage */ + wait_event_timeout(mhi_cntrl->state_event, mhi_cntrl->ee == MHI_EE_AMSS + || mhi_cntrl->ee == MHI_EE_DISABLE_TRANSITION, + timeout); + + if (mhi_cntrl->ee == MHI_EE_AMSS) { + ipc_log_string(mhi_bl_info->ipc_log, HLOG + "Device successfully booted to mission mode\n"); + + mhi_unprepare_from_transfer(mhi_device); + } else { + ipc_log_string(mhi_bl_info->ipc_log, HLOG + "Device failed to boot to mission mode, ee = %s\n", + TO_MHI_EXEC_STR(mhi_cntrl->ee)); + } +} + +static int mhi_bl_probe(struct mhi_device *mhi_dev, + const struct mhi_device_id *id) +{ + char node_name[32]; + struct mhi_bl_info *mhi_bl_info; + + mhi_bl_info = devm_kzalloc(&mhi_dev->dev, sizeof(*mhi_bl_info), + GFP_KERNEL); + if (!mhi_bl_info) + return -ENOMEM; + + snprintf(node_name, sizeof(node_name), "mhi_bl_%04x_%02u.%02u.%02u", + mhi_dev->dev_id, mhi_dev->domain, mhi_dev->bus, mhi_dev->slot); + + mhi_bl_info->ipc_log = ipc_log_context_create(MHI_IPC_LOG_PAGES, + node_name, 0); + if (!mhi_bl_info->ipc_log) + return -EINVAL; + + mhi_bl_info->mhi_device = mhi_dev; + mhi_device_set_devdata(mhi_dev, mhi_bl_info); + + ipc_log_string(mhi_bl_info->ipc_log, HLOG + "Entered SBL, Session ID:0x%x\n", + mhi_dev->mhi_cntrl->session_id); + + /* start a thread to monitor entering mission mode */ + mhi_bl_info->cookie = async_schedule(mhi_bl_boot_monitor, mhi_bl_info); + + return 0; +} + +static const struct mhi_device_id mhi_bl_match_table[] = { + { .chan = "BL" }, + {}, +}; + +static struct mhi_driver mhi_bl_driver = { + .id_table = mhi_bl_match_table, + .remove = mhi_bl_remove, + .probe = mhi_bl_probe, + .ul_xfer_cb = mhi_bl_dummy_cb, + .dl_xfer_cb = mhi_bl_dl_cb, + .driver = { + .name = "MHI_BL", + .owner = THIS_MODULE, + }, +}; + +int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl) +{ + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct arch_info *arch_info = mhi_dev->arch_info; + char node[32]; + + if (!arch_info) { + arch_info = devm_kzalloc(&mhi_dev->pci_dev->dev, + sizeof(*arch_info), GFP_KERNEL); + if (!arch_info) + return -ENOMEM; + + mhi_dev->arch_info = arch_info; + + snprintf(node, sizeof(node), "mhi_%04x_%02u.%02u.%02u", + mhi_cntrl->dev_id, mhi_cntrl->domain, mhi_cntrl->bus, + mhi_cntrl->slot); + mhi_cntrl->log_buf = ipc_log_context_create(MHI_IPC_LOG_PAGES, + node, 0); + mhi_cntrl->log_lvl = mhi_ipc_log_lvl; + + /* save reference state for pcie config space */ + arch_info->ref_pcie_state = pci_store_saved_state( + mhi_dev->pci_dev); + + mhi_driver_register(&mhi_bl_driver); + } + + return mhi_arch_set_bus_request(mhi_cntrl, 1); +} + +void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl) +{ + mhi_arch_set_bus_request(mhi_cntrl, 0); +} + +int mhi_arch_link_off(struct mhi_controller *mhi_cntrl, bool graceful) +{ + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct arch_info *arch_info = mhi_dev->arch_info; + struct pci_dev *pci_dev = mhi_dev->pci_dev; + int ret; + + MHI_LOG("Entered\n"); + + if (graceful) { + pci_clear_master(pci_dev); + ret = pci_save_state(mhi_dev->pci_dev); + if (ret) { + MHI_ERR("Failed with pci_save_state, ret:%d\n", ret); + return ret; + } + + arch_info->pcie_state = pci_store_saved_state(pci_dev); 
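+      /*
+       * (added note) the saved config space stashed in arch_info->pcie_state
+       * is reloaded by mhi_arch_link_on() via pci_load_and_free_saved_state()
+       * when the link is brought back up
+       */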
+      pci_disable_device(pci_dev);
+   }
+
+   /*
+    * We will always attempt to put link into D3hot, however
+    * link down may have happened due to a fatal error, so
+    * ignoring the return code
+    */
+   pci_set_power_state(pci_dev, PCI_D3hot);
+
+   /* release the resources */
+   mhi_arch_set_bus_request(mhi_cntrl, 0);
+
+   MHI_LOG("Exited\n");
+
+   return 0;
+}
+
+int mhi_arch_link_on(struct mhi_controller *mhi_cntrl)
+{
+   struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+   struct arch_info *arch_info = mhi_dev->arch_info;
+   struct pci_dev *pci_dev = mhi_dev->pci_dev;
+   int ret;
+
+   MHI_LOG("Entered\n");
+
+   /* request resources and establish link training */
+   ret = mhi_arch_set_bus_request(mhi_cntrl, 1);
+   if (ret)
+      MHI_LOG("Could not set bus frequency, ret:%d\n", ret);
+
+   ret = pci_set_power_state(pci_dev, PCI_D0);
+   if (ret) {
+      MHI_ERR("Failed to set PCI_D0 state, ret:%d\n", ret);
+      return ret;
+   }
+
+   ret = pci_enable_device(pci_dev);
+   if (ret) {
+      MHI_ERR("Failed to enable device, ret:%d\n", ret);
+      return ret;
+   }
+
+   ret = pci_load_and_free_saved_state(pci_dev, &arch_info->pcie_state);
+   if (ret)
+      MHI_LOG("Failed to load saved cfg state\n");
+
+   pci_restore_state(pci_dev);
+   pci_set_master(pci_dev);
+
+   MHI_LOG("Exited\n");
+
+   return 0;
+}
diff --git a/package/wwan/driver/quectel_MHI/src/controllers/mhi_qcom.c b/package/wwan/driver/quectel_MHI/src/controllers/mhi_qcom.c
new file mode 100644
index 000000000..df6ce193c
--- /dev/null
+++ b/package/wwan/driver/quectel_MHI/src/controllers/mhi_qcom.c
@@ -0,0 +1,715 @@
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../core/mhi.h" +#include "mhi_qcom.h" + +#if 1 +#ifndef PCI_IRQ_MSI +#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,10,53 )) +int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec) +{ + int nvec = maxvec; + int rc; + + if (maxvec < minvec) + return -ERANGE; + + do { + rc = pci_enable_msi_block(dev, nvec); + if (rc < 0) { + return rc; + } else if (rc > 0) { + if (rc < minvec) + return -ENOSPC; + nvec = rc; + } + } while (rc); + + return nvec; +} +#endif + +static int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, + unsigned int max_vecs, unsigned int flags) +{ + return pci_enable_msi_range(dev, min_vecs, max_vecs); +} + +static void pci_free_irq_vectors(struct pci_dev *dev) +{ + pci_disable_msi(dev); +} + +static int pci_irq_vector(struct pci_dev *dev, unsigned int nr) +{ + return dev->irq + nr; +} +#endif +#endif + +static struct pci_device_id mhi_pcie_device_id[] = { + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0303)}, //SDX20 + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0304)}, //SDX24 + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0305)}, + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0306)}, //SDX55 + {PCI_DEVICE(0x2C7C, 0x0512)}, + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, MHI_PCIE_DEBUG_ID)}, + {0}, +}; + +MODULE_DEVICE_TABLE(pci, mhi_pcie_device_id); + +static struct pci_driver mhi_pcie_driver; + +void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl) +{ + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct pci_dev *pci_dev = mhi_dev->pci_dev; + + pci_free_irq_vectors(pci_dev); + iounmap(mhi_cntrl->regs); + mhi_cntrl->regs = NULL; + pci_clear_master(pci_dev); + pci_release_region(pci_dev, mhi_dev->resn); + pci_disable_device(pci_dev); +} + +static int mhi_init_pci_dev(struct mhi_controller *mhi_cntrl) +{ + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct pci_dev *pci_dev = mhi_dev->pci_dev; + int ret; + resource_size_t start, len; + int i; + + mhi_dev->resn = MHI_PCI_BAR_NUM; + ret = pci_assign_resource(pci_dev, mhi_dev->resn); + if (ret) { + MHI_ERR("Error assign pci resources, ret:%d\n", ret); + return ret; + } + + ret = pci_enable_device(pci_dev); + if (ret) { + MHI_ERR("Error enabling device, ret:%d\n", ret); + goto error_enable_device; + } + + ret = pci_request_region(pci_dev, mhi_dev->resn, "mhi"); + if (ret) { + MHI_ERR("Error pci_request_region, ret:%d\n", ret); + goto error_request_region; + } + + pci_set_master(pci_dev); + + start = pci_resource_start(pci_dev, mhi_dev->resn); + len = pci_resource_len(pci_dev, mhi_dev->resn); + mhi_cntrl->regs = ioremap_nocache(start, len); + MHI_LOG("mhi_cntrl->regs = %p\n", mhi_cntrl->regs); + if (!mhi_cntrl->regs) { + MHI_ERR("Error ioremap region\n"); + goto error_ioremap; + } + + ret = pci_alloc_irq_vectors(pci_dev, 1, mhi_cntrl->msi_required, PCI_IRQ_MSI); + if (IS_ERR_VALUE((ulong)ret) || ret < mhi_cntrl->msi_required) { + if (ret == -ENOSPC) { + /* imx_3.14.52_1.1.0_ga + diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c + index f06e8f0..6a9614f 100644 + --- a/drivers/pci/host/pcie-designware.c + +++ b/drivers/pci/host/pcie-designware.c + @@ -376,6 +376,13 @@ static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, + if (msgvec > 5) + msgvec = 0; + + +#if 1 //Add by Quectel 20190419 + + if (msgvec > 0 && pdev->vendor == 0x17cb) { + + 
dev_info(&pdev->dev, "%s quectel fixup pos=%d, msg_ctr=%04x, msgvec=%d\n", __func__, desc->msi_attrib.pos, msg_ctr, msgvec);
   + +         msgvec = 0;
   + +      }
   + +#endif
   + +
   +    irq = assign_irq((1 << msgvec), desc, &pos);
   +    if (irq < 0)
   +       return irq;
   */
   }
   //imx_4.1.15_2.0.0_ga & DELL_OPTIPLEX_7010 only alloc one msi interrupt for one pcie device
   if (ret != 1) {
      MHI_ERR("Failed to enable MSI, ret=%d, msi_required=%d\n", ret, mhi_cntrl->msi_required);
      goto error_req_msi;
   }
   }

   mhi_cntrl->msi_allocated = ret;
   MHI_LOG("msi_required = %d, msi_allocated = %d, msi_irq = %u\n", mhi_cntrl->msi_required, mhi_cntrl->msi_allocated, pci_dev->irq);

   for (i = 0; i < mhi_cntrl->msi_allocated; i++) {
      mhi_cntrl->irq[i] = pci_irq_vector(pci_dev, i);
      if (mhi_cntrl->irq[i] < 0) {
         ret = mhi_cntrl->irq[i];
         goto error_get_irq_vec;
      }
   }

#if 0
   /* configure runtime pm */
   pm_runtime_set_autosuspend_delay(&pci_dev->dev, MHI_RPM_SUSPEND_TMR_MS);
   pm_runtime_dont_use_autosuspend(&pci_dev->dev);
   pm_suspend_ignore_children(&pci_dev->dev, true);

   /*
    * pci framework will increment usage count (twice) before
    * calling local device driver probe function.
    * 1st pci.c pci_pm_init() calls pm_runtime_forbid
    * 2nd pci-driver.c local_pci_probe calls pm_runtime_get_sync
    * The framework expects the pci device driver to call
    * pm_runtime_put_noidle to decrement usage count after
    * successful probe and call pm_runtime_allow to enable
    * runtime suspend.
    */
   pm_runtime_mark_last_busy(&pci_dev->dev);
   pm_runtime_put_noidle(&pci_dev->dev);
#endif

   return 0;

error_get_irq_vec:
   pci_free_irq_vectors(pci_dev);

error_req_msi:
   iounmap(mhi_cntrl->regs);

error_ioremap:
   pci_clear_master(pci_dev);

error_request_region:
   pci_disable_device(pci_dev);

error_enable_device:
   pci_release_region(pci_dev, mhi_dev->resn);

   return ret;
}

#ifdef CONFIG_PM
static int mhi_runtime_idle(struct device *dev)
{
   struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);

   MHI_LOG("Entered returning -EBUSY\n");

   /*
    * The RPM framework during runtime resume always calls
    * rpm_idle to see if the device is ready to suspend.
    * If the dev.power usage_count is 0, the rpm fw will call the
    * rpm_idle cb to see if the device is ready to suspend.
    * If the cb returns 0, or the cb is not defined, the framework will
    * assume the device driver is ready to suspend;
    * therefore, the fw will schedule runtime suspend.
    * In MHI power management, the MHI host shall go to
    * runtime suspend only after entering MHI State M2, even if the
    * usage count is 0. Return -EBUSY to disable automatic suspend.
+ */ + return -EBUSY; +} + +static int mhi_runtime_suspend(struct device *dev) +{ + int ret = 0; + struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev); + + MHI_LOG("Enter\n"); + + mutex_lock(&mhi_cntrl->pm_mutex); + + ret = mhi_pm_suspend(mhi_cntrl); + if (ret) { + MHI_LOG("Abort due to ret:%d\n", ret); + goto exit_runtime_suspend; + } + + ret = mhi_arch_link_off(mhi_cntrl, true); + if (ret) + MHI_ERR("Failed to Turn off link ret:%d\n", ret); + +exit_runtime_suspend: + mutex_unlock(&mhi_cntrl->pm_mutex); + MHI_LOG("Exited with ret:%d\n", ret); + + return ret; +} + +static int mhi_runtime_resume(struct device *dev) +{ + int ret = 0; + struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev); + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + MHI_LOG("Enter\n"); + + mutex_lock(&mhi_cntrl->pm_mutex); + + if (!mhi_dev->powered_on) { + MHI_LOG("Not fully powered, return success\n"); + mutex_unlock(&mhi_cntrl->pm_mutex); + return 0; + } + + /* turn on link */ + ret = mhi_arch_link_on(mhi_cntrl); + if (ret) + goto rpm_resume_exit; + + /* enter M0 state */ + ret = mhi_pm_resume(mhi_cntrl); + +rpm_resume_exit: + mutex_unlock(&mhi_cntrl->pm_mutex); + MHI_LOG("Exited with :%d\n", ret); + + return ret; +} + +static int mhi_system_resume(struct device *dev) +{ + int ret = 0; + struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev); + + ret = mhi_runtime_resume(dev); + if (ret) { + MHI_ERR("Failed to resume link\n"); + } else { + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + } + + return ret; +} + +int mhi_system_suspend(struct device *dev) +{ + struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev); + + MHI_LOG("Entered\n"); + + /* if rpm status still active then force suspend */ + if (!pm_runtime_status_suspended(dev)) + return mhi_runtime_suspend(dev); + + pm_runtime_set_suspended(dev); + pm_runtime_disable(dev); + + MHI_LOG("Exit\n"); + return 0; +} +#endif + +/* checks if link is down */ +static int mhi_link_status(struct mhi_controller *mhi_cntrl, void *priv) +{ + struct mhi_dev *mhi_dev = priv; + u16 dev_id; + int ret; + + /* try reading device id, if dev id don't match, link is down */ + ret = pci_read_config_word(mhi_dev->pci_dev, PCI_DEVICE_ID, &dev_id); + + return (ret || dev_id != mhi_cntrl->dev_id) ? 
-EIO : 0; +} + +static int mhi_runtime_get(struct mhi_controller *mhi_cntrl, void *priv) +{ + struct mhi_dev *mhi_dev = priv; + struct device *dev = &mhi_dev->pci_dev->dev; + + return pm_runtime_get(dev); +} + +static void mhi_runtime_put(struct mhi_controller *mhi_cntrl, void *priv) +{ + struct mhi_dev *mhi_dev = priv; + struct device *dev = &mhi_dev->pci_dev->dev; + + pm_runtime_put_noidle(dev); +} + +static void mhi_status_cb(struct mhi_controller *mhi_cntrl, + void *priv, + enum MHI_CB reason) +{ + struct mhi_dev *mhi_dev = priv; + struct device *dev = &mhi_dev->pci_dev->dev; + + if (reason == MHI_CB_IDLE) { + MHI_LOG("Schedule runtime suspend 1\n"); + pm_runtime_mark_last_busy(dev); + pm_request_autosuspend(dev); + } +} + +int mhi_debugfs_trigger_m0(void *data, u64 val) +{ + struct mhi_controller *mhi_cntrl = data; + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + MHI_LOG("Trigger M3 Exit\n"); + pm_runtime_get(&mhi_dev->pci_dev->dev); + pm_runtime_put(&mhi_dev->pci_dev->dev); + + return 0; +} + +int mhi_debugfs_trigger_m3(void *data, u64 val) +{ + struct mhi_controller *mhi_cntrl = data; + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + MHI_LOG("Trigger M3 Entry\n"); + pm_runtime_mark_last_busy(&mhi_dev->pci_dev->dev); + pm_request_autosuspend(&mhi_dev->pci_dev->dev); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_m0_fops, NULL, + mhi_debugfs_trigger_m0, "%llu\n"); + +DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_m3_fops, NULL, + mhi_debugfs_trigger_m3, "%llu\n"); + +static int mhi_init_debugfs_trigger_go(void *data, u64 val) +{ + struct mhi_controller *mhi_cntrl = data; + + MHI_LOG("Trigger power up sequence\n"); + + mhi_async_power_up(mhi_cntrl); + + return 0; +} +DEFINE_SIMPLE_ATTRIBUTE(mhi_init_debugfs_trigger_go_fops, NULL, + mhi_init_debugfs_trigger_go, "%llu\n"); + + +int mhi_init_debugfs_debug_show(struct seq_file *m, void *d) +{ + seq_puts(m, "Enable debug mode to debug external soc\n"); + seq_puts(m, + "Usage: echo 'devid,timeout,domain,smmu_cfg' > debug_mode\n"); + seq_puts(m, "No spaces between parameters\n"); + seq_puts(m, "\t1. devid : 0 or pci device id to register\n"); + seq_puts(m, "\t2. timeout: mhi cmd/state transition timeout\n"); + seq_puts(m, "\t3. domain: Rootcomplex\n"); + seq_puts(m, "\t4. 
smmu_cfg: smmu configuration mask:\n"); + seq_puts(m, "\t\t- BIT0: ATTACH\n"); + seq_puts(m, "\t\t- BIT1: S1 BYPASS\n"); + seq_puts(m, "\t\t-BIT2: FAST_MAP\n"); + seq_puts(m, "\t\t-BIT3: ATOMIC\n"); + seq_puts(m, "\t\t-BIT4: FORCE_COHERENT\n"); + seq_puts(m, "\t\t-BIT5: GEOMETRY\n"); + seq_puts(m, "\tAll timeout are in ms, enter 0 to keep default\n"); + seq_puts(m, "Examples inputs: '0x307,10000'\n"); + seq_puts(m, "\techo '0,10000,1'\n"); + seq_puts(m, "\techo '0x307,10000,0,0x3d'\n"); + seq_puts(m, "firmware image name will be changed to debug.mbn\n"); + + return 0; +} + +static int mhi_init_debugfs_debug_open(struct inode *node, struct file *file) +{ + return single_open(file, mhi_init_debugfs_debug_show, NULL); +} + +static ssize_t mhi_init_debugfs_debug_write(struct file *fp, + const char __user *ubuf, + size_t count, + loff_t *pos) +{ + char *buf = kmalloc(count + 1, GFP_KERNEL); + /* #,devid,timeout,domain,smmu-cfg */ + int args[5] = {0}; + static char const *dbg_fw = "debug.mbn"; + int ret; + struct mhi_controller *mhi_cntrl = fp->f_inode->i_private; + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct pci_device_id *id; + + if (!buf) + return -ENOMEM; + + ret = copy_from_user(buf, ubuf, count); + if (ret) + goto error_read; + buf[count] = 0; + get_options(buf, ARRAY_SIZE(args), args); + kfree(buf); + + /* override default parameters */ + mhi_cntrl->fw_image = dbg_fw; + mhi_cntrl->edl_image = dbg_fw; + + if (args[0] >= 2 && args[2]) + mhi_cntrl->timeout_ms = args[2]; + + if (args[0] >= 3 && args[3]) + mhi_cntrl->domain = args[3]; + + if (args[0] >= 4 && args[4]) + mhi_dev->smmu_cfg = args[4]; + + /* If it's a new device id register it */ + if (args[0] && args[1]) { + /* find the debug_id and overwrite it */ + for (id = mhi_pcie_device_id; id->vendor; id++) + if (id->device == MHI_PCIE_DEBUG_ID) { + id->device = args[1]; + pci_unregister_driver(&mhi_pcie_driver); + ret = pci_register_driver(&mhi_pcie_driver); + } + } + + mhi_dev->debug_mode = true; + debugfs_create_file("go", 0444, mhi_cntrl->parent, mhi_cntrl, + &mhi_init_debugfs_trigger_go_fops); + pr_info( + "%s: ret:%d pcidev:0x%x smm_cfg:%u timeout:%u\n", + __func__, ret, args[1], mhi_dev->smmu_cfg, + mhi_cntrl->timeout_ms); + return count; + +error_read: + kfree(buf); + return ret; +} + +static const struct file_operations debugfs_debug_ops = { + .open = mhi_init_debugfs_debug_open, + .release = single_release, + .read = seq_read, + .write = mhi_init_debugfs_debug_write, +}; + +static struct mhi_controller * mhi_platform_probe(struct pci_dev *pci_dev) +{ + struct mhi_controller *mhi_cntrl; + struct mhi_dev *mhi_dev; + u64 addr_win[2]; + int ret; + + mhi_cntrl = mhi_alloc_controller(sizeof(*mhi_dev)); + if (!mhi_cntrl) { + pr_err("mhi_alloc_controller fail\n"); + return NULL; + } + + mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + mhi_cntrl->dev_id = pci_dev->device; + mhi_cntrl->domain = pci_domain_nr(pci_dev->bus); + mhi_cntrl->bus = pci_dev->bus->number; + mhi_cntrl->slot = PCI_SLOT(pci_dev->devfn); + mhi_dev->smmu_cfg = 0; + #if 0 //def CONFIG_HAVE_MEMBLOCK + addr_win[0] = memblock_start_of_DRAM(); + addr_win[1] = memblock_end_of_DRAM(); + #else +#define MHI_MEM_BASE_DEFAULT 0x000000000 +#define MHI_MEM_SIZE_DEFAULT 0x2000000000 + addr_win[0] = MHI_MEM_BASE_DEFAULT; + addr_win[1] = MHI_MEM_SIZE_DEFAULT; + if (sizeof(dma_addr_t) == 4) { + addr_win[1] = 0xFFFFFFFF; + } + #endif + + mhi_cntrl->iova_start = addr_win[0]; + mhi_cntrl->iova_stop = addr_win[1]; + + mhi_dev->pci_dev = pci_dev; + 
mhi_cntrl->pci_dev = pci_dev; + + /* setup power management apis */ + mhi_cntrl->status_cb = mhi_status_cb; + mhi_cntrl->runtime_get = mhi_runtime_get; + mhi_cntrl->runtime_put = mhi_runtime_put; + mhi_cntrl->link_status = mhi_link_status; + + ret = mhi_arch_platform_init(mhi_dev); + if (ret) + goto error_probe; + + ret = mhi_register_mhi_controller(mhi_cntrl); + if (ret) + goto error_register; + + if (mhi_cntrl->parent) + debugfs_create_file("debug_mode", 0444, mhi_cntrl->parent, + mhi_cntrl, &debugfs_debug_ops); + + return mhi_cntrl; + +error_register: + mhi_arch_platform_deinit(mhi_dev); + +error_probe: + mhi_free_controller(mhi_cntrl); + + return NULL; +} + +int mhi_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *device_id) +{ + struct mhi_controller *mhi_cntrl = NULL; + u32 domain = pci_domain_nr(pci_dev->bus); + u32 bus = pci_dev->bus->number; + u32 slot = PCI_SLOT(pci_dev->devfn); + struct mhi_dev *mhi_dev; + int ret; + + pr_info("%s pci_dev->name = %s, domain=%d, bus=%d, slot=%d, vendor=%04X, device=%04X\n", + __func__, dev_name(&pci_dev->dev), domain, bus, slot, pci_dev->vendor, pci_dev->device); + + mhi_cntrl = mhi_platform_probe(pci_dev); + if (!mhi_cntrl) { + pr_err("mhi_platform_probe fail\n"); + return -EPROBE_DEFER; + } + + mhi_cntrl->dev_id = pci_dev->device; + mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + mhi_dev->pci_dev = pci_dev; + mhi_dev->powered_on = true; + + ret = mhi_arch_pcie_init(mhi_cntrl); + if (ret) { + MHI_ERR("Error mhi_arch_pcie_init, ret:%d\n", ret); + return ret; + } + + ret = mhi_arch_iommu_init(mhi_cntrl); + if (ret) { + MHI_ERR("Error mhi_arch_iommu_init, ret:%d\n", ret); + goto error_iommu_init; + } + + ret = mhi_init_pci_dev(mhi_cntrl); + if (ret) { + MHI_ERR("Error mhi_init_pci_dev, ret:%d\n", ret); + goto error_init_pci; + } + + /* start power up sequence if not in debug mode */ + if (!mhi_dev->debug_mode) { + ret = mhi_async_power_up(mhi_cntrl); + if (ret) { + MHI_ERR("Error mhi_async_power_up, ret:%d\n", ret); + goto error_power_up; + } + } + +#if 0 + pm_runtime_mark_last_busy(&pci_dev->dev); + pm_runtime_allow(&pci_dev->dev); + pm_runtime_disable(&pci_dev->dev); +#endif + + if (mhi_cntrl->dentry) { + debugfs_create_file("m0", 0444, mhi_cntrl->dentry, mhi_cntrl, + &debugfs_trigger_m0_fops); + debugfs_create_file("m3", 0444, mhi_cntrl->dentry, mhi_cntrl, + &debugfs_trigger_m3_fops); + } + + dev_set_drvdata(&pci_dev->dev, mhi_cntrl); + MHI_LOG("Return successful\n"); + + return 0; + +error_power_up: + mhi_deinit_pci_dev(mhi_cntrl); + +error_init_pci: + mhi_arch_iommu_deinit(mhi_cntrl); + +error_iommu_init: + mhi_arch_pcie_deinit(mhi_cntrl); + + return ret; +} + +static void mhi_pci_remove(struct pci_dev *pci_dev) +{ + struct mhi_controller *mhi_cntrl = (struct mhi_controller *)dev_get_drvdata(&pci_dev->dev); + + if (mhi_cntrl && mhi_cntrl->pci_dev == pci_dev) { + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + MHI_LOG("%s\n", dev_name(&pci_dev->dev)); + if (!mhi_dev->debug_mode) { + mhi_power_down(mhi_cntrl, 1); + } + mhi_deinit_pci_dev(mhi_cntrl); + mhi_arch_iommu_deinit(mhi_cntrl); + mhi_arch_pcie_deinit(mhi_cntrl); + mhi_unregister_mhi_controller(mhi_cntrl); + } +} + +static const struct dev_pm_ops pm_ops = { + SET_RUNTIME_PM_OPS(mhi_runtime_suspend, + mhi_runtime_resume, + mhi_runtime_idle) + SET_SYSTEM_SLEEP_PM_OPS(mhi_system_suspend, mhi_system_resume) +}; + +static struct pci_driver mhi_pcie_driver = { + .name = "mhi", + .id_table = mhi_pcie_device_id, + .probe = mhi_pci_probe, + .remove = 
mhi_pci_remove, + .driver = { + .pm = &pm_ops + } +}; + +int __init mhi_controller_qcom_init(void) +{ + return pci_register_driver(&mhi_pcie_driver); +}; + +void mhi_controller_qcom_exit(void) +{ + pr_info("%s enter\n", __func__); + pci_unregister_driver(&mhi_pcie_driver); + pr_info("%s exit\n", __func__); +} diff --git a/package/wwan/driver/quectel_MHI/src/controllers/mhi_qcom.h b/package/wwan/driver/quectel_MHI/src/controllers/mhi_qcom.h new file mode 100644 index 000000000..bced45b38 --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/controllers/mhi_qcom.h @@ -0,0 +1,92 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef _MHI_QCOM_ +#define _MHI_QCOM_ + +/* iova cfg bitmask */ +#define MHI_SMMU_ATTACH BIT(0) +#define MHI_SMMU_S1_BYPASS BIT(1) +#define MHI_SMMU_FAST BIT(2) +#define MHI_SMMU_ATOMIC BIT(3) +#define MHI_SMMU_FORCE_COHERENT BIT(4) + +#define MHI_PCIE_VENDOR_ID (0x17cb) +#define MHI_PCIE_DEBUG_ID (0xffff) +#define MHI_RPM_SUSPEND_TMR_MS (3000) +#define MHI_PCI_BAR_NUM (0) + +struct mhi_dev { + struct pci_dev *pci_dev; + u32 smmu_cfg; + int resn; + void *arch_info; + bool powered_on; + bool debug_mode; +}; + +void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl); +int mhi_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *device_id); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,10,65 )) +static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask) +{ + int rc = dma_set_mask(dev, mask); + if (rc == 0) + dma_set_coherent_mask(dev, mask); + return rc; +} +#endif + +static inline int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl) +{ + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + mhi_cntrl->dev = &mhi_dev->pci_dev->dev; + + return dma_set_mask_and_coherent(mhi_cntrl->dev, DMA_BIT_MASK(64)); +} + +static inline void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl) +{ +} + +static inline int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl) +{ + return 0; +} + +static inline void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl) +{ +} + +static inline int mhi_arch_platform_init(struct mhi_dev *mhi_dev) +{ + return 0; +} + +static inline void mhi_arch_platform_deinit(struct mhi_dev *mhi_dev) +{ +} + +static inline int mhi_arch_link_off(struct mhi_controller *mhi_cntrl, + bool graceful) +{ + return 0; +} + +static inline int mhi_arch_link_on(struct mhi_controller *mhi_cntrl) +{ + return 0; +} + +#endif /* _MHI_QCOM_ */ diff --git a/package/wwan/driver/quectel_MHI/src/controllers/mhi_qti.c b/package/wwan/driver/quectel_MHI/src/controllers/mhi_qti.c new file mode 100644 index 000000000..ce508a41e --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/controllers/mhi_qti.c @@ -0,0 +1,1048 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2018-2019, The Linux Foundation. 
All rights reserved.*/
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/memblock.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include "../core/mhi.h"
+#include "../core/mhi_internal.h"
+#include "mhi_qti.h"
+
+#if 1
+#if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,10,65 ))
+static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
+{
+    int rc = dma_set_mask(dev, mask);
+    if (rc == 0)
+        dma_set_coherent_mask(dev, mask);
+    return rc;
+}
+#endif
+
+#ifdef PCI_IRQ_NOMSIX
+#define PCI_IRQ_MSI PCI_IRQ_NOMSIX
+#endif
+
+#ifndef PCI_IRQ_MSI
+#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,10,53 ))
+int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
+{
+    int nvec = maxvec;
+    int rc;
+
+    if (maxvec < minvec)
+        return -ERANGE;
+
+    do {
+        rc = pci_enable_msi_block(dev, nvec);
+        if (rc < 0) {
+            return rc;
+        } else if (rc > 0) {
+            if (rc < minvec)
+                return -ENOSPC;
+            nvec = rc;
+        }
+    } while (rc);
+
+    return nvec;
+}
+#endif
+
+static int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
+                 unsigned int max_vecs, unsigned int flags)
+{
+    return pci_enable_msi_range(dev, min_vecs, max_vecs);
+}
+
+static void pci_free_irq_vectors(struct pci_dev *dev)
+{
+    pci_disable_msi(dev);
+}
+
+static int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
+{
+#if 0//defined(CONFIG_PINCTRL_IPQ5018)
+    struct pcie_port *pp = dev->bus->sysdata;
+    pp->msi[nr]; /* MSI vectors may not be contiguous */
+#endif
+    return dev->irq + nr;
+}
+#endif
+#endif
+
+struct firmware_info {
+    unsigned int dev_id;
+    const char *fw_image;
+    const char *edl_image;
+};
+
+static const struct firmware_info firmware_table[] = {
+    {.dev_id = 0x306, .fw_image = "sdx55m/sbl1.mbn"},
+    {.dev_id = 0x305, .fw_image = "sdx50m/sbl1.mbn"},
+    {.dev_id = 0x304, .fw_image = "sbl.mbn", .edl_image = "edl.mbn"},
+    /* default, set to debug.mbn */
+    {.fw_image = "debug.mbn"},
+};
+
+static int debug_mode;
+module_param_named(debug_mode, debug_mode, int, 0644);
+
+int mhi_debugfs_trigger_m0(void *data, u64 val)
+{
+    struct mhi_controller *mhi_cntrl = data;
+    struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+
+    MHI_LOG("Trigger M3 Exit\n");
+    pm_runtime_get(&mhi_dev->pci_dev->dev);
+    pm_runtime_put(&mhi_dev->pci_dev->dev);
+
+    return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_m0_fops, NULL,
+            mhi_debugfs_trigger_m0, "%llu\n");
+
+int mhi_debugfs_trigger_m3(void *data, u64 val)
+{
+    struct mhi_controller *mhi_cntrl = data;
+    struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+
+    MHI_LOG("Trigger M3 Entry\n");
+    pm_runtime_mark_last_busy(&mhi_dev->pci_dev->dev);
+    pm_request_autosuspend(&mhi_dev->pci_dev->dev);
+
+    return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_m3_fops, NULL,
+            mhi_debugfs_trigger_m3, "%llu\n");
+
+void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl)
+{
+    struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+    struct pci_dev *pci_dev = mhi_dev->pci_dev;
+
+    pm_runtime_mark_last_busy(&pci_dev->dev);
+    pm_runtime_dont_use_autosuspend(&pci_dev->dev);
+    pm_runtime_disable(&pci_dev->dev);
+    pci_free_irq_vectors(pci_dev);
+    kfree(mhi_cntrl->irq);
+    mhi_cntrl->irq = NULL;
+    iounmap(mhi_cntrl->regs);
+    mhi_cntrl->regs = NULL;
+    pci_clear_master(pci_dev);
+    pci_release_region(pci_dev, mhi_dev->resn);
+    pci_disable_device(pci_dev);
+}
+
+static int mhi_init_pci_dev(struct mhi_controller *mhi_cntrl)
+{
+    struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+    struct pci_dev *pci_dev = mhi_dev->pci_dev;
+    int ret;
+    resource_size_t len;
+    int i;
+
+    mhi_dev->resn = MHI_PCI_BAR_NUM;
+    ret = pci_assign_resource(pci_dev, mhi_dev->resn);
+    if (ret) {
+        MHI_ERR("Error assign pci resources, ret:%d\n", ret);
+        return ret;
+    }
+
+    ret = pci_enable_device(pci_dev);
+    if (ret) {
+        MHI_ERR("Error enabling device, ret:%d\n", ret);
+        goto error_enable_device;
+    }
+
+    ret = pci_request_region(pci_dev, mhi_dev->resn, "mhi");
+    if (ret) {
+        MHI_ERR("Error pci_request_region, ret:%d\n", ret);
+        goto error_request_region;
+    }
+
+    pci_set_master(pci_dev);
+
+#if 1 /* some SoCs, e.g. the rpi_4b, need the following code */
+    ret = -EIO;
+    if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) {
+        ret = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(64));
+    } else if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
+        ret = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(32));
+    }
+    if (ret) {
+        MHI_ERR("Error dma mask\n");
+    }
+#endif
+
+    mhi_cntrl->base_addr = pci_resource_start(pci_dev, mhi_dev->resn);
+    len = pci_resource_len(pci_dev, mhi_dev->resn);
+#ifndef ioremap_nocache //4bdc0d676a643140bdf17dbf7eafedee3d496a3c
+#define ioremap_nocache ioremap
+#endif
+    mhi_cntrl->regs = ioremap_nocache(mhi_cntrl->base_addr, len);
+    if (!mhi_cntrl->regs) {
+        MHI_ERR("Error ioremap region\n");
+        goto error_ioremap;
+    }
+
+#if 0
+    ret = pci_alloc_irq_vectors(pci_dev, mhi_cntrl->msi_required,
+                    mhi_cntrl->msi_required, PCI_IRQ_NOMSIX);
+    if (IS_ERR_VALUE((ulong)ret) || ret < mhi_cntrl->msi_required) {
+        MHI_ERR("Failed to enable MSI, ret:%d\n", ret);
+        goto error_req_msi;
+    }
+#else
+    ret = pci_alloc_irq_vectors(pci_dev, 1, mhi_cntrl->msi_required, PCI_IRQ_MSI);
+    if (IS_ERR_VALUE((ulong)ret) || ret < mhi_cntrl->msi_required) {
+        if (ret == -ENOSPC) {
+            /* imx_3.14.52_1.1.0_ga
+            diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
+            index f06e8f0..6a9614f 100644
+            --- a/drivers/pci/host/pcie-designware.c
+            +++ b/drivers/pci/host/pcie-designware.c
+            @@ -376,6 +376,13 @@ static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
+             if (msgvec > 5)
+             msgvec = 0;
+
+            +#if 1 //Add by Quectel 20190419
+            + if (msgvec > 0 && pdev->vendor == 0x17cb) {
+            + dev_info(&pdev->dev, "%s quectel fixup pos=%d, msg_ctr=%04x, msgvec=%d\n", __func__, desc->msi_attrib.pos, msg_ctr, msgvec);
+            + msgvec = 0;
+            + }
+            +#endif
+
+             irq = assign_irq((1 << msgvec), desc, &pos);
+             if (irq < 0)
+             return irq;
+            */
+        }
+        /* imx_4.1.15_2.0.0_ga & DELL_OPTIPLEX_7010 allocate only one MSI interrupt per PCIe device */
+        if (ret != 1) {
+            MHI_ERR("Failed to enable MSI, ret=%d, msi_required=%d\n", ret, mhi_cntrl->msi_required);
+            goto error_req_msi;
+        }
+    }
+    MHI_LOG("msi_required = %d, msi_allocated = %d, msi_irq = %u\n", mhi_cntrl->msi_required, ret, pci_dev->irq);
+#endif
+
+    mhi_cntrl->msi_allocated = ret;
+    mhi_cntrl->irq = kmalloc_array(mhi_cntrl->msi_allocated,
+                       sizeof(*mhi_cntrl->irq), GFP_KERNEL);
+    if (!mhi_cntrl->irq) {
+        ret = -ENOMEM;
+        goto error_alloc_msi_vec;
+    }
+
+    for (i = 0; i < mhi_cntrl->msi_allocated; i++) {
+        mhi_cntrl->irq[i] = pci_irq_vector(pci_dev, i);
+        if (mhi_cntrl->irq[i] < 0) {
+            ret = mhi_cntrl->irq[i];
+            goto error_get_irq_vec;
+        }
+    }
+
+    dev_set_drvdata(&pci_dev->dev, mhi_cntrl);
+
+    /* configure runtime pm */
+    pm_runtime_set_autosuspend_delay(&pci_dev->dev, MHI_RPM_SUSPEND_TMR_MS);
+    pm_runtime_use_autosuspend(&pci_dev->dev);
+    pm_suspend_ignore_children(&pci_dev->dev, true);
+
+    /*
+     * The pci framework will increment the usage count (twice) before
+     * calling the local device driver probe function:
+     * 1st, pci.c pci_pm_init() calls pm_runtime_forbid;
+     * 2nd, pci-driver.c local_pci_probe calls pm_runtime_get_sync.
+     * The framework expects the pci device driver to call
+     * pm_runtime_put_noidle to decrement the usage count after a
+     * successful probe, and to call pm_runtime_allow to enable
+     * runtime suspend.
+     */
+    pm_runtime_mark_last_busy(&pci_dev->dev);
+    pm_runtime_put_noidle(&pci_dev->dev);
+
+    return 0;
+
+error_get_irq_vec:
+    kfree(mhi_cntrl->irq);
+    mhi_cntrl->irq = NULL;
+
+error_alloc_msi_vec:
+    pci_free_irq_vectors(pci_dev);
+
+error_req_msi:
+    iounmap(mhi_cntrl->regs);
+
+error_ioremap:
+    pci_clear_master(pci_dev);
+    /* the BAR is released only on paths where it was actually requested */
+    pci_release_region(pci_dev, mhi_dev->resn);
+
+error_request_region:
+    pci_disable_device(pci_dev);
+
+error_enable_device:
+    return ret;
+}
+
+#ifdef CONFIG_PM
+static int mhi_runtime_suspend(struct device *dev)
+{
+    int ret = 0;
+    struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
+    struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+
+    MHI_LOG("Enter\n");
+
+    mutex_lock(&mhi_cntrl->pm_mutex);
+
+    if (!mhi_dev->powered_on) {
+        MHI_LOG("Not fully powered, return success\n");
+        mutex_unlock(&mhi_cntrl->pm_mutex);
+        return 0;
+    }
+
+    if (mhi_cntrl->ee != MHI_EE_AMSS) {
+        MHI_LOG("Not AMSS, return busy\n");
+        mutex_unlock(&mhi_cntrl->pm_mutex);
+        return -EBUSY;
+    }
+
+    ret = mhi_pm_suspend(mhi_cntrl);
+    if (ret) {
+        MHI_LOG("Abort due to ret:%d\n", ret);
+        goto exit_runtime_suspend;
+    }
+
+    ret = mhi_arch_link_off(mhi_cntrl, true);
+    if (ret)
+        MHI_ERR("Failed to turn off link, ret:%d\n", ret);
+
+exit_runtime_suspend:
+    mutex_unlock(&mhi_cntrl->pm_mutex);
+    MHI_LOG("Exited with ret:%d\n", ret);
+
+    return ret;
+}
+
+static int mhi_runtime_idle(struct device *dev)
+{
+    struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
+
+    if ((mhi_cntrl->dev_state == MHI_STATE_M0 || mhi_cntrl->dev_state == MHI_STATE_M3)
+        && mhi_cntrl->ee == MHI_EE_AMSS) {
+        return 0;
+    }
+    MHI_LOG("Entered returning -EBUSY, mhi_state:%s exec_env:%s\n",
+        TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)), TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl)));
+
+    /*
+     * During runtime resume the RPM framework always calls rpm_idle to
+     * see if the device is ready to suspend. If dev.power usage_count
+     * is 0, the framework invokes this rpm_idle callback; if the
+     * callback returns 0 (or is not defined), the framework assumes
+     * the device driver is ready to suspend and schedules a runtime
+     * suspend. In MHI power management, the MHI host shall go to
+     * runtime suspend only after entering MHI state M2, even if the
+     * usage count is 0. Return -EBUSY to disable automatic suspend.
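+     * For example: when a client's pm_runtime_put() drops the usage
+     * count to 0, the framework invokes this idle callback; returning
+     * -EBUSY keeps it from scheduling an automatic suspend, so suspend
+     * instead happens through an explicit pm_request_autosuspend(), as
+     * in mhi_debugfs_trigger_m3() above.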
+ */ + return -EBUSY; +} + +static int mhi_runtime_resume(struct device *dev) +{ + int ret = 0; + struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev); + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + MHI_LOG("Enter\n"); + + mutex_lock(&mhi_cntrl->pm_mutex); + + if (!mhi_dev->powered_on) { + MHI_LOG("Not fully powered, return success\n"); + mutex_unlock(&mhi_cntrl->pm_mutex); + return 0; + } + + /* turn on link */ + ret = mhi_arch_link_on(mhi_cntrl); + if (ret) + goto rpm_resume_exit; + + /* enter M0 state */ + ret = mhi_pm_resume(mhi_cntrl); + +rpm_resume_exit: + mutex_unlock(&mhi_cntrl->pm_mutex); + MHI_LOG("Exited with :%d\n", ret); + + return ret; +} + +static int mhi_system_resume(struct device *dev) +{ + int ret = 0; + struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev); + + ret = mhi_runtime_resume(dev); + if (ret) { + MHI_ERR("Failed to resume link\n"); + } else { + //pm_runtime_set_active(dev); + //pm_runtime_enable(dev); + } + + return ret; +} + +int mhi_system_suspend(struct device *dev) +{ + struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev); + int ret; + + MHI_LOG("Entered\n"); + + if (atomic_read(&mhi_cntrl->pending_pkts)) { + MHI_LOG("Abort due to pending_pkts:%d\n", atomic_read(&mhi_cntrl->pending_pkts)); + return -EBUSY; + } + + /* if rpm status still active then force suspend */ + if (!pm_runtime_status_suspended(dev)) { + ret = mhi_runtime_suspend(dev); + if (ret) { + MHI_LOG("suspend failed ret:%d\n", ret); + return ret; + } + } + + //pm_runtime_set_suspended(dev); + //pm_runtime_disable(dev); + + MHI_LOG("Exit\n"); + return 0; +} +#endif + +/* checks if link is down */ +static int mhi_link_status(struct mhi_controller *mhi_cntrl, void *priv) +{ + struct mhi_dev *mhi_dev = priv; + u16 dev_id; + int ret; + + /* try reading device id, if dev id don't match, link is down */ + ret = pci_read_config_word(mhi_dev->pci_dev, PCI_DEVICE_ID, &dev_id); + + return (ret || dev_id != mhi_cntrl->dev_id) ? 
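+        /* a failed config read, or a device ID that no longer matches,
+         * means the PCIe link is down: reads on a dead link come back
+         * as all 0xFF */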
-EIO : 0;
+}
+
+/* disable PCIe L1 */
+static int mhi_lpm_disable(struct mhi_controller *mhi_cntrl, void *priv)
+{
+    struct mhi_dev *mhi_dev = priv;
+    struct pci_dev *pci_dev = mhi_dev->pci_dev;
+    int lnkctl = pci_dev->pcie_cap + PCI_EXP_LNKCTL;
+    u8 val;
+    int ret;
+
+    ret = pci_read_config_byte(pci_dev, lnkctl, &val);
+    if (ret) {
+        MHI_ERR("Error reading LNKCTL, ret:%d\n", ret);
+        return ret;
+    }
+
+    /* L1 is not supported or already disabled */
+    if (!(val & PCI_EXP_LNKCTL_ASPM_L1))
+        return 0;
+
+    val &= ~PCI_EXP_LNKCTL_ASPM_L1;
+    ret = pci_write_config_byte(pci_dev, lnkctl, val);
+    if (ret) {
+        MHI_ERR("Error writing LNKCTL to disable LPM, ret:%d\n", ret);
+        return ret;
+    }
+
+    mhi_dev->lpm_disabled = true;
+
+    return ret;
+}
+
+/* enable PCIe L1 */
+static int mhi_lpm_enable(struct mhi_controller *mhi_cntrl, void *priv)
+{
+    struct mhi_dev *mhi_dev = priv;
+    struct pci_dev *pci_dev = mhi_dev->pci_dev;
+    int lnkctl = pci_dev->pcie_cap + PCI_EXP_LNKCTL;
+    u8 val;
+    int ret;
+
+    /* L1 was never disabled by us, so there is nothing to re-enable */
+    if (!mhi_dev->lpm_disabled)
+        return 0;
+
+    ret = pci_read_config_byte(pci_dev, lnkctl, &val);
+    if (ret) {
+        MHI_ERR("Error reading LNKCTL, ret:%d\n", ret);
+        return ret;
+    }
+
+    val |= PCI_EXP_LNKCTL_ASPM_L1;
+    ret = pci_write_config_byte(pci_dev, lnkctl, val);
+    if (ret) {
+        MHI_ERR("Error writing LNKCTL to enable LPM, ret:%d\n", ret);
+        return ret;
+    }
+
+    mhi_dev->lpm_disabled = false;
+
+    return ret;
+}
+
+static int mhi_power_up(struct mhi_controller *mhi_cntrl)
+{
+    enum mhi_dev_state dev_state = mhi_get_mhi_state(mhi_cntrl);
+    const u32 delayus = 10;
+    int itr = DIV_ROUND_UP(mhi_cntrl->timeout_ms * 1000, delayus);
+    int ret;
+
+    MHI_LOG("dev_state:%s\n", TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)));
+
+    /*
+     * It's possible the device did not go through a cold reset before
+     * power up and is still in an error state. 
If device in error state, + * we need to trigger a soft reset before continue with power + * up + */ + if (dev_state == MHI_STATE_SYS_ERR) { + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); + while (itr--) { + dev_state = mhi_get_mhi_state(mhi_cntrl); + if (dev_state != MHI_STATE_SYS_ERR) + break; + usleep_range(delayus, delayus << 1); + } + MHI_LOG("dev_state:%s\n", TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl))); + + /* device still in error state, abort power up */ + if (dev_state == MHI_STATE_SYS_ERR) + return -EIO; + } + + ret = mhi_async_power_up(mhi_cntrl); + + /* power up create the dentry */ + if (mhi_cntrl->dentry) { + debugfs_create_file("m0", 0444, mhi_cntrl->dentry, mhi_cntrl, + &debugfs_trigger_m0_fops); + debugfs_create_file("m3", 0444, mhi_cntrl->dentry, mhi_cntrl, + &debugfs_trigger_m3_fops); + } + + return ret; +} + +static int mhi_runtime_get(struct mhi_controller *mhi_cntrl, void *priv) +{ + struct mhi_dev *mhi_dev = priv; + struct device *dev = &mhi_dev->pci_dev->dev; + + return pm_runtime_get(dev); +} + +static void mhi_runtime_put(struct mhi_controller *mhi_cntrl, void *priv) +{ + struct mhi_dev *mhi_dev = priv; + struct device *dev = &mhi_dev->pci_dev->dev; + + pm_runtime_mark_last_busy(dev); + pm_runtime_put(dev); +} + +static void mhi_runtime_mark_last_busy(struct mhi_controller *mhi_cntrl, void *priv) +{ + struct mhi_dev *mhi_dev = priv; + struct device *dev = &mhi_dev->pci_dev->dev; + + pm_runtime_mark_last_busy(dev); +} + +static void mhi_status_cb(struct mhi_controller *mhi_cntrl, + void *priv, + enum MHI_CB reason) +{ + struct mhi_dev *mhi_dev = priv; + struct device *dev = &mhi_dev->pci_dev->dev; + + switch (reason) { + case MHI_CB_FATAL_ERROR: + case MHI_CB_SYS_ERROR: + pm_runtime_forbid(dev); + break; + case MHI_CB_EE_MISSION_MODE: + //pm_runtime_allow(dev); + break; + default: + break; + } +} + +/* capture host SoC XO time in ticks */ +static u64 mhi_time_get(struct mhi_controller *mhi_cntrl, void *priv) +{ + return 0; +} + +static ssize_t timeout_ms_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + /* buffer provided by sysfs has a minimum size of PAGE_SIZE */ + return snprintf(buf, PAGE_SIZE, "%u\n", mhi_cntrl->timeout_ms); +} + +static ssize_t timeout_ms_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + u32 timeout_ms; + + if (kstrtou32(buf, 0, &timeout_ms) < 0) + return -EINVAL; + + mhi_cntrl->timeout_ms = timeout_ms; + + return count; +} +static DEVICE_ATTR_RW(timeout_ms); + +static ssize_t power_up_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + int ret; + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + ret = mhi_power_up(mhi_cntrl); + if (ret) + return ret; + + return count; +} +static DEVICE_ATTR_WO(power_up); + +static struct attribute *mhi_attrs[] = { + &dev_attr_timeout_ms.attr, + &dev_attr_power_up.attr, + NULL +}; + +static const struct attribute_group mhi_group = { + .attrs = mhi_attrs, +}; + +static struct mhi_controller *mhi_register_controller(struct pci_dev *pci_dev) +{ + struct mhi_controller *mhi_cntrl; + struct mhi_dev *mhi_dev; + struct device_node *of_node = pci_dev->dev.of_node; + const struct firmware_info *firmware_info; + bool 
use_bb;
+    u64 addr_win[2];
+    int ret, i;
+
+    //if (!of_node)
+    //    return ERR_PTR(-ENODEV);
+
+    mhi_cntrl = mhi_alloc_controller(sizeof(*mhi_dev));
+    if (!mhi_cntrl)
+        return ERR_PTR(-ENOMEM);
+
+    mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+
+    mhi_cntrl->dev = &pci_dev->dev;
+    mhi_cntrl->domain = pci_domain_nr(pci_dev->bus);
+    mhi_cntrl->vendor = pci_dev->vendor;
+    mhi_cntrl->dev_id = pci_dev->device;
+    mhi_cntrl->bus = pci_dev->bus->number;
+    mhi_cntrl->slot = PCI_SLOT(pci_dev->devfn);
+
+#if 0
+    use_bb = of_property_read_bool(of_node, "mhi,use-bb");
+
+    /*
+     * if s1 translation enabled or using bounce buffer pull iova addr
+     * from dt
+     */
+    if (use_bb || (mhi_dev->smmu_cfg & MHI_SMMU_ATTACH &&
+               !(mhi_dev->smmu_cfg & MHI_SMMU_S1_BYPASS))) {
+        ret = of_property_count_elems_of_size(of_node, "qti,addr-win",
+                              sizeof(addr_win));
+        if (ret != 1)
+            goto error_register;
+        ret = of_property_read_u64_array(of_node, "qti,addr-win",
+                         addr_win, 2);
+        if (ret)
+            goto error_register;
+    } else {
+        addr_win[0] = memblock_start_of_DRAM();
+        addr_win[1] = memblock_end_of_DRAM();
+    }
+#else
+    use_bb = false;
+    (void)use_bb;
+    addr_win[0] = 0x000000000;
+    addr_win[1] = 0x2000000000; //MHI_MEM_SIZE_DEFAULT
+    if (sizeof(dma_addr_t) == 4) {
+        addr_win[1] = 0xFFFFFFFF;
+    }
+#endif
+
+    mhi_dev->iova_start = addr_win[0];
+    mhi_dev->iova_stop = addr_win[1];
+
+    /*
+     * If S1 is enabled, set MHI_CTRL start address to 0 so we can use low
+     * level mapping api to map buffers outside of smmu domain
+     */
+    if (mhi_dev->smmu_cfg & MHI_SMMU_ATTACH &&
+        !(mhi_dev->smmu_cfg & MHI_SMMU_S1_BYPASS))
+        mhi_cntrl->iova_start = 0;
+    else
+        mhi_cntrl->iova_start = addr_win[0];
+
+    mhi_cntrl->iova_stop = mhi_dev->iova_stop;
+    mhi_cntrl->of_node = of_node;
+
+    mhi_dev->pci_dev = pci_dev;
+
+    /* setup power management apis */
+    mhi_cntrl->status_cb = mhi_status_cb;
+    mhi_cntrl->runtime_get = mhi_runtime_get;
+    mhi_cntrl->runtime_put = mhi_runtime_put;
+    mhi_cntrl->runtime_mark_last_busy = mhi_runtime_mark_last_busy;
+    mhi_cntrl->link_status = mhi_link_status;
+
+    mhi_cntrl->lpm_disable = mhi_lpm_disable;
+    mhi_cntrl->lpm_enable = mhi_lpm_enable;
+    mhi_cntrl->time_get = mhi_time_get;
+
+    ret = of_register_mhi_controller(mhi_cntrl);
+    if (ret)
+        goto error_register;
+
+    for (i = 0; i < ARRAY_SIZE(firmware_table); i++) {
+        firmware_info = firmware_table + i;
+
+        /* debug mode always uses the default entry */
+        if (!debug_mode && mhi_cntrl->dev_id == firmware_info->dev_id)
+            break;
+    }
+
+#if 0
+    mhi_cntrl->fw_image = firmware_info->fw_image;
+    mhi_cntrl->edl_image = firmware_info->edl_image;
+#endif
+
+    if (sysfs_create_group(&mhi_cntrl->mhi_dev->dev.kobj, &mhi_group))
+        MHI_ERR("Error while creating the sysfs group\n");
+
+    return mhi_cntrl;
+
+error_register:
+    mhi_free_controller(mhi_cntrl);
+
+    return ERR_PTR(-EINVAL);
+}
+
+static bool mhi_pci_is_alive(struct pci_dev *pdev)
+{
+    u16 vendor = 0;
+
+    if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor))
+        return false;
+
+    if (vendor == (u16) ~0 || vendor == 0)
+        return false;
+
+    return true;
+}
+
+static void mhi_pci_show_link(struct mhi_controller *mhi_cntrl, struct pci_dev *pci_dev)
+{
+    int pcie_cap_reg;
+    u16 stat;
+    u32 caps;
+    const char *speed;
+
+    pcie_cap_reg = pci_find_capability(pci_dev, PCI_CAP_ID_EXP);
+
+    if (!pcie_cap_reg)
+        return;
+
+    pci_read_config_word(pci_dev,
+                 pcie_cap_reg + PCI_EXP_LNKSTA,
+                 &stat);
+    pci_read_config_dword(pci_dev,
+                  pcie_cap_reg + PCI_EXP_LNKCAP,
+                  &caps);
+
+    switch (caps & PCI_EXP_LNKCAP_SLS) {
+    case PCI_EXP_LNKCAP_SLS_2_5GB: speed = "2.5"; break;
+    case PCI_EXP_LNKCAP_SLS_5_0GB: speed = "5"; break;
+    case 3: speed = "8"; break;
+    case 4: speed = "16"; break;
+    case 5: speed = "32"; break;
+    case 6: speed = "64"; break;
+    default: speed = "0"; break;
+    }
+
+    MHI_LOG("LnkCap: Speed %sGT/s, Width x%d\n", speed,
+        (caps & PCI_EXP_LNKCAP_MLW) >> 4);
+
+    switch (stat & PCI_EXP_LNKSTA_CLS) {
+    case PCI_EXP_LNKSTA_CLS_2_5GB: speed = "2.5"; break;
+    case PCI_EXP_LNKSTA_CLS_5_0GB: speed = "5"; break;
+    case 3: speed = "8"; break;
+    case 4: speed = "16"; break;
+    case 5: speed = "32"; break;
+    case 6: speed = "64"; break;
+    default: speed = "0"; break;
+    }
+
+    MHI_LOG("LnkSta: Speed %sGT/s, Width x%d\n", speed,
+        (stat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
+
+}
+
+int mhi_pci_probe(struct pci_dev *pci_dev,
+          const struct pci_device_id *device_id)
+{
+    struct mhi_controller *mhi_cntrl;
+    u32 domain = pci_domain_nr(pci_dev->bus);
+    u32 bus = pci_dev->bus->number;
+    u32 dev_id = pci_dev->device;
+    u32 slot = PCI_SLOT(pci_dev->devfn);
+    struct mhi_dev *mhi_dev;
+    int ret;
+
+    pr_info("%s pci_dev->name = %s, domain=%d, bus=%d, slot=%d, vendor=%04X, device=%04X\n",
+        __func__, dev_name(&pci_dev->dev), domain, bus, slot, pci_dev->vendor, pci_dev->device);
+
+    if (!mhi_pci_is_alive(pci_dev)) {
+        /*
+        root@OpenWrt:~# hexdump /sys/bus/pci/devices/0000:01:00.0/config
+        0000000 ffff ffff ffff ffff ffff ffff ffff ffff
+        *
+        0001000
+        */
+        pr_err("mhi_pci is not alive! pcie link is down\n");
+        pr_err("double check by 'hexdump /sys/bus/pci/devices/%s/config'\n", dev_name(&pci_dev->dev));
+        return -EIO;
+    }
+
+    /* see if we already registered */
+    mhi_cntrl = mhi_bdf_to_controller(domain, bus, slot, dev_id);
+    if (!mhi_cntrl)
+        mhi_cntrl = mhi_register_controller(pci_dev);
+
+    if (IS_ERR(mhi_cntrl))
+        return PTR_ERR(mhi_cntrl);
+
+    mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+    mhi_dev->powered_on = true;
+
+    ret = mhi_arch_pcie_init(mhi_cntrl);
+    if (ret)
+        return ret;
+
+    mhi_cntrl->dev = &pci_dev->dev;
+    ret = mhi_init_pci_dev(mhi_cntrl);
+    if (ret)
+        goto error_init_pci;
+
+    /* start power up sequence */
+    if (!debug_mode) {
+        ret = mhi_power_up(mhi_cntrl);
+        if (ret)
+            goto error_power_up;
+    }
+
+    pm_runtime_mark_last_busy(&pci_dev->dev);
+
+    mhi_pci_show_link(mhi_cntrl, pci_dev);
+
+    MHI_LOG("Return successful\n");
+
+    return 0;
+
+error_power_up:
+    mhi_deinit_pci_dev(mhi_cntrl);
+
+error_init_pci:
+    mhi_arch_pcie_deinit(mhi_cntrl);
+
+    return ret;
+}
+
+void mhi_pci_device_removed(struct pci_dev *pci_dev)
+{
+    struct mhi_controller *mhi_cntrl;
+    u32 domain = pci_domain_nr(pci_dev->bus);
+    u32 bus = pci_dev->bus->number;
+    u32 dev_id = pci_dev->device;
+    u32 slot = PCI_SLOT(pci_dev->devfn);
+
+    mhi_cntrl = mhi_bdf_to_controller(domain, bus, slot, dev_id);
+
+    if (mhi_cntrl) {
+
+        struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+
+        pm_stay_awake(&mhi_cntrl->mhi_dev->dev);
+
+        /* if link is in drv suspend, wake it up */
+        pm_runtime_get_sync(&pci_dev->dev);
+
+        mutex_lock(&mhi_cntrl->pm_mutex);
+        if (!mhi_dev->powered_on) {
+            MHI_LOG("Not in active state\n");
+            mutex_unlock(&mhi_cntrl->pm_mutex);
+            pm_runtime_put_noidle(&pci_dev->dev);
+            return;
+        }
+        mhi_dev->powered_on = false;
+        mutex_unlock(&mhi_cntrl->pm_mutex);
+
+        pm_runtime_put_noidle(&pci_dev->dev);
+
+        MHI_LOG("Triggering shutdown process\n");
+        mhi_power_down(mhi_cntrl, false);
+
+        /* turn the link off */
+        mhi_deinit_pci_dev(mhi_cntrl);
+        mhi_arch_link_off(mhi_cntrl, false);
+
mhi_arch_pcie_deinit(mhi_cntrl); + + pm_relax(&mhi_cntrl->mhi_dev->dev); + + mhi_unregister_mhi_controller(mhi_cntrl); + } +} + +static const struct dev_pm_ops pm_ops = { + SET_RUNTIME_PM_OPS(mhi_runtime_suspend, + mhi_runtime_resume, + mhi_runtime_idle) + SET_SYSTEM_SLEEP_PM_OPS(mhi_system_suspend, mhi_system_resume) +}; + +static struct pci_device_id mhi_pcie_device_id[] = { + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0303)}, + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0304)}, //SDX20 + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0305)}, //SDX24 + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0306)}, //SDX55 + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0308)}, //SDX62 + {PCI_DEVICE(0x1eac, 0x1001)}, //EM120 + {PCI_DEVICE(0x1eac, 0x1002)}, //EM160 + {PCI_DEVICE(0x1eac, 0x1004)}, //RM520 + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, MHI_PCIE_DEBUG_ID)}, + {0}, +}; + +MODULE_DEVICE_TABLE(pci, mhi_pcie_device_id); + +static struct pci_driver mhi_pcie_driver = { + .name = "mhi_q", + .id_table = mhi_pcie_device_id, + .probe = mhi_pci_probe, + .remove = mhi_pci_device_removed, + .driver = { + .pm = &pm_ops + } +}; + +#if 0 +module_pci_driver(mhi_pcie_driver); +#else +int __init mhi_controller_qcom_init(void) +{ + return pci_register_driver(&mhi_pcie_driver); +}; + +void mhi_controller_qcom_exit(void) +{ + pr_info("%s enter\n", __func__); + pci_unregister_driver(&mhi_pcie_driver); + pr_info("%s exit\n", __func__); +} + +int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl) +{ + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + mhi_cntrl->dev = &mhi_dev->pci_dev->dev; + + return dma_set_mask_and_coherent(mhi_cntrl->dev, DMA_BIT_MASK(64)); +} + +void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl) +{ +} + +int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl) +{ + return 0; +} + +void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl) +{ +} + +int mhi_arch_platform_init(struct mhi_dev *mhi_dev) +{ + return 0; +} + +void mhi_arch_platform_deinit(struct mhi_dev *mhi_dev) +{ +} + +int mhi_arch_link_off(struct mhi_controller *mhi_cntrl, + bool graceful) +{ + return 0; +} + +int mhi_arch_link_on(struct mhi_controller *mhi_cntrl) +{ + return 0; +} +#endif diff --git a/package/wwan/driver/quectel_MHI/src/controllers/mhi_qti.h b/package/wwan/driver/quectel_MHI/src/controllers/mhi_qti.h new file mode 100644 index 000000000..7ac021a3c --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/controllers/mhi_qti.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2018-2019, The Linux Foundation. 
All rights reserved.*/ + +#ifndef _MHI_QTI_ +#define _MHI_QTI_ + +/* iova cfg bitmask */ +#define MHI_SMMU_ATTACH BIT(0) +#define MHI_SMMU_S1_BYPASS BIT(1) +#define MHI_SMMU_FAST BIT(2) +#define MHI_SMMU_ATOMIC BIT(3) +#define MHI_SMMU_FORCE_COHERENT BIT(4) + +#define MHI_PCIE_VENDOR_ID (0x17cb) +#define MHI_PCIE_DEBUG_ID (0xffff) + +/* runtime suspend timer */ +#define MHI_RPM_SUSPEND_TMR_MS (2000) +#define MHI_PCI_BAR_NUM (0) + +struct mhi_dev { + struct pci_dev *pci_dev; + u32 smmu_cfg; + int resn; + void *arch_info; + bool powered_on; + dma_addr_t iova_start; + dma_addr_t iova_stop; + bool lpm_disabled; +}; + +void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl); +int mhi_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *device_id); + +void mhi_pci_device_removed(struct pci_dev *pci_dev); +int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl); +void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl); +int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl); +void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl); +int mhi_arch_link_off(struct mhi_controller *mhi_cntrl, bool graceful); +int mhi_arch_link_on(struct mhi_controller *mhi_cntrl); + +#endif /* _MHI_QTI_ */ diff --git a/package/wwan/driver/quectel_MHI/src/core/Makefile b/package/wwan/driver/quectel_MHI/src/core/Makefile new file mode 100644 index 000000000..a743fbfa4 --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/core/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_MHI_BUS) +=mhi_init.o mhi_main.o mhi_pm.o mhi_boot.o mhi_dtr.o diff --git a/package/wwan/driver/quectel_MHI/src/core/mhi.h b/package/wwan/driver/quectel_MHI/src/core/mhi.h new file mode 100644 index 000000000..d61150e89 --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/core/mhi.h @@ -0,0 +1,890 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 
*/
+
+#ifndef _MHI_H_
+#define _MHI_H_
+
+#define PCIE_MHI_DRIVER_VERSION "V1.3.4"
+#define ENABLE_MHI_MON
+//#define ENABLE_IP_SW0
+
+#include <linux/miscdevice.h>
+
+typedef enum
+{
+    MHI_CLIENT_LOOPBACK_OUT = 0,
+    MHI_CLIENT_LOOPBACK_IN = 1,
+    MHI_CLIENT_SAHARA_OUT = 2,
+    MHI_CLIENT_SAHARA_IN = 3,
+    MHI_CLIENT_DIAG_OUT = 4,
+    MHI_CLIENT_DIAG_IN = 5,
+    MHI_CLIENT_SSR_OUT = 6,
+    MHI_CLIENT_SSR_IN = 7,
+    MHI_CLIENT_QDSS_OUT = 8,
+    MHI_CLIENT_QDSS_IN = 9,
+    MHI_CLIENT_EFS_OUT = 10,
+    MHI_CLIENT_EFS_IN = 11,
+    MHI_CLIENT_MBIM_OUT = 12,
+    MHI_CLIENT_MBIM_IN = 13,
+    MHI_CLIENT_QMI_OUT = 14,
+    MHI_CLIENT_QMI_IN = 15,
+    MHI_CLIENT_QMI_2_OUT = 16,
+    MHI_CLIENT_QMI_2_IN = 17,
+    MHI_CLIENT_IP_CTRL_1_OUT = 18,
+    MHI_CLIENT_IP_CTRL_1_IN = 19,
+    MHI_CLIENT_IPCR_OUT = 20,
+    MHI_CLIENT_IPCR_IN = 21,
+    MHI_CLIENT_TEST_FW_OUT = 22,
+    MHI_CLIENT_TEST_FW_IN = 23,
+    MHI_CLIENT_RESERVED_0 = 24,
+    MHI_CLIENT_BOOT_LOG_IN = 25,
+    MHI_CLIENT_DCI_OUT = 26,
+    MHI_CLIENT_DCI_IN = 27,
+    MHI_CLIENT_QBI_OUT = 28,
+    MHI_CLIENT_QBI_IN = 29,
+    MHI_CLIENT_RESERVED_1_LOWER = 30,
+    MHI_CLIENT_RESERVED_1_UPPER = 31,
+    MHI_CLIENT_DUN_OUT = 32,
+    MHI_CLIENT_DUN_IN = 33,
+    MHI_CLIENT_EDL_OUT = 34,
+    MHI_CLIENT_EDL_IN = 35,
+    MHI_CLIENT_ADB_FB_OUT = 36,
+    MHI_CLIENT_ADB_FB_IN = 37,
+    MHI_CLIENT_RESERVED_2_LOWER = 38,
+    MHI_CLIENT_RESERVED_2_UPPER = 41,
+    MHI_CLIENT_CSVT_OUT = 42,
+    MHI_CLIENT_CSVT_IN = 43,
+    MHI_CLIENT_SMCT_OUT = 44,
+    MHI_CLIENT_SMCT_IN = 45,
+    MHI_CLIENT_IP_SW_0_OUT = 46,
+    MHI_CLIENT_IP_SW_0_IN = 47,
+    MHI_CLIENT_IP_SW_1_OUT = 48,
+    MHI_CLIENT_IP_SW_1_IN = 49,
+    MHI_CLIENT_RESERVED_3_LOWER = 50,
+    MHI_CLIENT_RESERVED_3_UPPER = 59,
+    MHI_CLIENT_TEST_0_OUT = 60,
+    MHI_CLIENT_TEST_0_IN = 61,
+    MHI_CLIENT_TEST_1_OUT = 62,
+    MHI_CLIENT_TEST_1_IN = 63,
+    MHI_CLIENT_TEST_2_OUT = 64,
+    MHI_CLIENT_TEST_2_IN = 65,
+    MHI_CLIENT_TEST_3_OUT = 66,
+    MHI_CLIENT_TEST_3_IN = 67,
+    MHI_CLIENT_RESERVED_4_LOWER = 68,
+    MHI_CLIENT_RESERVED_4_UPPER = 91,
+    MHI_CLIENT_OEM_0_OUT = 92,
+    MHI_CLIENT_OEM_0_IN = 93,
+    MHI_CLIENT_OEM_1_OUT = 94,
+    MHI_CLIENT_OEM_1_IN = 95,
+    MHI_CLIENT_OEM_2_OUT = 96,
+    MHI_CLIENT_OEM_2_IN = 97,
+    MHI_CLIENT_OEM_3_OUT = 98,
+    MHI_CLIENT_OEM_3_IN = 99,
+    MHI_CLIENT_IP_HW_0_OUT = 100,
+    MHI_CLIENT_IP_HW_0_IN = 101,
+    MHI_CLIENT_ADPL = 102,
+    MHI_CLIENT_RESERVED_5_LOWER = 103,
+    MHI_CLIENT_RESERVED_5_UPPER = 127,
+    MHI_MAX_CHANNELS = 128
+}MHI_CLIENT_CHANNEL_TYPE;
+
+/* Event Ring Index */
+typedef enum
+{
+    SW_EVT_RING = 0,
+    PRIMARY_EVENT_RING = SW_EVT_RING,
+#ifdef ENABLE_IP_SW0
+    SW_0_OUT_EVT_RING,
+    SW_0_IN_EVT_RING,
+#endif
+    IPA_OUT_EVENT_RING,
+    IPA_IN_EVENT_RING,
+    ADPL_EVT_RING,
+
+    MAX_EVT_RING_IDX
+}MHI_EVT_RING_IDX;
+
+#define MHI_VERSION 0x01000000
+#define MHIREGLEN_VALUE 0x100 /* **** WRONG VALUE *** */
+#define MHI_MSI_INDEX 1
+#define MAX_NUM_MHI_DEVICES 1
+#define NUM_MHI_XFER_RINGS 128
+#define NUM_MHI_EVT_RINGS MAX_EVT_RING_IDX
+#define NUM_MHI_HW_EVT_RINGS 3
+#define NUM_MHI_XFER_RING_ELEMENTS 16
+#define NUM_MHI_EVT_RING_ELEMENTS (NUM_MHI_IPA_IN_RING_ELEMENTS*2) /* must be *2: a full event ring will make the X55 dump */
+#define NUM_MHI_IPA_IN_RING_ELEMENTS 512
+#define NUM_MHI_IPA_OUT_RING_ELEMENTS 512 /* UL aggregation is not used, so this is increased */
+#define NUM_MHI_DIAG_IN_RING_ELEMENTS 128
+#define NUM_MHI_SW_IP_RING_ELEMENTS 512
+
+/*
+* If the interrupt moderation time is set to 1 ms and more than
+* NUM_MHI_CHAN_RING_ELEMENTS transfers are sent to the modem within that
+* 1 ms window (e.g. during a firehose upgrade), the modem will not
+* trigger an irq for those transfers.
+*/
+#define NUM_MHI_CHAN_RING_ELEMENTS 32 //8
+#define MHI_EVT_CMD_QUEUE_SIZE 160
+#define MHI_EVT_STATE_QUEUE_SIZE 128
+#define MHI_EVT_XFER_QUEUE_SIZE 1024
+
+#define CHAN_INBOUND(_x) ((_x)%2)
+
+#define CHAN_SBL(_x) (((_x) == MHI_CLIENT_SAHARA_OUT) || \
+              ((_x) == MHI_CLIENT_SAHARA_IN) || \
+              ((_x) == MHI_CLIENT_BOOT_LOG_IN))
+
+#define CHAN_EDL(_x) (((_x) == MHI_CLIENT_EDL_OUT) || \
+              ((_x) == MHI_CLIENT_EDL_IN))
+
+struct mhi_chan;
+struct mhi_event;
+struct mhi_ctxt;
+struct mhi_cmd;
+struct image_info;
+struct bhi_vec_entry;
+struct mhi_timesync;
+struct mhi_buf_info;
+
+/**
+ * enum MHI_CB - MHI callback
+ * @MHI_CB_IDLE: MHI entered idle state
+ * @MHI_CB_PENDING_DATA: New data available for client to process
+ * @MHI_CB_LPM_ENTER: MHI host entered low power mode
+ * @MHI_CB_LPM_EXIT: MHI host about to exit low power mode
+ * @MHI_CB_EE_RDDM: MHI device entered RDDM execution environment
+ * @MHI_CB_EE_MISSION_MODE: MHI device entered Mission Mode exec env
+ * @MHI_CB_SYS_ERROR: MHI device entered error state (may recover)
+ * @MHI_CB_FATAL_ERROR: MHI device entered fatal error state
+ */
+enum MHI_CB {
+    MHI_CB_IDLE,
+    MHI_CB_PENDING_DATA,
+    MHI_CB_LPM_ENTER,
+    MHI_CB_LPM_EXIT,
+    MHI_CB_EE_RDDM,
+    MHI_CB_EE_MISSION_MODE,
+    MHI_CB_SYS_ERROR,
+    MHI_CB_FATAL_ERROR,
+};
+
+/**
+ * enum MHI_DEBUG_LEVEL - various debugging levels
+ */
+enum MHI_DEBUG_LEVEL {
+    MHI_MSG_LVL_VERBOSE,
+    MHI_MSG_LVL_INFO,
+    MHI_MSG_LVL_ERROR,
+    MHI_MSG_LVL_CRITICAL,
+    MHI_MSG_LVL_MASK_ALL,
+};
+
+/*
+GSI_XFER_FLAG_BEI: Block event interrupt
+1: Event generated by this ring element must not assert an interrupt to the host
+0: Event generated by this ring element must assert an interrupt to the host
+
+GSI_XFER_FLAG_EOT: Interrupt on end of transfer
+1: If an EOT condition is encountered when processing this ring element, an event is generated by the device with its completion code set to EOT.
+0: If an EOT condition is encountered for this ring element, a completion event is not generated by the device, unless IEOB is 1
+
+GSI_XFER_FLAG_EOB: Interrupt on end of block
+1: Device notifies host after processing this ring element by sending a completion event
+0: Completion event is not required after processing this ring element
+
+GSI_XFER_FLAG_CHAIN: Chain bit that identifies the ring elements in a TD
+*/
+
+/**
+ * enum MHI_FLAGS - Transfer flags
+ * @MHI_EOB: End of buffer for bulk transfer
+ * @MHI_EOT: End of transfer
+ * @MHI_CHAIN: Linked transfer
+ */
+enum MHI_FLAGS {
+    MHI_EOB,
+    MHI_EOT,
+    MHI_CHAIN,
+};
+
+/**
+ * enum mhi_device_type - Device types
+ * @MHI_XFER_TYPE: Handles data transfer
+ * @MHI_TIMESYNC_TYPE: Used for the timesync feature
+ * @MHI_CONTROLLER_TYPE: Control device
+ */
+enum mhi_device_type {
+    MHI_XFER_TYPE,
+    MHI_TIMESYNC_TYPE,
+    MHI_CONTROLLER_TYPE,
+};
+
+/**
+ * enum mhi_ee - device current execution environment
+ * @MHI_EE_PBL - device in PBL
+ * @MHI_EE_SBL - device in SBL
+ * @MHI_EE_AMSS - device in mission mode (firmware fully loaded)
+ * @MHI_EE_RDDM - device in ram dump collection mode
+ * @MHI_EE_WFW - device in WLAN firmware mode
+ * @MHI_EE_PTHRU - device in PBL but configured in pass-through mode
+ * @MHI_EE_EDL - device in emergency download mode
+ */
+enum mhi_ee {
+    MHI_EE_PBL = 0x0,
+    MHI_EE_SBL = 0x1,
+    MHI_EE_AMSS = 0x2,
+    MHI_EE_RDDM = 0x3,
+    MHI_EE_WFW = 0x4,
+    MHI_EE_PTHRU = 0x5,
+    MHI_EE_EDL = 0x6,
+    MHI_EE_FP = 0x7, /* FlashProg, Flash Programmer Environment */
+    MHI_EE_MAX_SUPPORTED = MHI_EE_FP,
+    MHI_EE_DISABLE_TRANSITION, /* local EE, not related to mhi spec */
+    MHI_EE_MAX,
+};
+
+/**
+ * enum mhi_dev_state - device current MHI state
+ */
+enum mhi_dev_state {
+    MHI_STATE_RESET = 0x0,
+    MHI_STATE_READY = 0x1,
+    MHI_STATE_M0 = 0x2,
+    MHI_STATE_M1 = 0x3,
+    MHI_STATE_M2 = 0x4,
+    MHI_STATE_M3 = 0x5,
+    MHI_STATE_BHI = 0x7,
+    MHI_STATE_SYS_ERR = 0xFF,
+    MHI_STATE_MAX,
+};
+
+extern const char * const mhi_ee_str[MHI_EE_MAX];
+#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ?
\ + "INVALID_EE" : mhi_ee_str[ee]) + +/** + * struct image_info - firmware and rddm table table + * @mhi_buf - Contain device firmware and rddm table + * @entries - # of entries in table + */ +struct image_info { + struct mhi_buf *mhi_buf; + struct bhi_vec_entry *bhi_vec; + u32 entries; +}; + +/** + * struct mhi_controller - Master controller structure for external modem + * @dev: Device associated with this controller + * @of_node: DT that has MHI configuration information + * @regs: Points to base of MHI MMIO register space + * @bhi: Points to base of MHI BHI register space + * @bhie: Points to base of MHI BHIe register space + * @wake_db: MHI WAKE doorbell register address + * @dev_id: PCIe device id of the external device + * @domain: PCIe domain the device connected to + * @bus: PCIe bus the device assigned to + * @slot: PCIe slot for the modem + * @iova_start: IOMMU starting address for data + * @iova_stop: IOMMU stop address for data + * @fw_image: Firmware image name for normal booting + * @edl_image: Firmware image name for emergency download mode + * @fbc_download: MHI host needs to do complete image transfer + * @rddm_size: RAM dump size that host should allocate for debugging purpose + * @sbl_size: SBL image size + * @seg_len: BHIe vector size + * @fbc_image: Points to firmware image buffer + * @rddm_image: Points to RAM dump buffer + * @max_chan: Maximum number of channels controller support + * @mhi_chan: Points to channel configuration table + * @lpm_chans: List of channels that require LPM notifications + * @total_ev_rings: Total # of event rings allocated + * @hw_ev_rings: Number of hardware event rings + * @sw_ev_rings: Number of software event rings + * @msi_required: Number of msi required to operate + * @msi_allocated: Number of msi allocated by bus master + * @irq: base irq # to request + * @mhi_event: MHI event ring configurations table + * @mhi_cmd: MHI command ring configurations table + * @mhi_ctxt: MHI device context, shared memory between host and device + * @timeout_ms: Timeout in ms for state transitions + * @pm_state: Power management state + * @ee: MHI device execution environment + * @dev_state: MHI STATE + * @status_cb: CB function to notify various power states to but master + * @link_status: Query link status in case of abnormal value read from device + * @runtime_get: Async runtime resume function + * @runtimet_put: Release votes + * @time_get: Return host time in us + * @lpm_disable: Request controller to disable link level low power modes + * @lpm_enable: Controller may enable link level low power modes again + * @priv_data: Points to bus master's private data + */ +struct mhi_controller { + struct list_head node; + struct mhi_device *mhi_dev; + + /* device node for iommu ops */ + struct device *dev; + struct device_node *of_node; + + /* mmio base */ + phys_addr_t base_addr; + void __iomem *regs; + void __iomem *bhi; + void __iomem *bhie; + void __iomem *wake_db; + + /* device topology */ + u32 vendor; + u32 dev_id; + u32 domain; + u32 bus; + u32 slot; + u32 cntrl_idx; + struct device *cntrl_dev; + + /* addressing window */ + dma_addr_t iova_start; + dma_addr_t iova_stop; + + /* fw images */ + const char *fw_image; + const char *edl_image; + + /* mhi host manages downloading entire fbc images */ + bool fbc_download; + size_t rddm_size; + size_t sbl_size; + size_t seg_len; + u32 session_id; + u32 sequence_id; + struct image_info *fbc_image; + struct image_info *rddm_image; + + /* physical channel config data */ + u32 max_chan; + struct mhi_chan 
*mhi_chan; + struct list_head lpm_chans; /* these chan require lpm notification */ + + /* physical event config data */ + u32 total_ev_rings; + u32 hw_ev_rings; + u32 sw_ev_rings; + u32 msi_required; + u32 msi_allocated; + u32 msi_irq_base; + int *irq; /* interrupt table */ + struct mhi_event *mhi_event; + + /* cmd rings */ + struct mhi_cmd *mhi_cmd; + + /* mhi context (shared with device) */ + struct mhi_ctxt *mhi_ctxt; + + u32 timeout_ms; + + /* caller should grab pm_mutex for suspend/resume operations */ + struct mutex pm_mutex; + bool pre_init; + rwlock_t pm_lock; + u32 pm_state; + enum mhi_ee ee; + enum mhi_dev_state dev_state; + bool wake_set; + atomic_t dev_wake; + atomic_t alloc_size; + atomic_t pending_pkts; + struct list_head transition_list; + spinlock_t transition_lock; + spinlock_t wlock; + + /* debug counters */ + u32 M0, M2, M3; + + /* worker for different state transitions */ + struct work_struct st_worker; + struct work_struct fw_worker; + struct work_struct syserr_worker; + struct delayed_work ready_worker; + wait_queue_head_t state_event; + + /* shadow functions */ + void (*status_cb)(struct mhi_controller *mhi_cntrl, void *priv, + enum MHI_CB reason); + int (*link_status)(struct mhi_controller *mhi_cntrl, void *priv); + void (*wake_get)(struct mhi_controller *mhi_cntrl, bool override); + void (*wake_put)(struct mhi_controller *mhi_cntrl, bool override); + int (*runtime_get)(struct mhi_controller *mhi_cntrl, void *priv); + void (*runtime_put)(struct mhi_controller *mhi_cntrl, void *priv); + void (*runtime_mark_last_busy)(struct mhi_controller *mhi_cntrl, void *priv); + u64 (*time_get)(struct mhi_controller *mhi_cntrl, void *priv); + int (*lpm_disable)(struct mhi_controller *mhi_cntrl, void *priv); + int (*lpm_enable)(struct mhi_controller *mhi_cntrl, void *priv); + int (*map_single)(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf); + void (*unmap_single)(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf); + + /* channel to control DTR messaging */ + struct mhi_device *dtr_dev; + + /* bounce buffer settings */ + bool bounce_buf; + size_t buffer_len; + + /* supports time sync feature */ + struct mhi_timesync *mhi_tsync; + struct mhi_device *tsync_dev; + + /* kernel log level */ + enum MHI_DEBUG_LEVEL klog_lvl; + int klog_slient; + + /* private log level controller driver to set */ + enum MHI_DEBUG_LEVEL log_lvl; + + /* controller specific data */ + void *priv_data; + void *log_buf; + struct dentry *dentry; + struct dentry *parent; + + struct miscdevice miscdev; + +#ifdef ENABLE_MHI_MON + spinlock_t lock; + + /* Ref */ + int nreaders; /* Under mon_lock AND mbus->lock */ + struct list_head r_list; /* Chain of readers (usually one) */ + struct kref ref; /* Under mon_lock */ + + /* Stats */ + unsigned int cnt_events; + unsigned int cnt_text_lost; +#endif +}; + +#ifdef ENABLE_MHI_MON +struct mhi_tre; +struct mon_reader { + struct list_head r_link; + struct mhi_controller *m_bus; + void *r_data; /* Use container_of instead? 
*/ + + void (*rnf_submit)(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len); + void (*rnf_receive)(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len); + void (*rnf_complete)(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre); +}; +#endif + +/** + * struct mhi_device - mhi device structure associated bind to channel + * @dev: Device associated with the channels + * @mtu: Maximum # of bytes controller support + * @ul_chan_id: MHI channel id for UL transfer + * @dl_chan_id: MHI channel id for DL transfer + * @tiocm: Device current terminal settings + * @priv: Driver private data + */ +struct mhi_device { + struct device dev; + u32 vendor; + u32 dev_id; + u32 domain; + u32 bus; + u32 slot; + size_t mtu; + int ul_chan_id; + int dl_chan_id; + int ul_event_id; + int dl_event_id; + u32 tiocm; + const struct mhi_device_id *id; + const char *chan_name; + struct mhi_controller *mhi_cntrl; + struct mhi_chan *ul_chan; + struct mhi_chan *dl_chan; + atomic_t dev_wake; + enum mhi_device_type dev_type; + void *priv_data; + int (*ul_xfer)(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t len, enum MHI_FLAGS flags); + int (*dl_xfer)(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t size, enum MHI_FLAGS flags); + void (*status_cb)(struct mhi_device *mhi_dev, enum MHI_CB reason); +}; + +/** + * struct mhi_result - Completed buffer information + * @buf_addr: Address of data buffer + * @dir: Channel direction + * @bytes_xfer: # of bytes transferred + * @transaction_status: Status of last trasnferred + */ +struct mhi_result { + void *buf_addr; + enum dma_data_direction dir; + size_t bytes_xferd; + int transaction_status; +}; + +/** + * struct mhi_buf - Describes the buffer + * @page: buffer as a page + * @buf: cpu address for the buffer + * @phys_addr: physical address of the buffer + * @dma_addr: iommu address for the buffer + * @skb: skb of ip packet + * @len: # of bytes + * @name: Buffer label, for offload channel configurations name must be: + * ECA - Event context array data + * CCA - Channel context array data + */ +struct mhi_buf { + struct list_head node; + struct page *page; + void *buf; + phys_addr_t phys_addr; + dma_addr_t dma_addr; + struct sk_buff *skb; + size_t len; + const char *name; /* ECA, CCA */ +}; + +/** + * struct mhi_driver - mhi driver information + * @id_table: NULL terminated channel ID names + * @ul_xfer_cb: UL data transfer callback + * @dl_xfer_cb: DL data transfer callback + * @status_cb: Asynchronous status callback + */ +struct mhi_driver { + const struct mhi_device_id *id_table; + int (*probe)(struct mhi_device *mhi_dev, + const struct mhi_device_id *id); + void (*remove)(struct mhi_device *mhi_dev); + void (*ul_xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *res); + void (*dl_xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *res); + void (*status_cb)(struct mhi_device *mhi_dev, enum MHI_CB mhi_cb); + struct device_driver driver; +}; + +#define to_mhi_driver(drv) container_of(drv, struct mhi_driver, driver) +#define to_mhi_device(dev) container_of(dev, struct mhi_device, dev) + +static inline void mhi_device_set_devdata(struct mhi_device *mhi_dev, + void *priv) +{ + mhi_dev->priv_data = priv; +} + +static inline void *mhi_device_get_devdata(struct mhi_device *mhi_dev) +{ + return mhi_dev->priv_data; +} + +/** + * mhi_queue_transfer - Queue a buffer to hardware + * All transfers are asyncronous transfers + * @mhi_dev: Device associated with 
the channels + * @dir: Data direction + * @buf: Data buffer (skb for hardware channels) + * @len: Size in bytes + * @mflags: Interrupt flags for the device + */ +static inline int mhi_queue_transfer(struct mhi_device *mhi_dev, + enum dma_data_direction dir, + void *buf, + size_t len, + enum MHI_FLAGS mflags) +{ + if (dir == DMA_TO_DEVICE) + return mhi_dev->ul_xfer(mhi_dev, mhi_dev->ul_chan, buf, len, + mflags); + else + return mhi_dev->dl_xfer(mhi_dev, mhi_dev->dl_chan, buf, len, + mflags); +} + +static inline void *mhi_controller_get_devdata(struct mhi_controller *mhi_cntrl) +{ + return mhi_cntrl->priv_data; +} + +static inline void mhi_free_controller(struct mhi_controller *mhi_cntrl) +{ + kfree(mhi_cntrl); +} + +/** + * mhi_driver_register - Register driver with MHI framework + * @mhi_drv: mhi_driver structure + */ +int mhi_driver_register(struct mhi_driver *mhi_drv); + +/** + * mhi_driver_unregister - Unregister a driver for mhi_devices + * @mhi_drv: mhi_driver structure + */ +void mhi_driver_unregister(struct mhi_driver *mhi_drv); + +/** + * mhi_device_configure - configure ECA or CCA context + * For offload channels that client manage, call this + * function to configure channel context or event context + * array associated with the channel + * @mhi_div: Device associated with the channels + * @dir: Direction of the channel + * @mhi_buf: Configuration data + * @elements: # of configuration elements + */ +int mhi_device_configure(struct mhi_device *mhi_div, + enum dma_data_direction dir, + struct mhi_buf *mhi_buf, + int elements); + +/** + * mhi_device_get - disable all low power modes + * Only disables lpm, does not immediately exit low power mode + * if controller already in a low power mode + * @mhi_dev: Device associated with the channels + */ +void mhi_device_get(struct mhi_device *mhi_dev); + +/** + * mhi_device_get_sync - disable all low power modes + * Synchronously disable all low power, exit low power mode if + * controller already in a low power state + * @mhi_dev: Device associated with the channels + */ +int mhi_device_get_sync(struct mhi_device *mhi_dev); + +/** + * mhi_device_put - re-enable low power modes + * @mhi_dev: Device associated with the channels + */ +void mhi_device_put(struct mhi_device *mhi_dev); + +/** + * mhi_prepare_for_transfer - setup channel for data transfer + * Moves both UL and DL channel from RESET to START state + * @mhi_dev: Device associated with the channels + */ +int mhi_prepare_for_transfer(struct mhi_device *mhi_dev); + +/** + * mhi_unprepare_from_transfer -unprepare the channels + * Moves both UL and DL channels to RESET state + * @mhi_dev: Device associated with the channels + */ +void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev); + +/** + * mhi_get_no_free_descriptors - Get transfer ring length + * Get # of TD available to queue buffers + * @mhi_dev: Device associated with the channels + * @dir: Direction of the channel + */ +int mhi_get_no_free_descriptors(struct mhi_device *mhi_dev, + enum dma_data_direction dir); + +/** + * mhi_poll - poll for any available data to consume + * This is only applicable for DL direction + * @mhi_dev: Device associated with the channels + * @budget: In descriptors to service before returning + */ +int mhi_poll(struct mhi_device *mhi_dev, u32 budget); + +/** + * mhi_ioctl - user space IOCTL support for MHI channels + * Native support for setting TIOCM + * @mhi_dev: Device associated with the channels + * @cmd: IOCTL cmd + * @arg: Optional parameter, iotcl cmd specific + */ +long 
mhi_ioctl(struct mhi_device *mhi_dev, unsigned int cmd, unsigned long arg);
+
+/**
+ * mhi_alloc_controller - Allocate mhi_controller structure
+ * Allocate controller structure and additional data for controller
+ * private data. You may get the private data pointer by calling
+ * mhi_controller_get_devdata
+ * @size: # of additional bytes to allocate
+ */
+struct mhi_controller *mhi_alloc_controller(size_t size);
+
+/**
+ * of_register_mhi_controller - Register MHI controller
+ * Registers MHI controller with MHI bus framework. DT must be supported
+ * @mhi_cntrl: MHI controller to register
+ */
+int of_register_mhi_controller(struct mhi_controller *mhi_cntrl);
+
+void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_bdf_to_controller - Look up a registered controller
+ * Search for controller based on device identification
+ * @domain: RC domain of the device
+ * @bus: Bus device connected to
+ * @slot: Slot device assigned to
+ * @dev_id: Device Identification
+ */
+struct mhi_controller *mhi_bdf_to_controller(u32 domain, u32 bus, u32 slot,
+                         u32 dev_id);
+
+/**
+ * mhi_prepare_for_power_up - Do pre-initialization before power up
+ * This is optional; call this before power up if the controller does not
+ * want the bus framework to automatically free any allocated memory
+ * during the shutdown process.
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_async_power_up - Starts MHI power up sequence
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_async_power_up(struct mhi_controller *mhi_cntrl);
+int mhi_sync_power_up(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_power_down - Start MHI power down sequence
+ * @mhi_cntrl: MHI controller
+ * @graceful: link is still accessible, so do a graceful shutdown process;
+ * otherwise we will shut down the host w/o putting the device into
+ * RESET state
+ */
+void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful);
+
+/**
+ * mhi_unprepare_after_power_down - free any allocated memory for power up
+ * @mhi_cntrl: MHI controller
+ */
+void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_pm_suspend - Move MHI into a suspended state
+ * Transition to MHI state M3 from M0||M1||M2
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_pm_suspend(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_pm_resume - Resume MHI from suspended state
+ * Transition to MHI state M0 from M3
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_pm_resume(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_download_rddm_img - Download ramdump image from device for
+ * debugging purpose.
+ * @mhi_cntrl: MHI controller
+ * @in_panic: Set if we are trying to capture the image while in a kernel panic
+ */
+int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic);
+
+/**
+ * mhi_force_rddm_mode - Force external device into rddm mode
+ * to collect a device ramdump. This is useful if the host driver asserts
+ * and we need to see the device state as well.
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_get_remote_time_sync - Get external soc time relative to local soc time
+ * using MMIO method. 
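+ * The device time is read over MMIO while the host time comes from the
+ * controller's time_get callback, so the result is only as good as the
+ * spacing between those two samples.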
+ * @mhi_dev: Device associated with the channels + * @t_host: Pointer to output local soc time + * @t_dev: Pointer to output remote soc time + */ +int mhi_get_remote_time_sync(struct mhi_device *mhi_dev, + u64 *t_host, + u64 *t_dev); + +/** + * mhi_get_mhi_state - Return MHI state of device + * @mhi_cntrl: MHI controller + */ +enum mhi_dev_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl); + +/** + * mhi_set_mhi_state - Set device state + * @mhi_cntrl: MHI controller + * @state: state to set + */ +void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, + enum mhi_dev_state state); + + +/** + * mhi_is_active - helper function to determine if MHI in active state + * @mhi_dev: client device + */ +static inline bool mhi_is_active(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + return (mhi_cntrl->dev_state >= MHI_STATE_M0 && + mhi_cntrl->dev_state <= MHI_STATE_M3); +} + +/** + * mhi_debug_reg_dump - dump MHI registers for debug purpose + * @mhi_cntrl: MHI controller + */ +void mhi_debug_reg_dump(struct mhi_controller *mhi_cntrl); + +#ifdef CONFIG_MHI_DEBUG + +#define MHI_VERB(fmt, ...) do { \ + if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_VERBOSE) \ + pr_debug("[D][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__);\ +} while (0) + +#else + +#define MHI_VERB(fmt, ...) + +#endif + +#define MHI_LOG(fmt, ...) do { \ + if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_INFO) \ + pr_info("[I][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__);\ + else if (!mhi_cntrl->klog_slient) \ + printk(KERN_DEBUG "[I][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__);\ +} while (0) + +#define MHI_ERR(fmt, ...) do { \ + if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_ERROR) \ + pr_err("[E][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__); \ +} while (0) + +#define MHI_CRITICAL(fmt, ...) do { \ + if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_CRITICAL) \ + pr_alert("[C][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__); \ +} while (0) + +int mhi_register_mhi_controller(struct mhi_controller *mhi_cntrl); +void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl); + +#ifndef MHI_NAME_SIZE +#define MHI_NAME_SIZE 32 +/** + * * struct mhi_device_id - MHI device identification + * * @chan: MHI channel name + * * @driver_data: driver data; + * */ +struct mhi_device_id { + const char chan[MHI_NAME_SIZE]; + unsigned long driver_data; +}; +#endif + +#endif /* _MHI_H_ */ diff --git a/package/wwan/driver/quectel_MHI/src/core/mhi_boot.c b/package/wwan/driver/quectel_MHI/src/core/mhi_boot.c new file mode 100644 index 000000000..30436c153 --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/core/mhi_boot.c @@ -0,0 +1,846 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mhi.h" +#include "mhi_internal.h" + +/* Software defines */ +/* BHI Version */ +#define BHI_MAJOR_VERSION 0x1 +#define BHI_MINOR_VERSION 0x1 + +#define MSMHWID_NUMDWORDS 6 /* Number of dwords that make the MSMHWID */ +#define OEMPKHASH_NUMDWORDS 48 /* Number of dwords that make the OEM PK HASH */ + +#define IsPBLExecEnv(ExecEnv) ((ExecEnv == MHI_EE_PBL) || (ExecEnv == MHI_EE_EDL) ) + +typedef u32 ULONG; + +typedef struct _bhi_info_type +{ + ULONG bhi_ver_minor; + ULONG bhi_ver_major; + ULONG bhi_image_address_low; + ULONG bhi_image_address_high; + ULONG bhi_image_size; + ULONG bhi_rsvd1; + ULONG bhi_imgtxdb; + ULONG bhi_rsvd2; + ULONG bhi_msivec; + ULONG bhi_rsvd3; + ULONG bhi_ee; + ULONG bhi_status; + ULONG bhi_errorcode; + ULONG bhi_errdbg1; + ULONG bhi_errdbg2; + ULONG bhi_errdbg3; + ULONG bhi_sernum; + ULONG bhi_sblantirollbackver; + ULONG bhi_numsegs; + ULONG bhi_msmhwid[6]; + ULONG bhi_oempkhash[48]; + ULONG bhi_rsvd5; +}BHI_INFO_TYPE, *PBHI_INFO_TYPE; + +static void PrintBhiInfo(struct mhi_controller *mhi_cntrl, BHI_INFO_TYPE *bhi_info) +{ + ULONG index; + char str[128]; + + MHI_LOG("BHI Device Info...\n"); + MHI_LOG("BHI Version = { Major = 0x%X Minor = 0x%X}\n", bhi_info->bhi_ver_major, bhi_info->bhi_ver_minor); + MHI_LOG("BHI Execution Environment = 0x%X\n", bhi_info->bhi_ee); + MHI_LOG("BHI Status = 0x%X\n", bhi_info->bhi_status); + MHI_LOG("BHI Error code = 0x%X { Dbg1 = 0x%X Dbg2 = 0x%X Dbg3 = 0x%X }\n", bhi_info->bhi_errorcode, bhi_info->bhi_errdbg1, bhi_info->bhi_errdbg2, bhi_info->bhi_errdbg3); + MHI_LOG("BHI Serial Number = 0x%X\n", bhi_info->bhi_sernum); + MHI_LOG("BHI SBL Anti-Rollback Ver = 0x%X\n", bhi_info->bhi_sblantirollbackver); + MHI_LOG("BHI Number of Segments = 0x%X\n", bhi_info->bhi_numsegs); + for (index = 0; index < 6; index++) + { + snprintf(str+3*index, sizeof(str)-3*index, "%02x ", bhi_info->bhi_msmhwid[index]); + } + MHI_LOG("BHI MSM HW-Id = %s\n", str); + + for (index = 0; index < 24; index++) + { + snprintf(str+3*index, sizeof(str)-3*index, "%02x ", bhi_info->bhi_oempkhash[index]); + } + MHI_LOG("BHI OEM PK Hash = %s\n", str); +} + +static u32 bhi_read_reg(struct mhi_controller *mhi_cntrl, u32 offset) +{ + u32 out = 0; + int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &out); + + return (ret) ? 
+static int BhiRead(struct mhi_controller *mhi_cntrl, BHI_INFO_TYPE *bhi_info)
+{
+	ULONG index;
+
+	memset(bhi_info, 0x00, sizeof(BHI_INFO_TYPE));
+
+	/* bhi_ver */
+	bhi_info->bhi_ver_minor = bhi_read_reg(mhi_cntrl, BHI_BHIVERSION_MINOR);
+	bhi_info->bhi_ver_major = bhi_read_reg(mhi_cntrl, BHI_BHIVERSION_MAJOR);
+	bhi_info->bhi_image_address_low = bhi_read_reg(mhi_cntrl, BHI_IMGADDR_LOW);
+	bhi_info->bhi_image_address_high = bhi_read_reg(mhi_cntrl, BHI_IMGADDR_HIGH);
+	bhi_info->bhi_image_size = bhi_read_reg(mhi_cntrl, BHI_IMGSIZE);
+	bhi_info->bhi_rsvd1 = bhi_read_reg(mhi_cntrl, BHI_RSVD1);
+	bhi_info->bhi_imgtxdb = bhi_read_reg(mhi_cntrl, BHI_IMGTXDB);
+	bhi_info->bhi_rsvd2 = bhi_read_reg(mhi_cntrl, BHI_RSVD2);
+	bhi_info->bhi_msivec = bhi_read_reg(mhi_cntrl, BHI_INTVEC);
+	bhi_info->bhi_rsvd3 = bhi_read_reg(mhi_cntrl, BHI_RSVD3);
+	bhi_info->bhi_ee = bhi_read_reg(mhi_cntrl, BHI_EXECENV);
+	bhi_info->bhi_status = bhi_read_reg(mhi_cntrl, BHI_STATUS);
+	bhi_info->bhi_errorcode = bhi_read_reg(mhi_cntrl, BHI_ERRCODE);
+	bhi_info->bhi_errdbg1 = bhi_read_reg(mhi_cntrl, BHI_ERRDBG1);
+	bhi_info->bhi_errdbg2 = bhi_read_reg(mhi_cntrl, BHI_ERRDBG2);
+	bhi_info->bhi_errdbg3 = bhi_read_reg(mhi_cntrl, BHI_ERRDBG3);
+	bhi_info->bhi_sernum = bhi_read_reg(mhi_cntrl, BHI_SERIALNU);
+	bhi_info->bhi_sblantirollbackver = bhi_read_reg(mhi_cntrl, BHI_SBLANTIROLLVER);
+	bhi_info->bhi_numsegs = bhi_read_reg(mhi_cntrl, BHI_NUMSEG);
+	for (index = 0; index < MSMHWID_NUMDWORDS; index++)
+	{
+		bhi_info->bhi_msmhwid[index] = bhi_read_reg(mhi_cntrl, BHI_MSMHWID(index));
+	}
+	for (index = 0; index < OEMPKHASH_NUMDWORDS; index++)
+	{
+		bhi_info->bhi_oempkhash[index] = bhi_read_reg(mhi_cntrl, BHI_OEMPKHASH(index));
+	}
+	bhi_info->bhi_rsvd5 = bhi_read_reg(mhi_cntrl, BHI_RSVD5);
+	PrintBhiInfo(mhi_cntrl, bhi_info);
+	/* Check the Execution Environment */
+	if (!IsPBLExecEnv(bhi_info->bhi_ee))
+	{
+		MHI_LOG("E - EE: 0x%X Expected PBL/EDL\n", bhi_info->bhi_ee);
+	}
+
+	/* the snapshot itself cannot fail; an EE mismatch is only logged */
+	return 0;
+}
+
+/* setup rddm vector table for rddm transfer */
+static void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
+			     struct image_info *img_info)
+{
+	struct mhi_buf *mhi_buf = img_info->mhi_buf;
+	struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
+	int i = 0;
+
+	for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
+		MHI_VERB("Setting vector:%pad size:%zu\n",
+			 &mhi_buf->dma_addr, mhi_buf->len);
+		bhi_vec->dma_addr = mhi_buf->dma_addr;
+		bhi_vec->size = mhi_buf->len;
+	}
+}
+
+/* collect rddm during kernel panic */
+static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
+{
+	int ret;
+	struct mhi_buf *mhi_buf;
+	u32 sequence_id;
+	u32 rx_status;
+	enum mhi_ee ee;
+	struct image_info *rddm_image = mhi_cntrl->rddm_image;
+	const u32 delayus = 2000;
+	u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
+	const u32 rddm_timeout_us = 200000;
+	int rddm_retry = rddm_timeout_us / delayus; /* time to enter rddm */
+	void __iomem *base = mhi_cntrl->bhie;
+
+	MHI_LOG("Entered with pm_state:%s dev_state:%s ee:%s\n",
+		to_mhi_pm_state_str(mhi_cntrl->pm_state),
+		TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+		TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+	/*
+	 * This should only be executing during a kernel panic; we expect all
+	 * other cores to shut down while we're collecting the rddm buffer.
+	 * After returning from this function, we expect the device to reset.
+	 *
+	 * Normally we would read/write pm_state only after grabbing pm_lock;
+	 * since we're in a panic, we skip it. There is also no guarantee this
+	 * state change will take effect, since we set it without holding
+	 * pm_lock; it is best effort.
+	 */
+	mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
+	/* update should take effect immediately */
+	smp_wmb();
+
+	/* setup the RX vector table */
+	mhi_rddm_prepare(mhi_cntrl, rddm_image);
+	mhi_buf = &rddm_image->mhi_buf[rddm_image->entries - 1];
+
+	MHI_LOG("Starting BHIe programming for RDDM\n");
+
+	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
+		      upper_32_bits(mhi_buf->dma_addr));
+
+	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
+		      lower_32_bits(mhi_buf->dma_addr));
+
+	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
+	sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
+
+	if (unlikely(!sequence_id))
+		sequence_id = 1;
+
+	mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
+			    BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
+			    sequence_id);
+
+	MHI_LOG("Trigger device into RDDM mode\n");
+	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
+
+	MHI_LOG("Waiting for device to enter RDDM\n");
+	while (rddm_retry--) {
+		ee = mhi_get_exec_env(mhi_cntrl);
+		if (ee == MHI_EE_RDDM)
+			break;
+
+		udelay(delayus);
+	}
+
+	if (rddm_retry <= 0) {
+		/* hardware reset; this forces the device to enter RDDM */
+		MHI_LOG(
+			"Did not enter RDDM, triggering host requested reset to force RDDM\n");
+		mhi_write_reg(mhi_cntrl, mhi_cntrl->regs,
+			      MHI_SOC_RESET_REQ_OFFSET, MHI_SOC_RESET_REQ);
+		udelay(delayus);
+	}
+
+	ee = mhi_get_exec_env(mhi_cntrl);
+	MHI_LOG("Waiting for image download completion, current EE:%s\n",
+		TO_MHI_EXEC_STR(ee));
+	while (retry--) {
+		ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS,
+					 BHIE_RXVECSTATUS_STATUS_BMSK,
+					 BHIE_RXVECSTATUS_STATUS_SHFT,
+					 &rx_status);
+		if (ret)
+			return -EIO;
+
+		if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) {
+			MHI_LOG("RDDM successfully collected\n");
+			return 0;
+		}
+
+		udelay(delayus);
+	}
+
+	ee = mhi_get_exec_env(mhi_cntrl);
+	ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status);
+
+	MHI_ERR("Did not complete RDDM transfer\n");
+	MHI_ERR("Current EE:%s\n", TO_MHI_EXEC_STR(ee));
+	MHI_ERR("RXVEC_STATUS:0x%x, ret:%d\n", rx_status, ret);
+
+	return -EIO;
+}
+
+/* download ramdump image from device */
+int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
+{
+	void __iomem *base = mhi_cntrl->bhie;
+	rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
+	struct image_info *rddm_image = mhi_cntrl->rddm_image;
+	struct mhi_buf *mhi_buf;
+	int ret;
+	u32 rx_status;
+	u32 sequence_id;
+
+	if (!rddm_image)
+		return -ENOMEM;
+
+	if (in_panic)
+		return __mhi_download_rddm_in_panic(mhi_cntrl);
+
+	MHI_LOG("Waiting for device to enter RDDM state from EE:%s\n",
+		TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+	ret = wait_event_timeout(mhi_cntrl->state_event,
+				 mhi_cntrl->ee == MHI_EE_RDDM ||
+				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		MHI_ERR("MHI is not in valid state, pm_state:%s ee:%s\n",
+			to_mhi_pm_state_str(mhi_cntrl->pm_state),
+			TO_MHI_EXEC_STR(mhi_cntrl->ee));
+		return -EIO;
+	}
+
+	mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
+
+	/* vector table is the last entry */
+	mhi_buf = &rddm_image->mhi_buf[rddm_image->entries - 1];
+
+	read_lock_bh(pm_lock);
+	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+		read_unlock_bh(pm_lock);
+		return -EIO;
+	}
+
+	MHI_LOG("Starting BHIe Programming for RDDM\n");
+
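+	/*
+	 * BHIe receive programming, the same doorbell pattern the TX path in
+	 * mhi_fw_load_amss() uses: publish the vector table address and size,
+	 * then ring RXVECDB with a random sequence number. Completion is
+	 * signalled by the matching sequence number showing up in
+	 * RXVECSTATUS together with the XFER_COMPL status.
+	 */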
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS, + upper_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS, + lower_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len); + + sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK; + mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS, + BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT, + sequence_id); + read_unlock_bh(pm_lock); + + MHI_LOG("Upper:0x%x Lower:0x%x len:0x%zx sequence:%u\n", + upper_32_bits(mhi_buf->dma_addr), + lower_32_bits(mhi_buf->dma_addr), + mhi_buf->len, sequence_id); + MHI_LOG("Waiting for image download completion\n"); + + /* waiting for image download completion */ + wait_event_timeout(mhi_cntrl->state_event, + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || + mhi_read_reg_field(mhi_cntrl, base, + BHIE_RXVECSTATUS_OFFS, + BHIE_RXVECSTATUS_STATUS_BMSK, + BHIE_RXVECSTATUS_STATUS_SHFT, + &rx_status) || rx_status, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + return -EIO; + + return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO; +} +EXPORT_SYMBOL(mhi_download_rddm_img); + +static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl, + const struct mhi_buf *mhi_buf) +{ + void __iomem *base = mhi_cntrl->bhie; + rwlock_t *pm_lock = &mhi_cntrl->pm_lock; + u32 tx_status; + + read_lock_bh(pm_lock); + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + read_unlock_bh(pm_lock); + return -EIO; + } + + MHI_LOG("Starting BHIe Programming\n"); + + mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS, + upper_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS, + lower_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len); + + mhi_cntrl->sequence_id = prandom_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK; + mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS, + BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT, + mhi_cntrl->sequence_id); + read_unlock_bh(pm_lock); + + MHI_LOG("Upper:0x%x Lower:0x%x len:0x%zx sequence:%u\n", + upper_32_bits(mhi_buf->dma_addr), + lower_32_bits(mhi_buf->dma_addr), + mhi_buf->len, mhi_cntrl->sequence_id); + MHI_LOG("Waiting for image transfer completion\n"); + + /* waiting for image download completion */ + wait_event_timeout(mhi_cntrl->state_event, + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || + mhi_read_reg_field(mhi_cntrl, base, + BHIE_TXVECSTATUS_OFFS, + BHIE_TXVECSTATUS_STATUS_BMSK, + BHIE_TXVECSTATUS_STATUS_SHFT, + &tx_status) || tx_status, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + return -EIO; + + return (tx_status == BHIE_TXVECSTATUS_STATUS_XFER_COMPL) ? 
0 : -EIO; +} + +static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl, + dma_addr_t dma_addr, + size_t size) +{ + u32 tx_status, val; + u32 ImgTxDb = 0x1; + int i, ret; + void __iomem *base = mhi_cntrl->bhi; + rwlock_t *pm_lock = &mhi_cntrl->pm_lock; + struct { + char *name; + u32 offset; + } error_reg[] = { + { "ERROR_CODE", BHI_ERRCODE }, + { "ERROR_DBG1", BHI_ERRDBG1 }, + { "ERROR_DBG2", BHI_ERRDBG2 }, + { "ERROR_DBG3", BHI_ERRDBG3 }, + { NULL }, + }; + + MHI_LOG("Starting BHI programming\n"); + + /* program start sbl download via bhi protocol */ + read_lock_bh(pm_lock); + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + read_unlock_bh(pm_lock); + goto invalid_pm_state; + } + + mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0); + mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH, + upper_32_bits(dma_addr)); + mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW, + lower_32_bits(dma_addr)); + mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size); + mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT, NUM_MHI_EVT_RINGS); + mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT, NUM_MHI_HW_EVT_RINGS); + mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, mhi_cntrl->msi_irq_base); + mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, ImgTxDb); + read_unlock_bh(pm_lock); + + MHI_LOG("Waiting for image transfer completion\n"); + + /* waiting for image download completion */ + ret = wait_event_timeout(mhi_cntrl->state_event, + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || + mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS, + BHI_STATUS_MASK, BHI_STATUS_SHIFT, + &tx_status) || tx_status, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + goto invalid_pm_state; + + if (tx_status == BHI_STATUS_ERROR) { + MHI_ERR("Image transfer failed\n"); + read_lock_bh(pm_lock); + if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + for (i = 0; error_reg[i].name; i++) { + ret = mhi_read_reg(mhi_cntrl, base, + error_reg[i].offset, &val); + if (ret) + break; + MHI_ERR("reg:%s value:0x%x\n", + error_reg[i].name, val); + } + } + read_unlock_bh(pm_lock); + goto invalid_pm_state; + } + + return (tx_status == BHI_STATUS_SUCCESS) ? 
0 : -ETIMEDOUT;
+
+invalid_pm_state:
+
+	return -EIO;
+}
+
+void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
+			 struct image_info *image_info)
+{
+	int i;
+	struct mhi_buf *mhi_buf = image_info->mhi_buf;
+
+	for (i = 0; i < image_info->entries; i++, mhi_buf++)
+		mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
+				  mhi_buf->dma_addr);
+
+	kfree(image_info->mhi_buf);
+	kfree(image_info);
+}
+
+int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
+			 struct image_info **image_info,
+			 size_t alloc_size)
+{
+	size_t seg_size = mhi_cntrl->seg_len;
+	/* require an additional entry for the vector table */
+	int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1;
+	int i;
+	struct image_info *img_info;
+	struct mhi_buf *mhi_buf;
+
+	MHI_LOG("Allocating bytes:%zu seg_size:%zu total_seg:%d\n",
+		alloc_size, seg_size, segments);
+
+	img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
+	if (!img_info)
+		return -ENOMEM;
+
+	/* allocate memory for entries */
+	img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf),
+				    GFP_KERNEL);
+	if (!img_info->mhi_buf)
+		goto error_alloc_mhi_buf;
+
+	/* allocate and populate vector table */
+	mhi_buf = img_info->mhi_buf;
+	for (i = 0; i < segments; i++, mhi_buf++) {
+		size_t vec_size = seg_size;
+
+		/* last entry is for vector table */
+		if (i == segments - 1)
+			vec_size = sizeof(struct bhi_vec_entry) * i;
+
+		mhi_buf->len = vec_size;
+		mhi_buf->buf = mhi_alloc_coherent(mhi_cntrl, vec_size,
+						  &mhi_buf->dma_addr, GFP_KERNEL);
+		if (!mhi_buf->buf)
+			goto error_alloc_segment;
+
+		MHI_LOG("Entry:%d Address:0x%llx size:%zu\n", i,
+			(unsigned long long)mhi_buf->dma_addr,
+			mhi_buf->len);
+	}
+
+	img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf;
+	img_info->entries = segments;
+	*image_info = img_info;
+
+	MHI_LOG("Successfully allocated bhi vec table\n");
+
+	return 0;
+
+error_alloc_segment:
+	for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
+		mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
+				  mhi_buf->dma_addr);
+
+error_alloc_mhi_buf:
+	kfree(img_info);
+
+	return -ENOMEM;
+}
+
+static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
+			      const struct firmware *firmware,
+			      struct image_info *img_info)
+{
+	size_t remainder = firmware->size;
+	size_t to_cpy;
+	const u8 *buf = firmware->data;
+	int i = 0;
+	struct mhi_buf *mhi_buf = img_info->mhi_buf;
+	struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
+
+	while (remainder) {
+		MHI_ASSERT(i >= img_info->entries, "malformed vector table");
+
+		to_cpy = min(remainder, mhi_buf->len);
+		memcpy(mhi_buf->buf, buf, to_cpy);
+		bhi_vec->dma_addr = mhi_buf->dma_addr;
+		bhi_vec->size = to_cpy;
+
+		MHI_VERB("Setting Vector:0x%llx size: %llu\n",
+			 bhi_vec->dma_addr, bhi_vec->size);
+		buf += to_cpy;
+		remainder -= to_cpy;
+		i++;
+		bhi_vec++;
+		mhi_buf++;
+	}
+}
+
+void mhi_fw_load_worker(struct work_struct *work)
+{
+	int ret;
+	struct mhi_controller *mhi_cntrl;
+	const char *fw_name;
+	const struct firmware *firmware;
+	struct image_info *image_info;
+	void *buf;
+	dma_addr_t dma_addr;
+	size_t size;
+
+	mhi_cntrl = container_of(work, struct mhi_controller, fw_worker);
+
+	MHI_LOG("Waiting for device to enter PBL from EE:%s\n",
+		TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+	ret = wait_event_timeout(mhi_cntrl->state_event,
+				 MHI_IN_PBL(mhi_cntrl->ee) ||
+				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		MHI_ERR("MHI is not in valid state\n");
+		return;
+	}
+
+	MHI_LOG("Device current EE:%s\n",
TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + /* if device in pthru, we do not have to load firmware */ + if (mhi_cntrl->ee == MHI_EE_PTHRU) + return; + + fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ? + mhi_cntrl->edl_image : mhi_cntrl->fw_image; + + if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size || + !mhi_cntrl->seg_len))) { + MHI_ERR("No firmware image defined or !sbl_size || !seg_len\n"); + return; + } + + ret = request_firmware(&firmware, fw_name, mhi_cntrl->dev); + if (ret) { + MHI_ERR("Error loading firmware, ret:%d\n", ret); + return; + } + + size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size; + + /* the sbl size provided is maximum size, not necessarily image size */ + if (size > firmware->size) + size = firmware->size; + + buf = mhi_alloc_coherent(mhi_cntrl, size, &dma_addr, GFP_KERNEL); + if (!buf) { + MHI_ERR("Could not allocate memory for image\n"); + release_firmware(firmware); + return; + } + + /* load sbl image */ + memcpy(buf, firmware->data, size); + ret = mhi_fw_load_sbl(mhi_cntrl, dma_addr, size); + mhi_free_coherent(mhi_cntrl, size, buf, dma_addr); + + if (!mhi_cntrl->fbc_download || ret || mhi_cntrl->ee == MHI_EE_EDL) + release_firmware(firmware); + + /* error or in edl, we're done */ + if (ret || mhi_cntrl->ee == MHI_EE_EDL) + return; + + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->dev_state = MHI_STATE_RESET; + write_unlock_irq(&mhi_cntrl->pm_lock); + + /* + * if we're doing fbc, populate vector tables while + * device transitioning into MHI READY state + */ + if (mhi_cntrl->fbc_download) { + ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image, + firmware->size); + if (ret) { + MHI_ERR("Error alloc size of %zu\n", firmware->size); + goto error_alloc_fw_table; + } + + MHI_LOG("Copying firmware image into vector table\n"); + + /* load the firmware into BHIE vec table */ + mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image); + } + + /* transitioning into MHI RESET->READY state */ + ret = mhi_ready_state_transition(mhi_cntrl); + + MHI_LOG("To Reset->Ready PM_STATE:%s MHI_STATE:%s EE:%s, ret:%d\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee), ret); + + if (!mhi_cntrl->fbc_download) + return; + + if (ret) { + MHI_ERR("Did not transition to READY state\n"); + goto error_read; + } + + /* wait for SBL event */ + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->ee == MHI_EE_SBL || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("MHI did not enter BHIE\n"); + goto error_read; + } + + /* start full firmware image download */ + image_info = mhi_cntrl->fbc_image; + ret = mhi_fw_load_amss(mhi_cntrl, + /* last entry is vec table */ + &image_info->mhi_buf[image_info->entries - 1]); + + MHI_LOG("amss fw_load, ret:%d\n", ret); + + release_firmware(firmware); + + return; + +error_read: + mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image); + mhi_cntrl->fbc_image = NULL; + +error_alloc_fw_table: + release_firmware(firmware); +} + +int BhiWrite(struct mhi_controller *mhi_cntrl, void __user *ubuf, size_t size) +{ + int ret; + dma_addr_t dma_addr; + void *dma_buf; + + MHI_LOG("Device current EE:%s, M:%s, PM:%s\n", + TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl)), + TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + +#if 0 + if (mhi_get_exec_env(mhi_cntrl) == MHI_EE_EDL && mhi_cntrl->ee != MHI_EE_EDL) 
{
+		mhi_cntrl->ee = MHI_EE_EDL;
+		wait_event_timeout(mhi_cntrl->state_event,
+				   MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+				   msecs_to_jiffies(mhi_cntrl->timeout_ms + 500));
+	}
+#endif
+
+#if 0
+	if (!MHI_IN_PBL(mhi_cntrl->ee) || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		MHI_ERR("MHI is not in valid BHI state\n");
+		return -EINVAL;
+	}
+#endif
+
+	if (mhi_cntrl->ee != MHI_EE_EDL) {
+		MHI_ERR("MHI is not in EDL state\n");
+		return -EINVAL;
+	}
+
+	dma_buf = mhi_alloc_coherent(mhi_cntrl, size, &dma_addr, GFP_KERNEL);
+	if (!dma_buf) {
+		MHI_ERR("Could not allocate memory for image\n");
+		return -ENOMEM;
+	}
+
+	ret = copy_from_user(dma_buf, ubuf, size);
+	if (ret) {
+		MHI_ERR("IOCTL_BHI_WRITEIMAGE copy buf error, ret = %d\n", ret);
+		mhi_free_coherent(mhi_cntrl, size, dma_buf, dma_addr);
+		/* copy_from_user() returns the uncopied byte count, not an errno */
+		return -EFAULT;
+	}
+
+	ret = mhi_fw_load_sbl(mhi_cntrl, dma_addr, size);
+	mhi_free_coherent(mhi_cntrl, size, dma_buf, dma_addr);
+
+	if (ret) {
+		MHI_ERR("ret = %d, ee=%d\n", ret, mhi_cntrl->ee);
+		goto error_state;
+	}
+
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	mhi_cntrl->dev_state = MHI_STATE_RESET;
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+
+	/* transitioning into MHI RESET->READY state */
+	ret = mhi_ready_state_transition(mhi_cntrl);
+	if (ret) {
+		MHI_ERR("Did not transition to READY state\n");
+		goto error_state;
+	}
+
+	MHI_LOG("To Reset->Ready PM_STATE:%s MHI_STATE:%s EE:%s, ret:%d\n",
+		to_mhi_pm_state_str(mhi_cntrl->pm_state),
+		TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+		TO_MHI_EXEC_STR(mhi_cntrl->ee), ret);
+
+	/* wait for the Flash Programmer execution environment */
+	ret = wait_event_timeout(mhi_cntrl->state_event,
+				 mhi_cntrl->ee == MHI_EE_FP ||
+				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
+	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		MHI_ERR("MHI did not enter Flash Programmer Environment\n");
+		goto error_state;
+	}
+
+	MHI_LOG("MHI enter Flash Programmer Environment\n");
+	return 0;
+
+error_state:
+	MHI_LOG("Device current EE:%s, M:%s\n",
+		TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl)),
+		TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)));
+
+	return ret;
+}
+
+long bhi_get_dev_info(struct mhi_controller *mhi_cntrl, void __user *ubuf)
+{
+	long ret = -EINVAL;
+	BHI_INFO_TYPE bhi_info;
+
+	ret = BhiRead(mhi_cntrl, &bhi_info);
+	if (ret) {
+		MHI_ERR("IOCTL_BHI_GETDEVINFO BhiRead error, ret = %ld\n", ret);
+		return ret;
+	}
+
+	ret = copy_to_user(ubuf, &bhi_info, sizeof(bhi_info));
+	if (ret) {
+		MHI_ERR("IOCTL_BHI_GETDEVINFO copy error, ret = %ld\n", ret);
+		/* copy_to_user() returns the uncopied byte count, not an errno */
+		ret = -EFAULT;
+	}
+
+	return ret;
+}
+
+long bhi_write_image(struct mhi_controller *mhi_cntrl, void __user *ubuf)
+{
+	long ret = -EINVAL;
+	size_t size;
+
+	ret = copy_from_user(&size, ubuf, sizeof(size));
+	if (ret) {
+		MHI_ERR("IOCTL_BHI_WRITEIMAGE copy size error, ret = %ld\n", ret);
+		return -EFAULT;
+	}
+
+	ret = BhiWrite(mhi_cntrl, ubuf + sizeof(size), size);
+	if (ret) {
+		MHI_ERR("IOCTL_BHI_WRITEIMAGE BhiWrite error, ret = %ld\n", ret);
+	}
+
+	return ret;
+}
diff --git a/package/wwan/driver/quectel_MHI/src/core/mhi_dtr.c b/package/wwan/driver/quectel_MHI/src/core/mhi_dtr.c
new file mode 100644
index 000000000..7ce44b363
--- /dev/null
+++ b/package/wwan/driver/quectel_MHI/src/core/mhi_dtr.c
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. */
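+
+/*
+ * Serial-style modem control signals (DTR/RTS towards the device,
+ * DCD/DSR/RI from it) are tunnelled over a dedicated IP_CTRL channel as
+ * 20-byte control messages. For illustration, a host-to-device message
+ * asserting DTR and RTS for channel 32 (channel number picked arbitrarily
+ * here) would be built from the defines below as:
+ *
+ *   struct dtr_ctrl_msg msg = {
+ *       .preamble = CTRL_MAGIC,
+ *       .msg_id   = CTRL_HOST_STATE,
+ *       .dest_id  = 32,
+ *       .size     = sizeof(u32),
+ *       .msg      = CTRL_MSG_DTR | CTRL_MSG_RTS,
+ *   };
+ */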
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/termios.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include "mhi.h"
+#include "mhi_internal.h"
+
+struct __packed dtr_ctrl_msg {
+	u32 preamble;
+	u32 msg_id;
+	u32 dest_id;
+	u32 size;
+	u32 msg;
+};
+
+#define CTRL_MAGIC (0x4C525443)
+#define CTRL_MSG_DTR BIT(0)
+#define CTRL_MSG_RTS BIT(1)
+#define CTRL_MSG_DCD BIT(0)
+#define CTRL_MSG_DSR BIT(1)
+#define CTRL_MSG_RI BIT(3)
+#define CTRL_HOST_STATE (0x10)
+#define CTRL_DEVICE_STATE (0x11)
+#define CTRL_GET_CHID(dtr) (dtr->dest_id & 0xFF)
+
+static int mhi_dtr_tiocmset(struct mhi_controller *mhi_cntrl,
+			    struct mhi_device *mhi_dev,
+			    u32 tiocm)
+{
+	struct dtr_ctrl_msg *dtr_msg = NULL;
+	struct mhi_chan *dtr_chan = mhi_cntrl->dtr_dev->ul_chan;
+	spinlock_t *res_lock = &mhi_dev->dev.devres_lock;
+	u32 cur_tiocm;
+	int ret = 0;
+
+	cur_tiocm = mhi_dev->tiocm & ~(TIOCM_CD | TIOCM_DSR | TIOCM_RI);
+
+	tiocm &= (TIOCM_DTR | TIOCM_RTS);
+
+	/* state did not change */
+	if (cur_tiocm == tiocm)
+		return 0;
+
+	mutex_lock(&dtr_chan->mutex);
+
+	dtr_msg = kzalloc(sizeof(*dtr_msg), GFP_KERNEL);
+	if (!dtr_msg) {
+		ret = -ENOMEM;
+		goto tiocm_exit;
+	}
+
+	dtr_msg->preamble = CTRL_MAGIC;
+	dtr_msg->msg_id = CTRL_HOST_STATE;
+	dtr_msg->dest_id = mhi_dev->ul_chan_id;
+	dtr_msg->size = sizeof(u32);
+	if (tiocm & TIOCM_DTR)
+		dtr_msg->msg |= CTRL_MSG_DTR;
+	if (tiocm & TIOCM_RTS)
+		dtr_msg->msg |= CTRL_MSG_RTS;
+
+/*
+ * 'minicom -D /dev/mhi_DUN' sends RTS:1 on open and RTS:0 on exit.
+ * RTS:0 prevents the modem from emitting AT responses.
+ * 'busybox microcom', by contrast, does not send any RTS change to the modem.
+ * [75094.969783] mhi_uci_q 0306_00.03.00_DUN: mhi_dtr_tiocmset DTR:0 RTS:1
+ * [75100.210994] mhi_uci_q 0306_00.03.00_DUN: mhi_dtr_tiocmset DTR:0 RTS:0
+ */
+	dev_dbg(&mhi_dev->dev, "%s DTR:%d RTS:%d\n", __func__,
+		!!(tiocm & TIOCM_DTR), !!(tiocm & TIOCM_RTS));
+
+	reinit_completion(&dtr_chan->completion);
+	ret = mhi_queue_transfer(mhi_cntrl->dtr_dev, DMA_TO_DEVICE, dtr_msg,
+				 sizeof(*dtr_msg), MHI_EOT);
+	if (ret)
+		goto tiocm_exit;
+
+	ret = wait_for_completion_timeout(&dtr_chan->completion,
+					  msecs_to_jiffies(mhi_cntrl->timeout_ms));
+	if (!ret) {
+		MHI_ERR("Failed to receive transfer callback\n");
+		ret = -EIO;
+		goto tiocm_exit;
+	}
+
+	ret = 0;
+	spin_lock_irq(res_lock);
+	mhi_dev->tiocm &= ~(TIOCM_DTR | TIOCM_RTS);
+	mhi_dev->tiocm |= tiocm;
+	spin_unlock_irq(res_lock);
+
+tiocm_exit:
+	kfree(dtr_msg);
+	mutex_unlock(&dtr_chan->mutex);
+
+	return ret;
+}
+
+long mhi_ioctl(struct mhi_device *mhi_dev, unsigned int cmd, unsigned long arg)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	int ret;
+
+	/* ioctl not supported by this controller */
+	if (!mhi_cntrl->dtr_dev)
+		return -EIO;
+
+	switch (cmd) {
+	case TIOCMGET:
+		return mhi_dev->tiocm;
+	case TIOCMSET:
+	{
+		u32 tiocm;
+
+		ret = get_user(tiocm, (u32 __user *)arg);
+		if (ret)
+			return ret;
+
+		return mhi_dtr_tiocmset(mhi_cntrl, mhi_dev, tiocm);
+	}
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL(mhi_ioctl);
+
+static int mhi_dtr_queue_inbound(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_device *mhi_dev = mhi_cntrl->dtr_dev;
+	int nr_trbs = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
+	size_t mtu = mhi_dev->mtu;
+	void *buf;
+	int ret = -EIO, i;
+
+	for (i = 0; i < nr_trbs; i++) {
+		buf = kmalloc(mtu, GFP_KERNEL);
+		if (!buf)
+			return -ENOMEM;
+
+		ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, buf, mtu,
+					 MHI_EOT);
+		if (ret) {
+			kfree(buf);
+			return ret;
+		}
+	}
+
+	return
ret; +} + +static void mhi_dtr_dl_xfer_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct dtr_ctrl_msg *dtr_msg = mhi_result->buf_addr; + u32 chan; + spinlock_t *res_lock; + + if (mhi_result->transaction_status == -ENOTCONN) { + kfree(mhi_result->buf_addr); + return; + } + + if (mhi_result->bytes_xferd != sizeof(*dtr_msg)) { + MHI_ERR("Unexpected length %zu received\n", + mhi_result->bytes_xferd); + return; + } + + MHI_LOG("preamble:0x%x msg_id:%u dest_id:%u msg:0x%x\n", + dtr_msg->preamble, dtr_msg->msg_id, dtr_msg->dest_id, + dtr_msg->msg); + + chan = CTRL_GET_CHID(dtr_msg); + if (chan >= mhi_cntrl->max_chan) + goto auto_queue; + + mhi_dev = mhi_cntrl->mhi_chan[chan].mhi_dev; + if (!mhi_dev) + goto auto_queue; + + res_lock = &mhi_dev->dev.devres_lock; + spin_lock_irq(res_lock); + mhi_dev->tiocm &= ~(TIOCM_CD | TIOCM_DSR | TIOCM_RI); + + if (dtr_msg->msg & CTRL_MSG_DCD) + mhi_dev->tiocm |= TIOCM_CD; + + if (dtr_msg->msg & CTRL_MSG_DSR) + mhi_dev->tiocm |= TIOCM_DSR; + + if (dtr_msg->msg & CTRL_MSG_RI) + mhi_dev->tiocm |= TIOCM_RI; + spin_unlock_irq(res_lock); + +auto_queue: + mhi_queue_transfer(mhi_cntrl->dtr_dev, DMA_FROM_DEVICE, mhi_result->buf_addr, + mhi_cntrl->dtr_dev->mtu, MHI_EOT); +} + +static void mhi_dtr_ul_xfer_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *dtr_chan = mhi_cntrl->dtr_dev->ul_chan; + + MHI_VERB("Received with status:%d\n", mhi_result->transaction_status); + if (!mhi_result->transaction_status) + complete(&dtr_chan->completion); +} + +static void mhi_dtr_remove(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + mhi_cntrl->dtr_dev = NULL; +} + +static int mhi_dtr_probe(struct mhi_device *mhi_dev, + const struct mhi_device_id *id) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + int ret; + + MHI_LOG("Enter for DTR control channel\n"); + + mhi_dev->mtu = min_t(size_t, id->driver_data, mhi_dev->mtu); + ret = mhi_prepare_for_transfer(mhi_dev); + if (!ret) + mhi_cntrl->dtr_dev = mhi_dev; + + if (!ret) + ret = mhi_dtr_queue_inbound(mhi_cntrl); + + MHI_LOG("Exit with ret:%d\n", ret); + + return ret; +} + +static const struct mhi_device_id mhi_dtr_table[] = { + { .chan = "IP_CTRL", .driver_data = sizeof(struct dtr_ctrl_msg) }, + {}, +}; + +static struct mhi_driver mhi_dtr_driver = { + .id_table = mhi_dtr_table, + .remove = mhi_dtr_remove, + .probe = mhi_dtr_probe, + .ul_xfer_cb = mhi_dtr_ul_xfer_cb, + .dl_xfer_cb = mhi_dtr_dl_xfer_cb, + .driver = { + .name = "MHI_DTR", + .owner = THIS_MODULE, + } +}; + +int __init mhi_dtr_init(void) +{ + return mhi_driver_register(&mhi_dtr_driver); +} +void mhi_dtr_exit(void) { + mhi_driver_unregister(&mhi_dtr_driver); +} diff --git a/package/wwan/driver/quectel_MHI/src/core/mhi_init.c b/package/wwan/driver/quectel_MHI/src/core/mhi_init.c new file mode 100644 index 000000000..4d21d39ed --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/core/mhi_init.c @@ -0,0 +1,2645 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 
*/
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 11, 0))
+#include <linux/sched/signal.h>
+#else
+#include <linux/sched.h>
+#endif
+#include "mhi.h"
+#include "mhi_internal.h"
+
+struct mhi_controller_map {
+	u32 dev_id;
+	u32 domain;
+	u32 bus;
+	u32 slot;
+};
+
+#define MAX_MHI_CONTROLLER 16
+struct mhi_controller_map mhi_controller_minors[MAX_MHI_CONTROLLER];
+
+#define MHI_CNTRL_DRIVER_NAME "mhi_cntrl_q"
+struct mhi_cntrl_drv {
+	struct list_head head;
+	struct mutex lock;
+	struct class *class;
+	int major;
+};
+static struct mhi_cntrl_drv mhi_cntrl_drv;
+
+const char * const mhi_ee_str[MHI_EE_MAX] = {
+	[MHI_EE_PBL] = "PBL",
+	[MHI_EE_SBL] = "SBL",
+	[MHI_EE_AMSS] = "AMSS",
+	[MHI_EE_RDDM] = "RDDM",
+	[MHI_EE_WFW] = "WFW",
+	[MHI_EE_PTHRU] = "PASS THRU",
+	[MHI_EE_EDL] = "EDL",
+	[MHI_EE_FP] = "FlashProg",
+	[MHI_EE_DISABLE_TRANSITION] = "DISABLE",
+};
+
+const char * const mhi_state_tran_str[MHI_ST_TRANSITION_MAX] = {
+	[MHI_ST_TRANSITION_PBL] = "PBL",
+	[MHI_ST_TRANSITION_READY] = "READY",
+	[MHI_ST_TRANSITION_SBL] = "SBL",
+	[MHI_ST_TRANSITION_MISSION_MODE] = "MISSION MODE",
+	[MHI_ST_TRANSITION_FP] = "FlashProg",
+};
+
+const char * const mhi_state_str[MHI_STATE_MAX] = {
+	[MHI_STATE_RESET] = "RESET",
+	[MHI_STATE_READY] = "READY",
+	[MHI_STATE_M0] = "M0",
+	[MHI_STATE_M1] = "M1",
+	[MHI_STATE_M2] = "M2",
+	[MHI_STATE_M3] = "M3",
+	[MHI_STATE_BHI] = "BHI",
+	[MHI_STATE_SYS_ERR] = "SYS_ERR",
+};
+
+static const char * const mhi_pm_state_str[] = {
+	[MHI_PM_BIT_DISABLE] = "DISABLE",
+	[MHI_PM_BIT_POR] = "POR",
+	[MHI_PM_BIT_M0] = "M0",
+	[MHI_PM_BIT_M2] = "M2",
+	[MHI_PM_BIT_M3_ENTER] = "M?->M3",
+	[MHI_PM_BIT_M3] = "M3",
+	[MHI_PM_BIT_M3_EXIT] = "M3->M0",
+	[MHI_PM_BIT_FW_DL_ERR] = "FW DL Error",
+	[MHI_PM_BIT_SYS_ERR_DETECT] = "SYS_ERR Detect",
+	[MHI_PM_BIT_SYS_ERR_PROCESS] = "SYS_ERR Process",
+	[MHI_PM_BIT_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
+	[MHI_PM_BIT_LD_ERR_FATAL_DETECT] = "LD or Error Fatal Detect",
+};
+
+struct mhi_bus mhi_bus;
+
+const char *to_mhi_pm_state_str(enum MHI_PM_STATE state)
+{
+	int index = find_last_bit((unsigned long *)&state, 32);
+
+	if (index >= ARRAY_SIZE(mhi_pm_state_str))
+		return "Invalid State";
+
+	return mhi_pm_state_str[index];
+}
+
+#if 0
+/* MHI protocol requires the transfer ring to be aligned to the ring length */
+static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
+				  struct mhi_ring *ring,
+				  u64 len)
+{
+	ring->alloc_size = len + (len - 1);
+	ring->pre_aligned = mhi_alloc_coherent(mhi_cntrl, ring->alloc_size,
+					       &ring->dma_handle, GFP_KERNEL);
+	if (!ring->pre_aligned)
+		return -ENOMEM;
+
+	ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
+	ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);
+	return 0;
+}
+#endif
+
+static void mhi_ring_aligned_check(struct mhi_controller *mhi_cntrl, u64 rbase, u64 rlen)
+{
+	uint64_t ra;
+
+	ra = rbase;
+	do_div(ra, roundup_pow_of_two(rlen));
+
+	if (rbase != ra * roundup_pow_of_two(rlen)) {
+		MHI_ERR("bad params ring base not aligned 0x%llx align 0x%lx\n", rbase, roundup_pow_of_two(rlen));
+	}
+}
+
+void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
+{
+	int i;
+	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
+
+	if (mhi_cntrl->msi_allocated == 1) {
+		free_irq(mhi_cntrl->irq[mhi_cntrl->msi_irq_base], mhi_cntrl);
+		return;
+	}
+
+	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+		if (mhi_event->offload_ev)
+			continue;
+
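+		/* release the per-event-ring vector requested in mhi_init_irq_setup() */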
free_irq(mhi_cntrl->irq[mhi_event->msi], mhi_event); + } + + free_irq(mhi_cntrl->irq[mhi_cntrl->msi_irq_base], mhi_cntrl); +} + +int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl) +{ + int i; + int ret; + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + + if (mhi_cntrl->msi_allocated == 1) { + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + mhi_event->msi = 0; + } + + ret = request_threaded_irq(mhi_cntrl->irq[0], NULL, + mhi_one_msi_handlr, IRQF_ONESHOT, "mhi", mhi_cntrl); + if (ret) { + MHI_ERR("Error requesting irq:%d, ret=%d\n", mhi_cntrl->irq[0], ret); + } + return ret; + } + + /* for BHI INTVEC msi */ + ret = request_threaded_irq(mhi_cntrl->irq[mhi_cntrl->msi_irq_base], mhi_intvec_handlr, + mhi_intvec_threaded_handlr, IRQF_ONESHOT, + "mhi", mhi_cntrl); + if (ret) + return ret; + + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + ret = request_irq(mhi_cntrl->irq[mhi_event->msi], + mhi_msi_handlr, IRQF_SHARED, "mhi", + mhi_event); + if (ret) { + MHI_ERR("Error requesting irq:%d for ev:%d\n", + mhi_cntrl->irq[mhi_event->msi], i); + goto error_request; + } + } + + return 0; + +error_request: + for (--i, --mhi_event; i >= 0; i--, mhi_event--) { + if (mhi_event->offload_ev) + continue; + + free_irq(mhi_cntrl->irq[mhi_event->msi], mhi_event); + } + free_irq(mhi_cntrl->irq[0], mhi_cntrl); + + return ret; +} + +void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl) +{ + int i; + struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt; + struct mhi_cmd *mhi_cmd; + struct mhi_event *mhi_event; + struct mhi_ring *ring; + + mhi_cmd = mhi_cntrl->mhi_cmd; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) { + ring = &mhi_cmd->ring; +#if 0 + mhi_free_coherent(mhi_cntrl, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); +#endif + ring->base = NULL; + ring->iommu_base = 0; + } + +#if 0 + mhi_free_coherent(mhi_cntrl, + sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, + mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr); +#endif + + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + ring = &mhi_event->ring; +#if 0 + mhi_free_coherent(mhi_cntrl, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); +#endif + ring->base = NULL; + ring->iommu_base = 0; + } + +#if 0 + mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) * + mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt, + mhi_ctxt->er_ctxt_addr); + + mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) * + mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt, + mhi_ctxt->chan_ctxt_addr); +#endif + + mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->ctrl_seg), mhi_ctxt->ctrl_seg, mhi_ctxt->ctrl_seg_addr); + kfree(mhi_ctxt); + mhi_cntrl->mhi_ctxt = NULL; +} + +static int mhi_init_debugfs_mhi_states_open(struct inode *inode, + struct file *fp) +{ + return single_open(fp, mhi_debugfs_mhi_states_show, inode->i_private); +} + +static int mhi_init_debugfs_mhi_event_open(struct inode *inode, struct file *fp) +{ + return single_open(fp, mhi_debugfs_mhi_event_show, inode->i_private); +} + +static int mhi_init_debugfs_mhi_chan_open(struct inode *inode, struct file *fp) +{ + return single_open(fp, mhi_debugfs_mhi_chan_show, inode->i_private); +} + +static const struct file_operations debugfs_state_ops = { + .open = mhi_init_debugfs_mhi_states_open, + .release = single_release, + .read = seq_read, +}; + +static const struct file_operations debugfs_ev_ops = { + .open = mhi_init_debugfs_mhi_event_open, + 
.release = single_release, + .read = seq_read, +}; + +static const struct file_operations debugfs_chan_ops = { + .open = mhi_init_debugfs_mhi_chan_open, + .release = single_release, + .read = seq_read, +}; + +DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_reset_fops, NULL, + mhi_debugfs_trigger_reset, "%llu\n"); + +#ifdef ENABLE_MHI_MON +struct mon_event_text { + struct list_head e_link; + int type; /* submit, complete, etc. */ + unsigned int tstamp; + u32 chan; + dma_addr_t wp; + struct mhi_tre mhi_tre; + u8 data[32]; + size_t len; +}; + +#define EVENT_MAX (16*PAGE_SIZE / sizeof(struct mon_event_text)) +#define PRINTF_DFL 250 +#define SLAB_NAME_SZ 30 + +struct mon_reader_text { + struct kmem_cache *e_slab; + int nevents; + struct list_head e_list; + struct mon_reader r; /* In C, parent class can be placed anywhere */ + + wait_queue_head_t wait; + int printf_size; + char *printf_buf; + int left_size; + int left_pos; + struct mutex printf_lock; + + char slab_name[SLAB_NAME_SZ]; +}; + +struct mon_text_ptr { + int cnt, limit; + char *pbuf; +}; + +static DEFINE_MUTEX(mon_lock); + +static inline unsigned int mon_get_timestamp(void) +{ + struct timespec64 now; + unsigned int stamp; + + ktime_get_ts64(&now); + stamp = now.tv_sec & 0xFFF; /* 2^32 = 4294967296. Limit to 4096s. */ + stamp = stamp * USEC_PER_SEC + now.tv_nsec / NSEC_PER_USEC; + return stamp; +} + +static void mon_text_event(struct mon_reader_text *rp, + u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len, + char ev_type) +{ + struct mon_event_text *ep; + + if (rp->nevents >= EVENT_MAX || + (ep = kmem_cache_alloc(rp->e_slab, GFP_ATOMIC)) == NULL) { + rp->r.m_bus->cnt_text_lost++; + return; + } + + ep->type = ev_type; + ep->tstamp = mon_get_timestamp(); + ep->chan = chan; + ep->wp = wp; + ep->mhi_tre = *mhi_tre; + if (len > sizeof(ep->data)) + len = sizeof(ep->data); + memcpy(ep->data, buf, len); + ep->len = len; + rp->nevents++; + list_add_tail(&ep->e_link, &rp->e_list); + wake_up(&rp->wait); +} + +static void mon_text_submit(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len) +{ + struct mon_reader_text *rp = data; + mon_text_event(rp, chan, wp, mhi_tre, buf, len, 'W'); +} + +static void mon_text_receive(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len) +{ + struct mon_reader_text *rp = data; + mon_text_event(rp, chan, wp, mhi_tre, buf, len, 'R'); +} + +static void mon_text_complete(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre) +{ + struct mon_reader_text *rp = data; + mon_text_event(rp, chan, wp, mhi_tre, NULL, 0, 'E'); +} + +void mon_reader_add(struct mhi_controller *mbus, struct mon_reader *r) +{ + unsigned long flags; + + spin_lock_irqsave(&mbus->lock, flags); + mbus->nreaders++; + list_add_tail(&r->r_link, &mbus->r_list); + spin_unlock_irqrestore(&mbus->lock, flags); + + kref_get(&mbus->ref); +} + +static void mon_bus_drop(struct kref *r) +{ + struct mhi_controller *mbus = container_of(r, struct mhi_controller, ref); + kfree(mbus); +} + +static void mon_reader_del(struct mhi_controller *mbus, struct mon_reader *r) +{ + unsigned long flags; + + spin_lock_irqsave(&mbus->lock, flags); + list_del(&r->r_link); + --mbus->nreaders; + spin_unlock_irqrestore(&mbus->lock, flags); + + kref_put(&mbus->ref, mon_bus_drop); +} + +static void mon_text_ctor(void *mem) +{ + /* + * Nothing to initialize. No, really! + * So, we fill it with garbage to emulate a reused object. 
+ */ + memset(mem, 0xe5, sizeof(struct mon_event_text)); +} + +static int mon_text_open(struct inode *inode, struct file *file) +{ + struct mhi_controller *mbus; + struct mon_reader_text *rp; + int rc; + + mutex_lock(&mon_lock); + mbus = inode->i_private; + + rp = kzalloc(sizeof(struct mon_reader_text), GFP_KERNEL); + if (rp == NULL) { + rc = -ENOMEM; + goto err_alloc; + } + INIT_LIST_HEAD(&rp->e_list); + init_waitqueue_head(&rp->wait); + mutex_init(&rp->printf_lock); + + rp->printf_size = PRINTF_DFL; + rp->printf_buf = kmalloc(rp->printf_size, GFP_KERNEL); + if (rp->printf_buf == NULL) { + rc = -ENOMEM; + goto err_alloc_pr; + } + + rp->r.m_bus = mbus; + rp->r.r_data = rp; + rp->r.rnf_submit = mon_text_submit; + rp->r.rnf_receive = mon_text_receive; + rp->r.rnf_complete = mon_text_complete; + + snprintf(rp->slab_name, SLAB_NAME_SZ, "mon_text_%p", rp); + rp->e_slab = kmem_cache_create(rp->slab_name, + sizeof(struct mon_event_text), sizeof(long), 0, + mon_text_ctor); + if (rp->e_slab == NULL) { + rc = -ENOMEM; + goto err_slab; + } + + mon_reader_add(mbus, &rp->r); + + file->private_data = rp; + mutex_unlock(&mon_lock); + return 0; + +// err_busy: +// kmem_cache_destroy(rp->e_slab); +err_slab: + kfree(rp->printf_buf); +err_alloc_pr: + kfree(rp); +err_alloc: + mutex_unlock(&mon_lock); + return rc; +} + +static struct mon_event_text *mon_text_fetch(struct mon_reader_text *rp, + struct mhi_controller *mbus) +{ + struct list_head *p; + unsigned long flags; + + spin_lock_irqsave(&mbus->lock, flags); + if (list_empty(&rp->e_list)) { + spin_unlock_irqrestore(&mbus->lock, flags); + return NULL; + } + p = rp->e_list.next; + list_del(p); + --rp->nevents; + spin_unlock_irqrestore(&mbus->lock, flags); + return list_entry(p, struct mon_event_text, e_link); +} + +static struct mon_event_text *mon_text_read_wait(struct mon_reader_text *rp, + struct file *file) +{ + struct mhi_controller *mbus = rp->r.m_bus; + DECLARE_WAITQUEUE(waita, current); + struct mon_event_text *ep; + + add_wait_queue(&rp->wait, &waita); + set_current_state(TASK_INTERRUPTIBLE); + while ((ep = mon_text_fetch(rp, mbus)) == NULL) { + if (file->f_flags & O_NONBLOCK) { + set_current_state(TASK_RUNNING); + remove_wait_queue(&rp->wait, &waita); + return ERR_PTR(-EWOULDBLOCK); + } + /* + * We do not count nwaiters, because ->release is supposed + * to be called when all openers are gone only. 
+ */ + schedule(); + if (signal_pending(current)) { + remove_wait_queue(&rp->wait, &waita); + return ERR_PTR(-EINTR); + } + set_current_state(TASK_INTERRUPTIBLE); + } + set_current_state(TASK_RUNNING); + remove_wait_queue(&rp->wait, &waita); + return ep; +} + +static ssize_t mon_text_read_u(struct file *file, char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct mon_reader_text *rp = file->private_data; + struct mon_event_text *ep; + struct mon_text_ptr ptr; + + if (rp->left_size) { + int cnt = rp->left_size; + + if (cnt > nbytes) + cnt = nbytes; + if (copy_to_user(buf, rp->printf_buf + rp->left_pos, cnt)) + return -EFAULT; + rp->left_pos += cnt; + rp->left_size -= cnt; + return cnt; + } + + if (IS_ERR(ep = mon_text_read_wait(rp, file))) + return PTR_ERR(ep); + mutex_lock(&rp->printf_lock); + ptr.cnt = 0; + ptr.pbuf = rp->printf_buf; + ptr.limit = rp->printf_size; + + ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, + "%u %c %03d WP:%llx TRE: %llx %08x %08x", + ep->tstamp, ep->type, ep->chan, ep->wp, + ep->mhi_tre.ptr, ep->mhi_tre.dword[0], ep->mhi_tre.dword[1]); + + if (ep->len) { + struct mon_text_ptr *p = &ptr; + size_t i = 0; + + for (i = 0; i < ep->len; i++) { + if (i % 4 == 0) { + p->cnt += snprintf(p->pbuf + p->cnt, + p->limit - p->cnt, + " "); + } + p->cnt += snprintf(p->pbuf + p->cnt, + p->limit - p->cnt, + "%02x", ep->data[i]); + } + + } + + ptr.cnt += snprintf(ptr.pbuf +ptr.cnt, ptr.limit - ptr.cnt, "\n"); + + if (ptr.cnt > nbytes) { + rp->left_pos = nbytes; + rp->left_size = ptr.cnt - nbytes; + ptr.cnt = nbytes; + } + + if (copy_to_user(buf, rp->printf_buf, ptr.cnt)) + ptr.cnt = -EFAULT; + mutex_unlock(&rp->printf_lock); + kmem_cache_free(rp->e_slab, ep); + return ptr.cnt; +} + +static int mon_text_release(struct inode *inode, struct file *file) +{ + struct mon_reader_text *rp = file->private_data; + struct mhi_controller *mbus; + /* unsigned long flags; */ + struct list_head *p; + struct mon_event_text *ep; + + mutex_lock(&mon_lock); + mbus = inode->i_private; + + if (mbus->nreaders <= 0) { + mutex_unlock(&mon_lock); + return 0; + } + mon_reader_del(mbus, &rp->r); + + /* + * In theory, e_list is protected by mbus->lock. However, + * after mon_reader_del has finished, the following is the case: + * - we are not on reader list anymore, so new events won't be added; + * - whole mbus may be dropped if it was orphaned. + * So, we better not touch mbus. 
+ */ + /* spin_lock_irqsave(&mbus->lock, flags); */ + while (!list_empty(&rp->e_list)) { + p = rp->e_list.next; + ep = list_entry(p, struct mon_event_text, e_link); + list_del(p); + --rp->nevents; + kmem_cache_free(rp->e_slab, ep); + } + /* spin_unlock_irqrestore(&mbus->lock, flags); */ + + kmem_cache_destroy(rp->e_slab); + kfree(rp->printf_buf); + kfree(rp); + + mutex_unlock(&mon_lock); + return 0; +} + + +static const struct file_operations mon_fops_text_u = { + .owner = THIS_MODULE, + .open = mon_text_open, + .llseek = no_llseek, + .read = mon_text_read_u, + .release = mon_text_release, +}; +#endif + +void mhi_init_debugfs(struct mhi_controller *mhi_cntrl) +{ + struct dentry *dentry; + char node[32]; + +#ifdef ENABLE_MHI_MON + struct mhi_controller *mbus = mhi_cntrl; + + mbus->nreaders = 0; + kref_init(&mbus->ref); + spin_lock_init(&mbus->lock); + INIT_LIST_HEAD(&mbus->r_list); +#endif + + if (!mhi_cntrl->parent) + snprintf(node, sizeof(node), "mhi_%04x_%02u:%02u.%02u", + mhi_cntrl->dev_id, mhi_cntrl->domain, mhi_cntrl->bus, + mhi_cntrl->slot); + else + snprintf(node, sizeof(node), "%04x_%02u:%02u.%02u", + mhi_cntrl->dev_id, mhi_cntrl->domain, mhi_cntrl->bus, + mhi_cntrl->slot); + + dentry = debugfs_create_dir(node, mhi_cntrl->parent); + if (IS_ERR_OR_NULL(dentry)) + return; + + debugfs_create_file("states", 0444, dentry, mhi_cntrl, + &debugfs_state_ops); + debugfs_create_file("events", 0444, dentry, mhi_cntrl, + &debugfs_ev_ops); + debugfs_create_file("chan", 0444, dentry, mhi_cntrl, + &debugfs_chan_ops); + debugfs_create_file("reset", 0444, dentry, mhi_cntrl, + &debugfs_trigger_reset_fops); +#ifdef ENABLE_MHI_MON + debugfs_create_file("mhimon", 0444, dentry, mhi_cntrl, + &mon_fops_text_u); +#endif + mhi_cntrl->dentry = dentry; +} + +void mhi_deinit_debugfs(struct mhi_controller *mhi_cntrl) +{ + debugfs_remove_recursive(mhi_cntrl->dentry); + mhi_cntrl->dentry = NULL; +} + +int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) +{ + struct mhi_ctxt *mhi_ctxt; + struct mhi_chan_ctxt *chan_ctxt; + struct mhi_event_ctxt *er_ctxt; + struct mhi_cmd_ctxt *cmd_ctxt; + struct mhi_chan *mhi_chan; + struct mhi_event *mhi_event; + struct mhi_cmd *mhi_cmd; + int ret = -ENOMEM, i; + + atomic_set(&mhi_cntrl->dev_wake, 0); + atomic_set(&mhi_cntrl->alloc_size, 0); + atomic_set(&mhi_cntrl->pending_pkts, 0); + + mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL); + if (!mhi_ctxt) + return -ENOMEM; + + mhi_ctxt->ctrl_seg = mhi_alloc_coherent(mhi_cntrl, sizeof(*mhi_ctxt->ctrl_seg), + &mhi_ctxt->ctrl_seg_addr, GFP_KERNEL); + MHI_LOG("mhi_ctxt->ctrl_seg = %p\n", mhi_ctxt->ctrl_seg); + if (!mhi_ctxt->ctrl_seg) + goto error_alloc_chan_ctxt; + + if ((unsigned long)mhi_ctxt->ctrl_seg & (4096-1)) { + mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->ctrl_seg), mhi_ctxt->ctrl_seg, mhi_ctxt->ctrl_seg_addr); + goto error_alloc_chan_ctxt; + } + + /* setup channel ctxt */ +#if 1 + mhi_ctxt->chan_ctxt = mhi_ctxt->ctrl_seg->chan_ctxt; + mhi_ctxt->chan_ctxt_addr = mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, chan_ctxt); +#else + mhi_ctxt->chan_ctxt = mhi_alloc_coherent(mhi_cntrl, + sizeof(*mhi_ctxt->chan_ctxt) * mhi_cntrl->max_chan, + &mhi_ctxt->chan_ctxt_addr, GFP_KERNEL); + if (!mhi_ctxt->chan_ctxt) + goto error_alloc_chan_ctxt; +#endif + + mhi_chan = mhi_cntrl->mhi_chan; + chan_ctxt = mhi_ctxt->chan_ctxt; + for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) { + /* If it's offload channel skip this step */ + if (mhi_chan->offload_ch) + continue; + + chan_ctxt->chstate = MHI_CH_STATE_DISABLED; + 
chan_ctxt->brstmode = mhi_chan->db_cfg.brstmode; + chan_ctxt->pollcfg = mhi_chan->db_cfg.pollcfg; + chan_ctxt->chtype = mhi_chan->type; + chan_ctxt->erindex = mhi_chan->er_index; + + mhi_chan->ch_state = MHI_CH_STATE_DISABLED; + mhi_chan->tre_ring.db_addr = &chan_ctxt->wp; + } + + /* setup event context */ +#if 1 + mhi_ctxt->er_ctxt = mhi_ctxt->ctrl_seg->er_ctxt; + mhi_ctxt->er_ctxt_addr = mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, er_ctxt); +#else + mhi_ctxt->er_ctxt = mhi_alloc_coherent(mhi_cntrl, + sizeof(*mhi_ctxt->er_ctxt) * mhi_cntrl->total_ev_rings, + &mhi_ctxt->er_ctxt_addr, GFP_KERNEL); + if (!mhi_ctxt->er_ctxt) + goto error_alloc_er_ctxt; +#endif + + er_ctxt = mhi_ctxt->er_ctxt; + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, + mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + /* it's a satellite ev, we do not touch it */ + if (mhi_event->offload_ev) + continue; + + er_ctxt->intmodc = 0; + er_ctxt->intmodt = mhi_event->intmod; + er_ctxt->ertype = MHI_ER_TYPE_VALID; + if (mhi_cntrl->msi_allocated == 1) { + mhi_event->msi = 0; + } + er_ctxt->msivec = mhi_event->msi; + mhi_event->db_cfg.db_mode = true; + + ring->el_size = sizeof(struct mhi_tre); + ring->len = ring->el_size * ring->elements; +#if 1 + ring->alloc_size = ring->len; + ring->pre_aligned = mhi_ctxt->ctrl_seg->event_ring[i]; + ring->dma_handle = mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, event_ring[i]); + ring->iommu_base = ring->dma_handle; + ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle); +#else + ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len); + if (ret) + goto error_alloc_er; +#endif + + ring->rp = ring->wp = ring->base; + er_ctxt->rbase = ring->iommu_base; + er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase; + er_ctxt->rlen = ring->len; + ring->ctxt_wp = &er_ctxt->wp; + + mhi_ring_aligned_check(mhi_cntrl, er_ctxt->rbase, er_ctxt->rlen); + memset(ring->base, 0xCC, ring->len); + } + + /* setup cmd context */ +#if 1 + mhi_ctxt->cmd_ctxt = mhi_ctxt->ctrl_seg->cmd_ctxt; + mhi_ctxt->cmd_ctxt_addr = mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, cmd_ctxt); +#else + mhi_ctxt->cmd_ctxt = mhi_alloc_coherent(mhi_cntrl, + sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, + &mhi_ctxt->cmd_ctxt_addr, GFP_KERNEL); + if (!mhi_ctxt->cmd_ctxt) + goto error_alloc_er; +#endif + + mhi_cmd = mhi_cntrl->mhi_cmd; + cmd_ctxt = mhi_ctxt->cmd_ctxt; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { + struct mhi_ring *ring = &mhi_cmd->ring; + + ring->el_size = sizeof(struct mhi_tre); + ring->elements = CMD_EL_PER_RING; + ring->len = ring->el_size * ring->elements; +#if 1 + ring->alloc_size = ring->len; + ring->pre_aligned = mhi_ctxt->ctrl_seg->cmd_ring[i]; + ring->dma_handle = mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, cmd_ring[i]); + ring->iommu_base = ring->dma_handle; + ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle); +#else + ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len); + if (ret) + goto error_alloc_cmd; +#endif + + ring->rp = ring->wp = ring->base; + cmd_ctxt->rbase = ring->iommu_base; + cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase; + cmd_ctxt->rlen = ring->len; + ring->ctxt_wp = &cmd_ctxt->wp; + + mhi_ring_aligned_check(mhi_cntrl, cmd_ctxt->rbase, cmd_ctxt->rlen); + } + + mhi_cntrl->mhi_ctxt = mhi_ctxt; + + return 0; + +#if 0 +error_alloc_cmd: + for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) { + struct mhi_ring *ring = &mhi_cmd->ring; + + 
mhi_free_coherent(mhi_cntrl, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); + } + mhi_free_coherent(mhi_cntrl, + sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, + mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr); + i = mhi_cntrl->total_ev_rings; + mhi_event = mhi_cntrl->mhi_event + i; + +error_alloc_er: + for (--i, --mhi_event; i >= 0; i--, mhi_event--) { + struct mhi_ring *ring = &mhi_event->ring; + + if (mhi_event->offload_ev) + continue; + + mhi_free_coherent(mhi_cntrl, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); + } + mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) * + mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt, + mhi_ctxt->er_ctxt_addr); + +error_alloc_er_ctxt: + mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) * + mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt, + mhi_ctxt->chan_ctxt_addr); +#endif + +error_alloc_chan_ctxt: + kfree(mhi_ctxt); + + return ret; +} + +static int mhi_get_tsync_er_cfg(struct mhi_controller *mhi_cntrl) +{ + int i; + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + + /* find event ring with timesync support */ + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) + if (mhi_event->data_type == MHI_ER_TSYNC_ELEMENT_TYPE) + return mhi_event->er_index; + + return -ENOENT; +} + +int mhi_init_timesync(struct mhi_controller *mhi_cntrl) +{ + struct mhi_timesync *mhi_tsync; + u32 time_offset, db_offset; + int ret; + + read_lock_bh(&mhi_cntrl->pm_lock); + + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + ret = -EIO; + goto exit_timesync; + } + + ret = mhi_get_capability_offset(mhi_cntrl, TIMESYNC_CAP_ID, + &time_offset); + if (ret) { + MHI_LOG("No timesync capability found\n"); + goto exit_timesync; + } + + read_unlock_bh(&mhi_cntrl->pm_lock); + + if (!mhi_cntrl->time_get || !mhi_cntrl->lpm_disable || + !mhi_cntrl->lpm_enable) + return -EINVAL; + + /* register method supported */ + mhi_tsync = kzalloc(sizeof(*mhi_tsync), GFP_KERNEL); + if (!mhi_tsync) + return -ENOMEM; + + spin_lock_init(&mhi_tsync->lock); + INIT_LIST_HEAD(&mhi_tsync->head); + init_completion(&mhi_tsync->completion); + + /* save time_offset for obtaining time */ + MHI_LOG("TIME OFFS:0x%x\n", time_offset); + mhi_tsync->time_reg = mhi_cntrl->regs + time_offset + + TIMESYNC_TIME_LOW_OFFSET; + + mhi_cntrl->mhi_tsync = mhi_tsync; + + ret = mhi_create_timesync_sysfs(mhi_cntrl); + if (unlikely(ret)) { + /* kernel method still work */ + MHI_ERR("Failed to create timesync sysfs nodes\n"); + } + + read_lock_bh(&mhi_cntrl->pm_lock); + + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + ret = -EIO; + goto exit_timesync; + } + + /* get DB offset if supported, else return */ + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, + time_offset + TIMESYNC_DB_OFFSET, &db_offset); + if (ret || !db_offset) { + ret = 0; + goto exit_timesync; + } + + MHI_LOG("TIMESYNC_DB OFFS:0x%x\n", db_offset); + mhi_tsync->db = mhi_cntrl->regs + db_offset; + + read_unlock_bh(&mhi_cntrl->pm_lock); + + /* get time-sync event ring configuration */ + ret = mhi_get_tsync_er_cfg(mhi_cntrl); + if (ret < 0) { + MHI_LOG("Could not find timesync event ring\n"); + return ret; + } + + mhi_tsync->er_index = ret; + + ret = mhi_send_cmd(mhi_cntrl, NULL, MHI_CMD_TIMSYNC_CFG); + if (ret) { + MHI_ERR("Failed to send time sync cfg cmd\n"); + return ret; + } + + ret = wait_for_completion_timeout(&mhi_tsync->completion, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || mhi_tsync->ccs != MHI_EV_CC_SUCCESS) { + MHI_ERR("Failed to get time cfg cmd completion\n"); + return -EIO; + } + + return 0; + 
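+
+/* error paths that jump here still hold pm_lock (read side) */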
+exit_timesync: + read_unlock_bh(&mhi_cntrl->pm_lock); + + return ret; +} + +int mhi_init_mmio(struct mhi_controller *mhi_cntrl) +{ + u32 val; + int i, ret; + struct mhi_chan *mhi_chan; + struct mhi_event *mhi_event; + void __iomem *base = mhi_cntrl->regs; + struct { + u32 offset; + u32 mask; + u32 shift; + u32 val; + } reg_info[] = { + { + CCABAP_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), + }, + { + CCABAP_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), + }, + { + ECABAP_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), + }, + { + ECABAP_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), + }, + { + CRCBAP_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), + }, + { + CRCBAP_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), + }, +#if 0 //carl.yin 20190527 UDE-WIN-InitMmio + { + MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT, + mhi_cntrl->total_ev_rings, + }, + { + MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT, + mhi_cntrl->hw_ev_rings, + }, +#endif + { + MHICTRLBASE_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->mhi_ctxt->ctrl_seg_addr), + }, + { + MHICTRLBASE_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->mhi_ctxt->ctrl_seg_addr), + }, + { + MHIDATABASE_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->iova_start), + }, + { + MHIDATABASE_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->iova_start), + }, + { + MHICTRLLIMIT_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->mhi_ctxt->ctrl_seg_addr+sizeof(struct mhi_ctrl_seg)), + }, + { + MHICTRLLIMIT_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->mhi_ctxt->ctrl_seg_addr+sizeof(struct mhi_ctrl_seg)), + }, + { + MHIDATALIMIT_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->iova_stop), + }, + { + MHIDATALIMIT_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->iova_stop), + }, + { 0, 0, 0 } + }; + + MHI_LOG("Initializing MMIO\n"); + + /* set up DB register for all the chan rings */ + ret = mhi_read_reg_field(mhi_cntrl, base, CHDBOFF, CHDBOFF_CHDBOFF_MASK, + CHDBOFF_CHDBOFF_SHIFT, &val); + if (ret) + return -EIO; + + MHI_LOG("CHDBOFF:0x%x\n", val); + + /* setup wake db */ + mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB); +#if 0 //'EM120RGLAPR02A07M4G_11' will treate as chan 127's interrput, and report complete event over cmd ring, but cmd ring is not set by now + mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0); + mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0); + mhi_cntrl->wake_set = false; +#endif + + /* setup channel db addresses */ + mhi_chan = mhi_cntrl->mhi_chan; + for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++) + mhi_chan->tre_ring.db_addr = base + val; + + /* setup event ring db addresses */ + ret = mhi_read_reg_field(mhi_cntrl, base, ERDBOFF, ERDBOFF_ERDBOFF_MASK, + ERDBOFF_ERDBOFF_SHIFT, &val); + if (ret) + return -EIO; + + MHI_LOG("ERDBOFF:0x%x\n", val); + + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + mhi_event->ring.db_addr = base + val; + } + + /* set up DB register for primary CMD rings */ + mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER; + + MHI_LOG("Programming all MMIO values.\n"); + for (i = 0; reg_info[i].offset; i++) + mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset, + reg_info[i].mask, reg_info[i].shift, + reg_info[i].val); + + return 0; +} + +void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + 
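+	/*
+	 * Teardown mirror of mhi_init_chan_ctxt() below: the TRE ring lives
+	 * in the statically carved ctrl_seg (hence the #if 0 around the
+	 * coherent free), so only the host-side buf_ring bookkeeping array
+	 * is actually freed here.
+	 */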
struct mhi_ring *buf_ring; + struct mhi_ring *tre_ring; + struct mhi_chan_ctxt *chan_ctxt; + + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan]; + +#if 0 + mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size, + tre_ring->pre_aligned, tre_ring->dma_handle); +#endif + kfree(buf_ring->base); + + buf_ring->base = tre_ring->base = NULL; + chan_ctxt->rbase = 0; +} + +int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring; + struct mhi_ring *tre_ring; + struct mhi_chan_ctxt *chan_ctxt; + int ret; + + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + tre_ring->el_size = sizeof(struct mhi_tre); + tre_ring->len = tre_ring->el_size * tre_ring->elements; + chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan]; +#if 1 + tre_ring->alloc_size = tre_ring->len; + if (MHI_CLIENT_IP_HW_0_IN == mhi_chan->chan) { + tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->hw_in_chan_ring[mhi_chan->ring]; + tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, hw_in_chan_ring[mhi_chan->ring]); + } + else if (MHI_CLIENT_IP_HW_0_OUT == mhi_chan->chan) { + tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->hw_out_chan_ring[mhi_chan->ring]; + tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, hw_out_chan_ring[mhi_chan->ring]); + } +#ifdef ENABLE_IP_SW0 + else if (MHI_CLIENT_IP_SW_0_IN == mhi_chan->chan) { + tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->sw_in_chan_ring[mhi_chan->ring]; + tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, sw_in_chan_ring[mhi_chan->ring]); + } + else if (MHI_CLIENT_IP_SW_0_OUT == mhi_chan->chan) { + tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->sw_out_chan_ring[mhi_chan->ring]; + tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, sw_out_chan_ring[mhi_chan->ring]); + } +#endif + else if (MHI_CLIENT_DIAG_IN == mhi_chan->chan) { + tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->diag_in_chan_ring[mhi_chan->ring]; + tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, diag_in_chan_ring[mhi_chan->ring]); + } + else { + tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->chan_ring[mhi_chan->ring]; + tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, chan_ring[mhi_chan->ring]); + } + tre_ring->iommu_base = tre_ring->dma_handle; + tre_ring->base = tre_ring->pre_aligned + (tre_ring->iommu_base - tre_ring->dma_handle); + ret = 0; +#else + ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len); +#endif + if (ret) + return -ENOMEM; + + buf_ring->el_size = sizeof(struct mhi_buf_info); + buf_ring->len = buf_ring->el_size * buf_ring->elements; + buf_ring->base = kzalloc(buf_ring->len, GFP_KERNEL); + + if (!buf_ring->base) { +#if 0 + mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size, + tre_ring->pre_aligned, tre_ring->dma_handle); +#endif + return -ENOMEM; + } + + chan_ctxt->chstate = MHI_CH_STATE_ENABLED; + chan_ctxt->rbase = tre_ring->iommu_base; + chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase; + chan_ctxt->rlen = tre_ring->len; + tre_ring->ctxt_wp = &chan_ctxt->wp; + + tre_ring->rp = tre_ring->wp = tre_ring->base; + buf_ring->rp = buf_ring->wp = buf_ring->base; + mhi_chan->db_cfg.db_mode = true; + + mhi_ring_aligned_check(mhi_cntrl, chan_ctxt->rbase, 
chan_ctxt->rlen); + /* update to all cores */ + smp_wmb(); + + return 0; +} + +int mhi_device_configure(struct mhi_device *mhi_dev, + enum dma_data_direction dir, + struct mhi_buf *cfg_tbl, + int elements) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan; + struct mhi_event_ctxt *er_ctxt; + struct mhi_chan_ctxt *ch_ctxt; + int er_index, chan; + + switch (dir) { + case DMA_TO_DEVICE: + mhi_chan = mhi_dev->ul_chan; + break; + case DMA_BIDIRECTIONAL: + case DMA_FROM_DEVICE: + case DMA_NONE: + mhi_chan = mhi_dev->dl_chan; + break; + default: + return -EINVAL; + } + + er_index = mhi_chan->er_index; + chan = mhi_chan->chan; + + for (; elements > 0; elements--, cfg_tbl++) { + /* update event context array */ + if (!strcmp(cfg_tbl->name, "ECA")) { + er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[er_index]; + if (sizeof(*er_ctxt) != cfg_tbl->len) { + MHI_ERR( + "Invalid ECA size, expected:%zu actual%zu\n", + sizeof(*er_ctxt), cfg_tbl->len); + return -EINVAL; + } + memcpy((void *)er_ctxt, cfg_tbl->buf, sizeof(*er_ctxt)); + continue; + } + + /* update channel context array */ + if (!strcmp(cfg_tbl->name, "CCA")) { + ch_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[chan]; + if (cfg_tbl->len != sizeof(*ch_ctxt)) { + MHI_ERR( + "Invalid CCA size, expected:%zu actual:%zu\n", + sizeof(*ch_ctxt), cfg_tbl->len); + return -EINVAL; + } + memcpy((void *)ch_ctxt, cfg_tbl->buf, sizeof(*ch_ctxt)); + continue; + } + + return -EINVAL; + } + + return 0; +} + +#if 0 +static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl, + struct device_node *of_node) +{ + int i, ret, num = 0; + struct mhi_event *mhi_event; + struct device_node *child; + + of_node = of_find_node_by_name(of_node, "mhi_events"); + if (!of_node) + return -EINVAL; + + for_each_available_child_of_node(of_node, child) { + if (!strcmp(child->name, "mhi_event")) + num++; + } + + if (!num) + return -EINVAL; + + mhi_cntrl->total_ev_rings = num; + mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event), + GFP_KERNEL); + if (!mhi_cntrl->mhi_event) + return -ENOMEM; + + /* populate ev ring */ + mhi_event = mhi_cntrl->mhi_event; + i = 0; + for_each_available_child_of_node(of_node, child) { + if (strcmp(child->name, "mhi_event")) + continue; + + mhi_event->er_index = i++; + ret = of_property_read_u32(child, "mhi,num-elements", + (u32 *)&mhi_event->ring.elements); + if (ret) + goto error_ev_cfg; + + ret = of_property_read_u32(child, "mhi,intmod", + &mhi_event->intmod); + if (ret) + goto error_ev_cfg; + + ret = of_property_read_u32(child, "mhi,msi", + &mhi_event->msi); + if (ret) + goto error_ev_cfg; + + ret = of_property_read_u32(child, "mhi,chan", + &mhi_event->chan); + if (!ret) { + if (mhi_event->chan >= mhi_cntrl->max_chan) + goto error_ev_cfg; + /* this event ring has a dedicated channel */ + mhi_event->mhi_chan = + &mhi_cntrl->mhi_chan[mhi_event->chan]; + } + + ret = of_property_read_u32(child, "mhi,priority", + &mhi_event->priority); + if (ret) + goto error_ev_cfg; + + ret = of_property_read_u32(child, "mhi,brstmode", + &mhi_event->db_cfg.brstmode); + if (ret || MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode)) + goto error_ev_cfg; + + mhi_event->db_cfg.process_db = + (mhi_event->db_cfg.brstmode == MHI_BRSTMODE_ENABLE) ? 
+ mhi_db_brstmode : mhi_db_brstmode_disable; + + ret = of_property_read_u32(child, "mhi,data-type", + &mhi_event->data_type); + if (ret) + mhi_event->data_type = MHI_ER_DATA_ELEMENT_TYPE; + + if (mhi_event->data_type > MHI_ER_DATA_TYPE_MAX) + goto error_ev_cfg; + + switch (mhi_event->data_type) { + case MHI_ER_DATA_ELEMENT_TYPE: + mhi_event->process_event = mhi_process_data_event_ring; + break; + case MHI_ER_CTRL_ELEMENT_TYPE: + mhi_event->process_event = mhi_process_ctrl_ev_ring; + break; + case MHI_ER_TSYNC_ELEMENT_TYPE: + mhi_event->process_event = mhi_process_tsync_event_ring; + break; + } + + mhi_event->hw_ring = of_property_read_bool(child, "mhi,hw-ev"); + if (mhi_event->hw_ring) + mhi_cntrl->hw_ev_rings++; + else + mhi_cntrl->sw_ev_rings++; + mhi_event->cl_manage = of_property_read_bool(child, + "mhi,client-manage"); + mhi_event->offload_ev = of_property_read_bool(child, + "mhi,offload"); + mhi_event++; + } + + /* we need msi for each event ring + additional one for BHI */ + mhi_cntrl->msi_required = mhi_cntrl->total_ev_rings + 1; + + return 0; + +error_ev_cfg: + + kfree(mhi_cntrl->mhi_event); + return -EINVAL; +} +static int of_parse_ch_cfg(struct mhi_controller *mhi_cntrl, + struct device_node *of_node) +{ + int ret; + struct device_node *child; + u32 chan; + + ret = of_property_read_u32(of_node, "mhi,max-channels", + &mhi_cntrl->max_chan); + if (ret) + return ret; + + of_node = of_find_node_by_name(of_node, "mhi_channels"); + if (!of_node) + return -EINVAL; + + mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, + sizeof(*mhi_cntrl->mhi_chan), GFP_KERNEL); + if (!mhi_cntrl->mhi_chan) + return -ENOMEM; + + INIT_LIST_HEAD(&mhi_cntrl->lpm_chans); + + /* populate channel configurations */ + for_each_available_child_of_node(of_node, child) { + struct mhi_chan *mhi_chan; + + if (strcmp(child->name, "mhi_chan")) + continue; + + ret = of_property_read_u32(child, "reg", &chan); + if (ret || chan >= mhi_cntrl->max_chan) + goto error_chan_cfg; + + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + + ret = of_property_read_string(child, "label", + &mhi_chan->name); + if (ret) + goto error_chan_cfg; + + mhi_chan->chan = chan; + + ret = of_property_read_u32(child, "mhi,num-elements", + (u32 *)&mhi_chan->tre_ring.elements); + if (!ret && !mhi_chan->tre_ring.elements) + goto error_chan_cfg; + + /* + * For some channels, local ring len should be bigger than + * transfer ring len due to internal logical channels in device. + * So host can queue much more buffers than transfer ring len. + * Example, RSC channels should have a larger local channel + * than transfer ring length. 
+ */ + ret = of_property_read_u32(child, "mhi,local-elements", + (u32 *)&mhi_chan->buf_ring.elements); + if (ret) + mhi_chan->buf_ring.elements = + mhi_chan->tre_ring.elements; + + ret = of_property_read_u32(child, "mhi,event-ring", + &mhi_chan->er_index); + if (ret) + goto error_chan_cfg; + + ret = of_property_read_u32(child, "mhi,chan-dir", + &mhi_chan->dir); + if (ret) + goto error_chan_cfg; + + /* + * For most channels, chtype is identical to channel directions, + * if not defined, assign ch direction to chtype + */ + ret = of_property_read_u32(child, "mhi,chan-type", + &mhi_chan->type); + if (ret) + mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir; + + ret = of_property_read_u32(child, "mhi,ee", &mhi_chan->ee_mask); + if (ret) + goto error_chan_cfg; + + of_property_read_u32(child, "mhi,pollcfg", + &mhi_chan->db_cfg.pollcfg); + + ret = of_property_read_u32(child, "mhi,data-type", + &mhi_chan->xfer_type); + if (ret) + goto error_chan_cfg; + + switch (mhi_chan->xfer_type) { + case MHI_XFER_BUFFER: + mhi_chan->gen_tre = mhi_gen_tre; + mhi_chan->queue_xfer = mhi_queue_buf; + break; + case MHI_XFER_SKB: + mhi_chan->queue_xfer = mhi_queue_skb; + break; + case MHI_XFER_SCLIST: + mhi_chan->gen_tre = mhi_gen_tre; + mhi_chan->queue_xfer = mhi_queue_sclist; + break; + case MHI_XFER_NOP: + mhi_chan->queue_xfer = mhi_queue_nop; + break; + case MHI_XFER_DMA: + case MHI_XFER_RSC_DMA: + mhi_chan->queue_xfer = mhi_queue_dma; + break; + default: + goto error_chan_cfg; + } + + mhi_chan->lpm_notify = of_property_read_bool(child, + "mhi,lpm-notify"); + mhi_chan->offload_ch = of_property_read_bool(child, + "mhi,offload-chan"); + mhi_chan->db_cfg.reset_req = of_property_read_bool(child, + "mhi,db-mode-switch"); + mhi_chan->pre_alloc = of_property_read_bool(child, + "mhi,auto-queue"); + mhi_chan->auto_start = of_property_read_bool(child, + "mhi,auto-start"); + mhi_chan->wake_capable = of_property_read_bool(child, + "mhi,wake-capable"); + + if (mhi_chan->pre_alloc && + (mhi_chan->dir != DMA_FROM_DEVICE || + mhi_chan->xfer_type != MHI_XFER_BUFFER)) + goto error_chan_cfg; + + /* bi-dir and dirctionless channels must be a offload chan */ + if ((mhi_chan->dir == DMA_BIDIRECTIONAL || + mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) + goto error_chan_cfg; + + /* if mhi host allocate the buffers then client cannot queue */ + if (mhi_chan->pre_alloc) + mhi_chan->queue_xfer = mhi_queue_nop; + + if (!mhi_chan->offload_ch) { + ret = of_property_read_u32(child, "mhi,doorbell-mode", + &mhi_chan->db_cfg.brstmode); + if (ret || + MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) + goto error_chan_cfg; + + mhi_chan->db_cfg.process_db = + (mhi_chan->db_cfg.brstmode == + MHI_BRSTMODE_ENABLE) ? 
+ mhi_db_brstmode : mhi_db_brstmode_disable; + } + + mhi_chan->configured = true; + + if (mhi_chan->lpm_notify) + list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans); + } + + return 0; + +error_chan_cfg: + kfree(mhi_cntrl->mhi_chan); + + return -EINVAL; +} +#else +static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl, + struct device_node *of_node) +{ + int i, num = 0; + struct mhi_event *mhi_event; + + num = NUM_MHI_EVT_RINGS; + mhi_cntrl->total_ev_rings = num; + mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event), + GFP_KERNEL); + if (!mhi_cntrl->mhi_event) + return -ENOMEM; + + mhi_cntrl->msi_irq_base = 0; + /* populate ev ring */ + mhi_event = mhi_cntrl->mhi_event; + i = 0; + + for (i = 0; i < mhi_cntrl->total_ev_rings; i++) { + mhi_event->er_index = i; + + mhi_event->ring.elements = NUM_MHI_EVT_RING_ELEMENTS; //Event ring length in elements + if (i == PRIMARY_EVENT_RING || i == ADPL_EVT_RING) + mhi_event->ring.elements = 256; //256 is enough, and 1024 some times make driver fail to open channel (reason is x6x fail to malloc) + + mhi_event->intmod = 1; //Interrupt moderation time in ms + + /* see mhi_netdev_status_cb(), when interrupt come, the napi_poll maybe scheduled, so can reduce interrupts + root@OpenWrt:/# cat /proc/interrupts | grep mhi + root@OpenWrt:/# cat /sys/kernel/debug/mhi_q/mhi_netdev/pcie_mhi_0306_00.01.00_0/rx_int + */ + if (i == IPA_IN_EVENT_RING) + mhi_event->intmod = 5; + +#ifdef ENABLE_IP_SW0 + if (i == SW_0_IN_EVT_RING) + mhi_event->intmod = 5; +#endif + + mhi_event->msi = 1 + i + mhi_cntrl->msi_irq_base; //MSI associated with this event ring + + if (i == IPA_OUT_EVENT_RING) + mhi_event->chan = MHI_CLIENT_IP_HW_0_OUT; //Dedicated channel number, if it's a dedicated event ring + else if (i == IPA_IN_EVENT_RING) + mhi_event->chan = MHI_CLIENT_IP_HW_0_IN; //Dedicated channel number, if it's a dedicated event ring +#ifdef ENABLE_IP_SW0 + else if (i == SW_0_OUT_EVT_RING) + mhi_event->chan = MHI_CLIENT_IP_SW_0_OUT; + else if (i == SW_0_IN_EVT_RING) + mhi_event->chan = MHI_CLIENT_IP_SW_0_IN; +#endif + else + mhi_event->chan = 0; + + /* this event ring has a dedicated channel */ + mhi_event->mhi_chan = + &mhi_cntrl->mhi_chan[mhi_event->chan]; + + mhi_event->priority = 1; //Event ring priority, set to 1 for now + + if (mhi_event->chan && mhi_event->mhi_chan->db_cfg.brstmode == MHI_BRSTMODE_ENABLE) + mhi_event->db_cfg.brstmode = MHI_BRSTMODE_ENABLE; + else + mhi_event->db_cfg.brstmode = MHI_BRSTMODE_DISABLE; + + mhi_event->db_cfg.process_db = + (mhi_event->db_cfg.brstmode == MHI_BRSTMODE_ENABLE) ? 
+ mhi_db_brstmode : mhi_db_brstmode_disable; + + if (i == IPA_OUT_EVENT_RING || i == IPA_IN_EVENT_RING) + mhi_event->data_type = MHI_ER_DATA_ELEMENT_TYPE; +#ifdef ENABLE_IP_SW0 + else if (i == SW_0_OUT_EVT_RING || i == SW_0_IN_EVT_RING) + mhi_event->data_type = MHI_ER_DATA_ELEMENT_TYPE; +#endif + else + mhi_event->data_type = MHI_ER_CTRL_ELEMENT_TYPE; + + switch (mhi_event->data_type) { + case MHI_ER_DATA_ELEMENT_TYPE: + mhi_event->process_event = mhi_process_data_event_ring; + break; + case MHI_ER_CTRL_ELEMENT_TYPE: + mhi_event->process_event = mhi_process_ctrl_ev_ring; + break; + case MHI_ER_TSYNC_ELEMENT_TYPE: + mhi_event->process_event = mhi_process_tsync_event_ring; + break; + } + + if (i == IPA_OUT_EVENT_RING || i == IPA_IN_EVENT_RING) + mhi_event->hw_ring = true; + else + mhi_event->hw_ring = false; + + if (mhi_event->hw_ring) + mhi_cntrl->hw_ev_rings++; + else + mhi_cntrl->sw_ev_rings++; + + mhi_event->cl_manage = false; + if (mhi_event->chan == MHI_CLIENT_IP_HW_0_IN || mhi_event->chan == MHI_CLIENT_IP_SW_0_IN) + mhi_event->cl_manage = true; + mhi_event->offload_ev = false; + mhi_event++; + } + + /* we need msi for each event ring + additional one for BHI */ + mhi_cntrl->msi_required = mhi_cntrl->total_ev_rings + 1 + mhi_cntrl->msi_irq_base; + + return 0; +} + +struct chan_cfg_t { + const char *chan_name; + u32 chan_id; + u32 elements; +}; + +static struct chan_cfg_t chan_cfg[] = { +//"Qualcomm PCIe Loopback" + {"LOOPBACK", MHI_CLIENT_LOOPBACK_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"LOOPBACK", MHI_CLIENT_LOOPBACK_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +//"Qualcomm PCIe Sahara" + {"SAHARA", MHI_CLIENT_SAHARA_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"SAHARA", MHI_CLIENT_SAHARA_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +//"Qualcomm PCIe Diagnostics" + {"DIAG", MHI_CLIENT_DIAG_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"DIAG", MHI_CLIENT_DIAG_IN, NUM_MHI_DIAG_IN_RING_ELEMENTS}, +//"Qualcomm PCIe QDSS Data" + {"QDSS", MHI_CLIENT_QDSS_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"QDSS", MHI_CLIENT_QDSS_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +//"Qualcomm PCIe EFS" + {"EFS", MHI_CLIENT_EFS_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"EFS", MHI_CLIENT_EFS_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +//"Qualcomm PCIe MBIM" + {"MBIM", MHI_CLIENT_MBIM_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"MBIM", MHI_CLIENT_MBIM_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +//"Qualcomm PCIe QMI" + {"QMI0", MHI_CLIENT_QMI_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"QMI0", MHI_CLIENT_QMI_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +//"Qualcomm PCIe QMI" + //{"QMI1", MHI_CLIENT_QMI_2_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + //{"QMI1", MHI_CLIENT_QMI_2_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +//"Qualcomm PCIe IP CTRL" + {"IP_CTRL", MHI_CLIENT_IP_CTRL_1_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"IP_CTRL", MHI_CLIENT_IP_CTRL_1_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +#if 0 //AG15 +//"Qualcomm PCIe IPCR" + {"IPCR", MHI_CLIENT_DIAG_CONS_IF_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"IPCR", MHI_CLIENT_DIAG_CONS_IF_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +#endif +//"Qualcomm PCIe Boot Logging" + //{"BL", MHI_CLIENT_BOOT_LOG_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + //{"BL", MHI_CLIENT_BOOT_LOG_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +//"Qualcomm PCIe Modem" + {"DUN", MHI_CLIENT_DUN_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"DUN", MHI_CLIENT_DUN_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +//"Qualcomm EDL " + {"EDL", MHI_CLIENT_EDL_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"EDL", MHI_CLIENT_EDL_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +#ifdef ENABLE_IP_SW0 +//"Qualcomm PCIe LOCAL Adapter" + {"IP_SW0", MHI_CLIENT_IP_SW_0_OUT, NUM_MHI_SW_IP_RING_ELEMENTS}, + {"IP_SW0", 
MHI_CLIENT_IP_SW_0_IN, NUM_MHI_SW_IP_RING_ELEMENTS}, +#endif +//"Qualcomm PCIe WWAN Adapter" + {"IP_HW0", MHI_CLIENT_IP_HW_0_OUT, NUM_MHI_IPA_OUT_RING_ELEMENTS}, + {"IP_HW0", MHI_CLIENT_IP_HW_0_IN, NUM_MHI_IPA_IN_RING_ELEMENTS}, +}; + +extern int mhi_netdev_mbin_enabled(void); +static int of_parse_ch_cfg(struct mhi_controller *mhi_cntrl, + struct device_node *of_node) +{ + u32 chan; + u32 i, num; + u32 ring = 0; + + mhi_cntrl->max_chan = MHI_MAX_CHANNELS; + num = sizeof(chan_cfg)/sizeof(chan_cfg[0]); + + mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, + sizeof(*mhi_cntrl->mhi_chan), GFP_KERNEL); + if (!mhi_cntrl->mhi_chan) + return -ENOMEM; + + INIT_LIST_HEAD(&mhi_cntrl->lpm_chans); + + /* populate channel configurations */ + for (i = 0; i < num; i++) { + struct mhi_chan *mhi_chan; + + if (!strncmp( chan_cfg[i].chan_name, "MBIM", 4)) { + if (!mhi_netdev_mbin_enabled()) + continue; + } + else if (!strncmp( chan_cfg[i].chan_name, "QMI", 3)) { + if (mhi_netdev_mbin_enabled()) + continue; + } + + chan = chan_cfg[i].chan_id; + + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + + mhi_chan->name = chan_cfg[i].chan_name; + + mhi_chan->chan = chan; + + mhi_chan->tre_ring.elements = chan_cfg[i].elements; + + /* + * For some channels, local ring len should be bigger than + * transfer ring len due to internal logical channels in device. + * So host can queue much more buffers than transfer ring len. + * Example, RSC channels should have a larger local channel + * than transfer ring length. + */ + mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements; + + if (chan == MHI_CLIENT_IP_HW_0_OUT || chan == MHI_CLIENT_IP_HW_0_IN || chan == MHI_CLIENT_DIAG_IN + || chan == MHI_CLIENT_IP_SW_0_OUT || chan == MHI_CLIENT_IP_SW_0_IN) { + mhi_chan->ring = 0; + } + else { + mhi_chan->ring = ring; + ring += mhi_chan->buf_ring.elements; + } + + if (chan == MHI_CLIENT_IP_HW_0_OUT) + mhi_chan->er_index = IPA_OUT_EVENT_RING; + else if (chan == MHI_CLIENT_IP_HW_0_IN) + mhi_chan->er_index = IPA_IN_EVENT_RING; +#ifdef ENABLE_IP_SW0 + else if (chan == MHI_CLIENT_IP_SW_0_OUT) + mhi_chan->er_index = SW_0_OUT_EVT_RING; + else if (chan == MHI_CLIENT_IP_SW_0_IN) + mhi_chan->er_index = SW_0_IN_EVT_RING; +#endif + else + mhi_chan->er_index = PRIMARY_EVENT_RING; + + mhi_chan->dir = CHAN_INBOUND(chan) ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+		/*
+		 * For most channels the channel type is identical to the
+		 * channel direction; when chtype is not defined, assign the
+		 * channel direction to it.
+		 */
+		mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;
+
+		mhi_chan->ee_mask = BIT(MHI_EE_AMSS);
+		if (CHAN_SBL(chan))
+			mhi_chan->ee_mask = BIT(MHI_EE_SBL);
+		else if (CHAN_EDL(chan))
+			mhi_chan->ee_mask = BIT(MHI_EE_FP);
+
+		mhi_chan->db_cfg.pollcfg = 0;
+
+		if (chan == MHI_CLIENT_IP_HW_0_OUT || chan == MHI_CLIENT_IP_SW_0_OUT)
+			mhi_chan->xfer_type = MHI_XFER_SKB;
+		else if (chan == MHI_CLIENT_IP_HW_0_IN || chan == MHI_CLIENT_IP_SW_0_IN)
+			mhi_chan->xfer_type = MHI_XFER_SKB; //MHI_XFER_DMA;
+		else
+			mhi_chan->xfer_type = MHI_XFER_BUFFER;
+
+		if (chan_cfg[i].elements == 0) {
+			mhi_chan->dir = DMA_BIDIRECTIONAL;
+			mhi_chan->xfer_type = MHI_XFER_NOP;
+		}
+
+		switch (mhi_chan->xfer_type) {
+		case MHI_XFER_BUFFER:
+			mhi_chan->gen_tre = mhi_gen_tre;
+			mhi_chan->queue_xfer = mhi_queue_buf;
+			break;
+		case MHI_XFER_SKB:
+			mhi_chan->queue_xfer = mhi_queue_skb;
+			break;
+		case MHI_XFER_SCLIST:
+			mhi_chan->gen_tre = mhi_gen_tre;
+			mhi_chan->queue_xfer = mhi_queue_sclist;
+			break;
+		case MHI_XFER_NOP:
+			mhi_chan->queue_xfer = mhi_queue_nop;
+			break;
+		case MHI_XFER_DMA:
+		case MHI_XFER_RSC_DMA:
+			mhi_chan->queue_xfer = mhi_queue_dma;
+			break;
+		default:
+			goto error_chan_cfg;
+		}
+
+		mhi_chan->lpm_notify = false;
+		mhi_chan->offload_ch = (chan_cfg[i].elements == 0);
+		mhi_chan->db_cfg.reset_req = false;
+		mhi_chan->pre_alloc = false;
+		mhi_chan->auto_start = false;
+		mhi_chan->wake_capable = false;
+
+		if (mhi_chan->pre_alloc &&
+		    (mhi_chan->dir != DMA_FROM_DEVICE ||
+		     mhi_chan->xfer_type != MHI_XFER_BUFFER))
+			goto error_chan_cfg;
+
+		/* bi-directional and directionless channels must be offload channels */
+		if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
+		     mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch)
+			goto error_chan_cfg;
+
+		/* if the MHI host allocates the buffers, the client cannot queue */
+		if (mhi_chan->pre_alloc)
+			mhi_chan->queue_xfer = mhi_queue_nop;
+
+		if (!mhi_chan->offload_ch) {
+			mhi_chan->db_cfg.brstmode = MHI_BRSTMODE_DISABLE;
+			if (chan == MHI_CLIENT_IP_HW_0_OUT || chan == MHI_CLIENT_IP_HW_0_IN)
+				mhi_chan->db_cfg.brstmode = MHI_BRSTMODE_ENABLE;
+
+			if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode))
+				goto error_chan_cfg;
+
+			mhi_chan->db_cfg.process_db =
+				(mhi_chan->db_cfg.brstmode ==
+				 MHI_BRSTMODE_ENABLE) ?
+ mhi_db_brstmode : mhi_db_brstmode_disable; + } + + mhi_chan->configured = true; + + if (mhi_chan->lpm_notify) + list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans); + } + + return 0; + +error_chan_cfg: + kfree(mhi_cntrl->mhi_chan); + + return -EINVAL; +} +#endif + +static int of_parse_dt(struct mhi_controller *mhi_cntrl, + struct device_node *of_node) +{ + int ret; + + /* parse MHI channel configuration */ + ret = of_parse_ch_cfg(mhi_cntrl, of_node); + if (ret) + return ret; + + /* parse MHI event configuration */ + ret = of_parse_ev_cfg(mhi_cntrl, of_node); + if (ret) + goto error_ev_cfg; + +#if 0 + ret = of_property_read_u32(of_node, "mhi,timeout", + &mhi_cntrl->timeout_ms); + if (ret) + mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS; + + mhi_cntrl->bounce_buf = of_property_read_bool(of_node, "mhi,use-bb"); + ret = of_property_read_u32(of_node, "mhi,buffer-len", + (u32 *)&mhi_cntrl->buffer_len); + if (ret) + mhi_cntrl->buffer_len = MHI_MAX_MTU; +#else + mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS; + mhi_cntrl->bounce_buf = false; + mhi_cntrl->buffer_len = MHI_MAX_MTU; +#endif + + return 0; + +error_ev_cfg: + kfree(mhi_cntrl->mhi_chan); + + return ret; +} + +int of_register_mhi_controller(struct mhi_controller *mhi_cntrl) +{ + int ret; + int i; + struct mhi_event *mhi_event; + struct mhi_chan *mhi_chan; + struct mhi_cmd *mhi_cmd; + struct mhi_device *mhi_dev; + + //if (!mhi_cntrl->of_node) + // return -EINVAL; + + for (i = 0; i < MAX_MHI_CONTROLLER; i++) { + if (mhi_controller_minors[i].dev_id == mhi_cntrl->dev_id + && mhi_controller_minors[i].domain == mhi_cntrl->domain + && mhi_controller_minors[i].bus == mhi_cntrl->bus + && mhi_controller_minors[i].slot == mhi_cntrl->slot) { + mhi_cntrl->cntrl_idx = i; + break; + } + else if (mhi_controller_minors[i].dev_id == 0 + && mhi_controller_minors[i].domain == 0 + && mhi_controller_minors[i].bus == 0 + && mhi_controller_minors[i].slot == 0) { + mhi_controller_minors[i].dev_id = mhi_cntrl->dev_id; + mhi_controller_minors[i].domain = mhi_cntrl->domain; + mhi_controller_minors[i].bus = mhi_cntrl->bus; + mhi_controller_minors[i].slot = mhi_cntrl->slot; + mhi_cntrl->cntrl_idx = i; + break; + } + } + + if (i == MAX_MHI_CONTROLLER) + return -EINVAL; + + if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put) + return -EINVAL; + + if (!mhi_cntrl->status_cb || !mhi_cntrl->link_status) + return -EINVAL; + + ret = of_parse_dt(mhi_cntrl, mhi_cntrl->of_node); + if (ret) + return -EINVAL; + + mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, + sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL); + if (!mhi_cntrl->mhi_cmd) { + ret = -ENOMEM; + goto error_alloc_cmd; + } + + INIT_LIST_HEAD(&mhi_cntrl->transition_list); + mutex_init(&mhi_cntrl->pm_mutex); + rwlock_init(&mhi_cntrl->pm_lock); + spin_lock_init(&mhi_cntrl->transition_lock); + spin_lock_init(&mhi_cntrl->wlock); + INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker); + INIT_WORK(&mhi_cntrl->fw_worker, mhi_fw_load_worker); + INIT_WORK(&mhi_cntrl->syserr_worker, mhi_pm_sys_err_worker); + INIT_DELAYED_WORK(&mhi_cntrl->ready_worker, mhi_pm_ready_worker); + init_waitqueue_head(&mhi_cntrl->state_event); + + mhi_cmd = mhi_cntrl->mhi_cmd; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) + spin_lock_init(&mhi_cmd->lock); + + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + mhi_event->mhi_cntrl = mhi_cntrl; + spin_lock_init(&mhi_event->lock); + if (mhi_event->data_type == MHI_ER_CTRL_ELEMENT_TYPE) + tasklet_init(&mhi_event->task, 
mhi_ctrl_ev_task, + (ulong)mhi_event); + else + tasklet_init(&mhi_event->task, mhi_ev_task, + (ulong)mhi_event); + } + + mhi_chan = mhi_cntrl->mhi_chan; + for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { + mutex_init(&mhi_chan->mutex); + init_completion(&mhi_chan->completion); + rwlock_init(&mhi_chan->lock); + } + + if (mhi_cntrl->bounce_buf) { + mhi_cntrl->map_single = mhi_map_single_use_bb; + mhi_cntrl->unmap_single = mhi_unmap_single_use_bb; + } else { + mhi_cntrl->map_single = mhi_map_single_no_bb; + mhi_cntrl->unmap_single = mhi_unmap_single_no_bb; + } + + /* register controller with mhi_bus */ + mhi_dev = mhi_alloc_device(mhi_cntrl); + if (!mhi_dev) { + ret = -ENOMEM; + goto error_alloc_dev; + } + + mhi_dev->dev_type = MHI_CONTROLLER_TYPE; + mhi_dev->mhi_cntrl = mhi_cntrl; + dev_set_name(&mhi_dev->dev, "%04x_%02u.%02u.%02u", mhi_dev->dev_id, + mhi_dev->domain, mhi_dev->bus, mhi_dev->slot); + + ret = device_add(&mhi_dev->dev); + if (ret) + goto error_add_dev; + + if (mhi_cntrl->cntrl_idx) + mhi_cntrl->cntrl_dev = device_create(mhi_cntrl_drv.class, mhi_cntrl->dev, + MKDEV(mhi_cntrl_drv.major, mhi_cntrl->cntrl_idx), NULL, + "mhi_BHI%d", mhi_cntrl->cntrl_idx); + else + mhi_cntrl->cntrl_dev = device_create(mhi_cntrl_drv.class, mhi_cntrl->dev, + MKDEV(mhi_cntrl_drv.major, mhi_cntrl->cntrl_idx), NULL, + "mhi_BHI"); + + mhi_cntrl->mhi_dev = mhi_dev; + + mhi_cntrl->parent = mhi_bus.dentry; + mhi_cntrl->klog_lvl = MHI_MSG_LVL_ERROR; + + /* adding it to this list only for debug purpose */ + mutex_lock(&mhi_bus.lock); + list_add_tail(&mhi_cntrl->node, &mhi_bus.controller_list); + mutex_unlock(&mhi_bus.lock); + + return 0; + +error_add_dev: + mhi_dealloc_device(mhi_cntrl, mhi_dev); + +error_alloc_dev: + kfree(mhi_cntrl->mhi_cmd); + +error_alloc_cmd: + kfree(mhi_cntrl->mhi_chan); + kfree(mhi_cntrl->mhi_event); + + return ret; +}; +EXPORT_SYMBOL(of_register_mhi_controller); + +void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl) +{ + struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev; + + kfree(mhi_cntrl->mhi_cmd); + kfree(mhi_cntrl->mhi_event); + kfree(mhi_cntrl->mhi_chan); + kfree(mhi_cntrl->mhi_tsync); + + if (mhi_cntrl->cntrl_dev) + device_destroy(mhi_cntrl_drv.class, MKDEV(mhi_cntrl_drv.major, mhi_cntrl->cntrl_idx)); + device_del(&mhi_dev->dev); + put_device(&mhi_dev->dev); + + mutex_lock(&mhi_bus.lock); + list_del(&mhi_cntrl->node); + mutex_unlock(&mhi_bus.lock); +} + +/* set ptr to control private data */ +static inline void mhi_controller_set_devdata(struct mhi_controller *mhi_cntrl, + void *priv) +{ + mhi_cntrl->priv_data = priv; +} + + +/* allocate mhi controller to register */ +struct mhi_controller *mhi_alloc_controller(size_t size) +{ + struct mhi_controller *mhi_cntrl; + + mhi_cntrl = kzalloc(size + sizeof(*mhi_cntrl), GFP_KERNEL); + + if (mhi_cntrl && size) + mhi_controller_set_devdata(mhi_cntrl, mhi_cntrl + 1); + + return mhi_cntrl; +} +EXPORT_SYMBOL(mhi_alloc_controller); + +int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl) +{ + int ret; + u32 bhie_off; + + mutex_lock(&mhi_cntrl->pm_mutex); + + ret = mhi_init_dev_ctxt(mhi_cntrl); + if (ret) { + MHI_ERR("Error with init dev_ctxt\n"); + goto error_dev_ctxt; + } + + ret = mhi_init_irq_setup(mhi_cntrl); + if (ret) { + MHI_ERR("Error setting up irq\n"); + goto error_setup_irq; + } + + /* + * allocate rddm table if specified, this table is for debug purpose + * so we'll ignore erros + */ + if (mhi_cntrl->rddm_size) { + mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image, + mhi_cntrl->rddm_size); + + /* 
+ * This controller supports rddm, we need to manually clear + * BHIE RX registers since por values are undefined. + */ + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, + &bhie_off); + if (ret) { + MHI_ERR("Error getting bhie offset\n"); + goto bhie_error; + } + + memset_io(mhi_cntrl->regs + bhie_off + BHIE_RXVECADDR_LOW_OFFS, + 0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS + + 4); + } + + mhi_cntrl->pre_init = true; + + mutex_unlock(&mhi_cntrl->pm_mutex); + + return 0; + +bhie_error: + if (mhi_cntrl->rddm_image) { + mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image); + mhi_cntrl->rddm_image = NULL; + } + mhi_deinit_free_irq(mhi_cntrl); + +error_setup_irq: + mhi_deinit_dev_ctxt(mhi_cntrl); + +error_dev_ctxt: + mutex_unlock(&mhi_cntrl->pm_mutex); + + return ret; +} +EXPORT_SYMBOL(mhi_prepare_for_power_up); + +void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl) +{ + if (mhi_cntrl->fbc_image) { + mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image); + mhi_cntrl->fbc_image = NULL; + } + + if (mhi_cntrl->rddm_image) { + mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image); + mhi_cntrl->rddm_image = NULL; + } + + mhi_deinit_free_irq(mhi_cntrl); + mhi_deinit_dev_ctxt(mhi_cntrl); + mhi_cntrl->pre_init = false; +} + +/* match dev to drv */ +static int mhi_match(struct device *dev, struct device_driver *drv) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_driver *mhi_drv = to_mhi_driver(drv); + const struct mhi_device_id *id; + + /* if controller type there is no client driver associated with it */ + if (mhi_dev->dev_type == MHI_CONTROLLER_TYPE) + return 0; + + for (id = mhi_drv->id_table; id->chan[0]; id++) + if (!strcmp(mhi_dev->chan_name, id->chan)) { + mhi_dev->id = id; + return 1; + } + + return 0; +}; + +static void mhi_release_device(struct device *dev) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + + if (mhi_dev->ul_chan) + mhi_dev->ul_chan->mhi_dev = NULL; + + if (mhi_dev->dl_chan) + mhi_dev->dl_chan->mhi_dev = NULL; + + kfree(mhi_dev); +} + +struct bus_type mhi_bus_type = { + .name = "mhi_q", + .dev_name = "mhi_q", + .match = mhi_match, +}; + +static int mhi_driver_probe(struct device *dev) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct device_driver *drv = dev->driver; + struct mhi_driver *mhi_drv = to_mhi_driver(drv); + struct mhi_event *mhi_event; + struct mhi_chan *ul_chan = mhi_dev->ul_chan; + struct mhi_chan *dl_chan = mhi_dev->dl_chan; + bool auto_start = false; + int ret; + + /* bring device out of lpm */ + ret = mhi_device_get_sync(mhi_dev); + if (ret) + return ret; + + ret = -EINVAL; + if (ul_chan) { + /* lpm notification require status_cb */ + if (ul_chan->lpm_notify && !mhi_drv->status_cb) + goto exit_probe; + + if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb) + goto exit_probe; + + ul_chan->xfer_cb = mhi_drv->ul_xfer_cb; + mhi_dev->status_cb = mhi_drv->status_cb; + auto_start = ul_chan->auto_start; + } + + if (dl_chan) { + if (dl_chan->lpm_notify && !mhi_drv->status_cb) + goto exit_probe; + + if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb) + goto exit_probe; + + mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index]; + + /* + * if this channal event ring manage by client, then + * status_cb must be defined so we can send the async + * cb whenever there are pending data + */ + if (mhi_event->cl_manage && !mhi_drv->status_cb) + goto exit_probe; + + dl_chan->xfer_cb = mhi_drv->dl_xfer_cb; + + /* ul & dl uses same status cb */ + 
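/*
+		 * note: auto_start requested on either direction causes the
+		 * channel to be prepared for transfer right after a
+		 * successful probe() below
+		 */
+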
mhi_dev->status_cb = mhi_drv->status_cb; + auto_start = (auto_start || dl_chan->auto_start); + } + + ret = mhi_drv->probe(mhi_dev, mhi_dev->id); + + if (!ret && auto_start) + mhi_prepare_for_transfer(mhi_dev); + +exit_probe: + mhi_device_put(mhi_dev); + + return ret; +} + +static int mhi_driver_remove(struct device *dev) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan; + enum MHI_CH_STATE ch_state[] = { + MHI_CH_STATE_DISABLED, + MHI_CH_STATE_DISABLED + }; + int dir; + + /* control device has no work to do */ + if (mhi_dev->dev_type == MHI_CONTROLLER_TYPE) + return 0; + + MHI_LOG("Removing device for chan:%s\n", mhi_dev->chan_name); + + /* reset both channels */ + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + /* wake all threads waiting for completion */ + write_lock_irq(&mhi_chan->lock); + mhi_chan->ccs = MHI_EV_CC_INVALID; + complete_all(&mhi_chan->completion); + write_unlock_irq(&mhi_chan->lock); + + /* move channel state to disable, no more processing */ + mutex_lock(&mhi_chan->mutex); + write_lock_irq(&mhi_chan->lock); + ch_state[dir] = mhi_chan->ch_state; + mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED; + write_unlock_irq(&mhi_chan->lock); + + /* reset the channel */ + if (!mhi_chan->offload_ch) + mhi_reset_chan(mhi_cntrl, mhi_chan); + + mutex_unlock(&mhi_chan->mutex); + } + + /* destroy the device */ + mhi_drv->remove(mhi_dev); + + /* de_init channel if it was enabled */ + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + mutex_lock(&mhi_chan->mutex); + + if (ch_state[dir] == MHI_CH_STATE_ENABLED && + !mhi_chan->offload_ch) + mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); + + mhi_chan->ch_state = MHI_CH_STATE_DISABLED; + + mutex_unlock(&mhi_chan->mutex); + } + + + if (mhi_cntrl->tsync_dev == mhi_dev) + mhi_cntrl->tsync_dev = NULL; + + /* relinquish any pending votes */ + read_lock_bh(&mhi_cntrl->pm_lock); + while (atomic_read(&mhi_dev->dev_wake)) + mhi_device_put(mhi_dev); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return 0; +} + +int mhi_driver_register(struct mhi_driver *mhi_drv) +{ + struct device_driver *driver = &mhi_drv->driver; + + if (!mhi_drv->probe || !mhi_drv->remove) + return -EINVAL; + + driver->bus = &mhi_bus_type; + driver->probe = mhi_driver_probe; + driver->remove = mhi_driver_remove; + return driver_register(driver); +} +EXPORT_SYMBOL(mhi_driver_register); + +void mhi_driver_unregister(struct mhi_driver *mhi_drv) +{ + driver_unregister(&mhi_drv->driver); +} +EXPORT_SYMBOL(mhi_driver_unregister); + +struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl) +{ + struct mhi_device *mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL); + struct device *dev; + + if (!mhi_dev) + return NULL; + + dev = &mhi_dev->dev; + device_initialize(dev); + dev->bus = &mhi_bus_type; + dev->release = mhi_release_device; + dev->parent = mhi_cntrl->dev; + mhi_dev->mhi_cntrl = mhi_cntrl; + mhi_dev->vendor = mhi_cntrl->vendor; + mhi_dev->dev_id = mhi_cntrl->dev_id; + mhi_dev->domain = mhi_cntrl->domain; + mhi_dev->bus = mhi_cntrl->bus; + mhi_dev->slot = mhi_cntrl->slot; + mhi_dev->mtu = MHI_MAX_MTU; + atomic_set(&mhi_dev->dev_wake, 0); + + return mhi_dev; +} + +static int mhi_cntrl_open(struct inode *inode, struct file *f) +{ + int ret = -ENODEV; + struct mhi_controller *mhi_cntrl; + + 
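/*
+	 * Character-device open: map the minor number back to the matching
+	 * controller on mhi_bus.controller_list and stash it in
+	 * f->private_data for the BHI ioctls below.
+	 */
+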
mutex_lock(&mhi_bus.lock); + list_for_each_entry(mhi_cntrl, &mhi_bus.controller_list, node) { + if (MINOR(inode->i_rdev) == mhi_cntrl->cntrl_idx) { + ret = 0; + f->private_data = mhi_cntrl; + break; + } + } + mutex_unlock(&mhi_bus.lock); + + return ret; +} + +static int mhi_cntrl_release(struct inode *inode, struct file *f) +{ + f->private_data = NULL; + return 0; +} + +#define IOCTL_BHI_GETDEVINFO 0x8BE0 + 1 +#define IOCTL_BHI_WRITEIMAGE 0x8BE0 + 2 +long bhi_get_dev_info(struct mhi_controller *mhi_cntrl, void __user *to); +long bhi_write_image(struct mhi_controller *mhi_cntrl, void __user *from); + +static long mhi_cntrl_ioctl(struct file *f, unsigned int cmd, unsigned long __arg) +{ + long ret = -ENODEV; + struct mhi_controller *mhi_cntrl; + + mutex_lock(&mhi_bus.lock); + list_for_each_entry(mhi_cntrl, &mhi_bus.controller_list, node) { + if (mhi_cntrl == (struct mhi_controller *)f->private_data) { + ret = 0; + break; + } + } + mutex_unlock(&mhi_bus.lock); + + if (ret) + return ret; + + switch (cmd) { + case IOCTL_BHI_GETDEVINFO: + ret = bhi_get_dev_info(mhi_cntrl, (void __user *)__arg); + break; + + case IOCTL_BHI_WRITEIMAGE: + ret = bhi_write_image(mhi_cntrl, (void __user *)__arg); + break; + + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static const struct file_operations mhi_cntrl_fops = { + .unlocked_ioctl = mhi_cntrl_ioctl, + .open = mhi_cntrl_open, + .release = mhi_cntrl_release, +}; + +static int __init mhi_cntrl_init(void) +{ + int ret; + + ret = register_chrdev(0, MHI_CNTRL_DRIVER_NAME, &mhi_cntrl_fops); + if (ret < 0) + return ret; + + mhi_cntrl_drv.major = ret; + mhi_cntrl_drv.class = class_create(THIS_MODULE, MHI_CNTRL_DRIVER_NAME); + if (IS_ERR(mhi_cntrl_drv.class)) { + unregister_chrdev(mhi_cntrl_drv.major, MHI_CNTRL_DRIVER_NAME); + return -ENODEV; + } + + mutex_init(&mhi_cntrl_drv.lock); + INIT_LIST_HEAD(&mhi_cntrl_drv.head); + + return 0; +} + +void mhi_cntrl_exit(void) +{ + class_destroy(mhi_cntrl_drv.class); + unregister_chrdev(mhi_cntrl_drv.major, MHI_CNTRL_DRIVER_NAME); +} + +extern int mhi_dtr_init(void); +extern void mhi_dtr_exit(void); +extern int mhi_device_netdev_init(struct dentry *parent); +extern void mhi_device_netdev_exit(void); +extern int mhi_device_uci_init(void); +extern void mhi_device_uci_exit(void); +extern int mhi_controller_qcom_init(void); +extern void mhi_controller_qcom_exit(void); + +static char mhi_version[] = "Quectel_Linux_PCIE_MHI_Driver_"PCIE_MHI_DRIVER_VERSION; +module_param_string(mhi_version, mhi_version, sizeof(mhi_version), S_IRUGO); + +static int __init mhi_init(void) +{ + int ret; + + pr_info("%s %s\n", __func__, mhi_version); + + mutex_init(&mhi_bus.lock); + INIT_LIST_HEAD(&mhi_bus.controller_list); + + /* parent directory */ + mhi_bus.dentry = debugfs_create_dir(mhi_bus_type.name, NULL); + + ret = bus_register(&mhi_bus_type); + if (ret) { + pr_err("Error bus_register ret:%d\n", ret); + return ret; + } + + ret = mhi_dtr_init(); + if (ret) { + pr_err("Error mhi_dtr_init ret:%d\n", ret); + bus_unregister(&mhi_bus_type); + return ret; + } + + ret = mhi_device_netdev_init(mhi_bus.dentry); + if (ret) { + pr_err("Error mhi_device_netdev_init ret:%d\n", ret); + } + + ret = mhi_device_uci_init(); + if (ret) { + pr_err("Error mhi_device_uci_init ret:%d\n", ret); + } + + ret = mhi_cntrl_init(); + if (ret) { + pr_err("Error mhi_cntrl_init ret:%d\n", ret); + } + + ret = mhi_controller_qcom_init(); + if (ret) { + pr_err("Error mhi_controller_qcom_init ret:%d\n", ret); + } + + return ret; +} + +static void mhi_exit(void) +{ + 
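/* tear down in the reverse order of mhi_init() */
+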
mhi_controller_qcom_exit(); + mhi_cntrl_exit(); + mhi_device_uci_exit(); + mhi_device_netdev_exit(); + mhi_dtr_exit(); + bus_unregister(&mhi_bus_type); + debugfs_remove_recursive(mhi_bus.dentry); +} + +module_init(mhi_init); +module_exit(mhi_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("MHI_CORE"); +MODULE_DESCRIPTION("MHI Host Interface"); diff --git a/package/wwan/driver/quectel_MHI/src/core/mhi_internal.h b/package/wwan/driver/quectel_MHI/src/core/mhi_internal.h new file mode 100644 index 000000000..31bd61718 --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/core/mhi_internal.h @@ -0,0 +1,1181 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. */ + +#ifndef _MHI_INT_H +#define _MHI_INT_H + +#include +#ifndef writel_relaxed +#define writel_relaxed writel +#endif + +#ifndef writel_relaxed_no_log +#define writel_relaxed_no_log writel_relaxed +#endif + +#ifndef readq +static inline u64 readq(void __iomem *reg) +{ + return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32); +} +#endif + +#ifndef readq_relaxed +#define readq_relaxed readq +#endif + +#ifndef readq_relaxed_no_log +#define readq_relaxed_no_log readq_relaxed +#endif + +#ifndef U32_MAX +#define U32_MAX ((u32)~0U) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,10,53 )) +static inline void reinit_completion(struct completion *x) +{ + x->done = 0; +} +#endif + +#ifndef __ATTR_RO +#define __ATTR_RO(_name) { \ + .attr = { .name = __stringify(_name), .mode = S_IRUGO }, \ + .show = _name##_show, \ +} +#endif +#ifndef __ATTR_WO +#define __ATTR_WO(_name) { \ + .attr = { .name = __stringify(_name), .mode = S_IWUSR }, \ + .store = _name##_store, \ +} +#endif +#ifndef __ATTR_RW +#define __ATTR_RW(_name) __ATTR(_name, (S_IWUSR | S_IRUGO), \ + _name##_show, _name##_store) +#endif +#ifndef DEVICE_ATTR_RO +#define DEVICE_ATTR_RO(_name) \ + struct device_attribute dev_attr_##_name = __ATTR_RO(_name) +#endif +#ifndef DEVICE_ATTR_WO +#define DEVICE_ATTR_WO(_name) \ + struct device_attribute dev_attr_##_name = __ATTR_WO(_name) +#endif +#ifndef DEVICE_ATTR_RW +#define DRIVER_ATTR_RW(_name) \ + struct driver_attribute driver_attr_##_name = __ATTR_RW(_name) +#endif + +#ifdef EXPORT_SYMBOL +#undef EXPORT_SYMBOL +#define EXPORT_SYMBOL(sym) +#endif + +extern struct bus_type mhi_bus_type; + +/* MHI mmio register mapping */ +#define PCI_INVALID_READ(val) (val == U32_MAX) + +#define MHIREGLEN (0x0) +#define MHIREGLEN_MHIREGLEN_MASK (0xFFFFFFFF) +#define MHIREGLEN_MHIREGLEN_SHIFT (0) + +#define MHIVER (0x8) +#define MHIVER_MHIVER_MASK (0xFFFFFFFF) +#define MHIVER_MHIVER_SHIFT (0) + +#define MHICFG (0x10) +#define MHICFG_NHWER_MASK (0xFF000000) +#define MHICFG_NHWER_SHIFT (24) +#define MHICFG_NER_MASK (0xFF0000) +#define MHICFG_NER_SHIFT (16) +#define MHICFG_NHWCH_MASK (0xFF00) +#define MHICFG_NHWCH_SHIFT (8) +#define MHICFG_NCH_MASK (0xFF) +#define MHICFG_NCH_SHIFT (0) + +#define CHDBOFF (0x18) +#define CHDBOFF_CHDBOFF_MASK (0xFFFFFFFF) +#define CHDBOFF_CHDBOFF_SHIFT (0) + +#define ERDBOFF (0x20) +#define ERDBOFF_ERDBOFF_MASK (0xFFFFFFFF) +#define ERDBOFF_ERDBOFF_SHIFT (0) + +#define BHIOFF (0x28) +#define BHIOFF_BHIOFF_MASK (0xFFFFFFFF) +#define BHIOFF_BHIOFF_SHIFT (0) + +#define BHIEOFF (0x2C) +#define BHIEOFF_BHIEOFF_MASK (0xFFFFFFFF) +#define BHIEOFF_BHIEOFF_SHIFT (0) + +#define DEBUGOFF (0x30) +#define DEBUGOFF_DEBUGOFF_MASK (0xFFFFFFFF) +#define DEBUGOFF_DEBUGOFF_SHIFT (0) + +#define MHICTRL (0x38) +#define MHICTRL_MHISTATE_MASK (0x0000FF00) +#define 
MHICTRL_MHISTATE_SHIFT (8) +#define MHICTRL_RESET_MASK (0x2) +#define MHICTRL_RESET_SHIFT (1) + +#define MHISTATUS (0x48) +#define MHISTATUS_MHISTATE_MASK (0x0000FF00) +#define MHISTATUS_MHISTATE_SHIFT (8) +#define MHISTATUS_SYSERR_MASK (0x4) +#define MHISTATUS_SYSERR_SHIFT (2) +#define MHISTATUS_READY_MASK (0x1) +#define MHISTATUS_READY_SHIFT (0) + +#define CCABAP_LOWER (0x58) +#define CCABAP_LOWER_CCABAP_LOWER_MASK (0xFFFFFFFF) +#define CCABAP_LOWER_CCABAP_LOWER_SHIFT (0) + +#define CCABAP_HIGHER (0x5C) +#define CCABAP_HIGHER_CCABAP_HIGHER_MASK (0xFFFFFFFF) +#define CCABAP_HIGHER_CCABAP_HIGHER_SHIFT (0) + +#define ECABAP_LOWER (0x60) +#define ECABAP_LOWER_ECABAP_LOWER_MASK (0xFFFFFFFF) +#define ECABAP_LOWER_ECABAP_LOWER_SHIFT (0) + +#define ECABAP_HIGHER (0x64) +#define ECABAP_HIGHER_ECABAP_HIGHER_MASK (0xFFFFFFFF) +#define ECABAP_HIGHER_ECABAP_HIGHER_SHIFT (0) + +#define CRCBAP_LOWER (0x68) +#define CRCBAP_LOWER_CRCBAP_LOWER_MASK (0xFFFFFFFF) +#define CRCBAP_LOWER_CRCBAP_LOWER_SHIFT (0) + +#define CRCBAP_HIGHER (0x6C) +#define CRCBAP_HIGHER_CRCBAP_HIGHER_MASK (0xFFFFFFFF) +#define CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT (0) + +#define CRDB_LOWER (0x70) +#define CRDB_LOWER_CRDB_LOWER_MASK (0xFFFFFFFF) +#define CRDB_LOWER_CRDB_LOWER_SHIFT (0) + +#define CRDB_HIGHER (0x74) +#define CRDB_HIGHER_CRDB_HIGHER_MASK (0xFFFFFFFF) +#define CRDB_HIGHER_CRDB_HIGHER_SHIFT (0) + +#define MHICTRLBASE_LOWER (0x80) +#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK (0xFFFFFFFF) +#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT (0) + +#define MHICTRLBASE_HIGHER (0x84) +#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK (0xFFFFFFFF) +#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT (0) + +#define MHICTRLLIMIT_LOWER (0x88) +#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK (0xFFFFFFFF) +#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT (0) + +#define MHICTRLLIMIT_HIGHER (0x8C) +#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK (0xFFFFFFFF) +#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT (0) + +#define MHIDATABASE_LOWER (0x98) +#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK (0xFFFFFFFF) +#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT (0) + +#define MHIDATABASE_HIGHER (0x9C) +#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK (0xFFFFFFFF) +#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT (0) + +#define MHIDATALIMIT_LOWER (0xA0) +#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK (0xFFFFFFFF) +#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT (0) + +#define MHIDATALIMIT_HIGHER (0xA4) +#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK (0xFFFFFFFF) +#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT (0) + +#define MHI_READ_REG_FIELD(_VAR,_REG,_FIELD) \ + ((_VAR & _REG ## _ ## _FIELD ## _MASK) >> _REG ## _ ## _FIELD ## _SHIFT) + +#define MHI_WRITE_REG_FIELD(_VAR,_REG,_FIELD,_VAL) \ + do { \ + _VAR &= ~_REG ## _ ## _FIELD ## _MASK; \ + _VAR |= (_VAL << _REG ## _ ## _FIELD ## _SHIFT); \ + } while(0) + +/* Host request register */ +#define MHI_SOC_RESET_REQ_OFFSET (0xB0) +#define MHI_SOC_RESET_REQ BIT(0) + +/* MHI misc capability registers */ +#define MISC_OFFSET (0x24) +#define MISC_CAP_MASK (0xFFFFFFFF) +#define MISC_CAP_SHIFT (0) + +#define CAP_CAPID_MASK (0xFF000000) +#define CAP_CAPID_SHIFT (24) +#define CAP_NEXT_CAP_MASK (0x00FFF000) +#define CAP_NEXT_CAP_SHIFT (12) + +/* MHI Timesync offsets */ +#define TIMESYNC_CFG_OFFSET (0x00) +#define TIMESYNC_CFG_CAPID_MASK (CAP_CAPID_MASK) +#define TIMESYNC_CFG_CAPID_SHIFT (CAP_CAPID_SHIFT) +#define TIMESYNC_CFG_NEXT_OFF_MASK 
(CAP_NEXT_CAP_MASK) +#define TIMESYNC_CFG_NEXT_OFF_SHIFT (CAP_NEXT_CAP_SHIFT) +#define TIMESYNC_CFG_NUMCMD_MASK (0xFF) +#define TIMESYNC_CFG_NUMCMD_SHIFT (0) +#define TIMESYNC_DB_OFFSET (0x4) +#define TIMESYNC_TIME_LOW_OFFSET (0x8) +#define TIMESYNC_TIME_HIGH_OFFSET (0xC) + +#define TIMESYNC_CAP_ID (2) + +/* MHI BHI offfsets */ +#define BHI_BHIVERSION_MINOR (0x00) +#define BHI_BHIVERSION_MAJOR (0x04) +#define BHI_IMGADDR_LOW (0x08) +#define BHI_IMGADDR_HIGH (0x0C) +#define BHI_IMGSIZE (0x10) +#define BHI_RSVD1 (0x14) +#define BHI_IMGTXDB (0x18) +#define BHI_TXDB_SEQNUM_BMSK (0x3FFFFFFF) +#define BHI_TXDB_SEQNUM_SHFT (0) +#define BHI_RSVD2 (0x1C) +#define BHI_INTVEC (0x20) +#define BHI_RSVD3 (0x24) +#define BHI_EXECENV (0x28) +#define BHI_STATUS (0x2C) +#define BHI_ERRCODE (0x30) +#define BHI_ERRDBG1 (0x34) +#define BHI_ERRDBG2 (0x38) +#define BHI_ERRDBG3 (0x3C) +#define BHI_SERIALNU (0x40) +#define BHI_SBLANTIROLLVER (0x44) +#define BHI_NUMSEG (0x48) +#define BHI_MSMHWID(n) (0x4C + (0x4 * n)) +#define BHI_OEMPKHASH(n) (0x64 + (0x4 * n)) +#define BHI_RSVD5 (0xC4) +#define BHI_STATUS_MASK (0xC0000000) +#define BHI_STATUS_SHIFT (30) +#define BHI_STATUS_ERROR (3) +#define BHI_STATUS_SUCCESS (2) +#define BHI_STATUS_RESET (0) + +/* MHI BHIE offsets */ +#define BHIE_MSMSOCID_OFFS (0x0000) +#define BHIE_TXVECADDR_LOW_OFFS (0x002C) +#define BHIE_TXVECADDR_HIGH_OFFS (0x0030) +#define BHIE_TXVECSIZE_OFFS (0x0034) +#define BHIE_TXVECDB_OFFS (0x003C) +#define BHIE_TXVECDB_SEQNUM_BMSK (0x3FFFFFFF) +#define BHIE_TXVECDB_SEQNUM_SHFT (0) +#define BHIE_TXVECSTATUS_OFFS (0x0044) +#define BHIE_TXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF) +#define BHIE_TXVECSTATUS_SEQNUM_SHFT (0) +#define BHIE_TXVECSTATUS_STATUS_BMSK (0xC0000000) +#define BHIE_TXVECSTATUS_STATUS_SHFT (30) +#define BHIE_TXVECSTATUS_STATUS_RESET (0x00) +#define BHIE_TXVECSTATUS_STATUS_XFER_COMPL (0x02) +#define BHIE_TXVECSTATUS_STATUS_ERROR (0x03) +#define BHIE_RXVECADDR_LOW_OFFS (0x0060) +#define BHIE_RXVECADDR_HIGH_OFFS (0x0064) +#define BHIE_RXVECSIZE_OFFS (0x0068) +#define BHIE_RXVECDB_OFFS (0x0070) +#define BHIE_RXVECDB_SEQNUM_BMSK (0x3FFFFFFF) +#define BHIE_RXVECDB_SEQNUM_SHFT (0) +#define BHIE_RXVECSTATUS_OFFS (0x0078) +#define BHIE_RXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF) +#define BHIE_RXVECSTATUS_SEQNUM_SHFT (0) +#define BHIE_RXVECSTATUS_STATUS_BMSK (0xC0000000) +#define BHIE_RXVECSTATUS_STATUS_SHFT (30) +#define BHIE_RXVECSTATUS_STATUS_RESET (0x00) +#define BHIE_RXVECSTATUS_STATUS_XFER_COMPL (0x02) +#define BHIE_RXVECSTATUS_STATUS_ERROR (0x03) + +/* convert ticks to micro seconds by dividing by 19.2 */ +#define TIME_TICKS_TO_US(x) (div_u64((x) * 10, 192)) + +struct mhi_event_ctxt { + u32 reserved : 8; + u32 intmodc : 8; + u32 intmodt : 16; + u32 ertype; + u32 msivec; + + u64 rbase __packed __aligned(4); + u64 rlen __packed __aligned(4); + u64 rp __packed __aligned(4); + u64 wp __packed __aligned(4); +}; + +struct mhi_chan_ctxt { + u32 chstate : 8; + u32 brstmode : 2; + u32 pollcfg : 6; + u32 reserved : 16; + u32 chtype; + u32 erindex; + + u64 rbase __packed __aligned(4); + u64 rlen __packed __aligned(4); + u64 rp __packed __aligned(4); + u64 wp __packed __aligned(4); +}; + +struct mhi_cmd_ctxt { + u32 reserved0; + u32 reserved1; + u32 reserved2; + + u64 rbase __packed __aligned(4); + u64 rlen __packed __aligned(4); + u64 rp __packed __aligned(4); + u64 wp __packed __aligned(4); +}; + +struct mhi_tre { + u64 ptr; + u32 dword[2]; +}; + +/* Channel context state */ +enum mhi_dev_ch_ctx_state { + MHI_DEV_CH_STATE_DISABLED, + MHI_DEV_CH_STATE_ENABLED, 
+ MHI_DEV_CH_STATE_RUNNING, + MHI_DEV_CH_STATE_SUSPENDED, + MHI_DEV_CH_STATE_STOP, + MHI_DEV_CH_STATE_ERROR, + MHI_DEV_CH_STATE_RESERVED, + MHI_DEV_CH_STATE_32BIT = 0x7FFFFFFF +}; + +/* Channel type */ +enum mhi_dev_ch_ctx_type { + MHI_DEV_CH_TYPE_NONE, + MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL, + MHI_DEV_CH_TYPE_INBOUND_CHANNEL, + MHI_DEV_CH_RESERVED +}; + +/* Channel context type */ +struct mhi_dev_ch_ctx { + enum mhi_dev_ch_ctx_state ch_state; + enum mhi_dev_ch_ctx_type ch_type; + uint32_t err_indx; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +enum mhi_dev_ring_element_type_id { + MHI_DEV_RING_EL_INVALID = 0, + MHI_DEV_RING_EL_NOOP = 1, + MHI_DEV_RING_EL_TRANSFER = 2, + MHI_DEV_RING_EL_RESET = 16, + MHI_DEV_RING_EL_STOP = 17, + MHI_DEV_RING_EL_START = 18, + MHI_DEV_RING_EL_MHI_STATE_CHG = 32, + MHI_DEV_RING_EL_CMD_COMPLETION_EVT = 33, + MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT = 34, + MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY = 64, + MHI_DEV_RING_EL_UNDEF +}; + +enum mhi_dev_ring_state { + RING_STATE_UINT = 0, + RING_STATE_IDLE, + RING_STATE_PENDING, +}; + +enum mhi_dev_ring_type { + RING_TYPE_CMD = 0, + RING_TYPE_ER, + RING_TYPE_CH, + RING_TYPE_INVAL +}; + +/* Event context interrupt moderation */ +enum mhi_dev_evt_ctx_int_mod_timer { + MHI_DEV_EVT_INT_MODERATION_DISABLED +}; + +/* Event ring type */ +enum mhi_dev_evt_ctx_event_ring_type { + MHI_DEV_EVT_TYPE_DEFAULT, + MHI_DEV_EVT_TYPE_VALID, + MHI_DEV_EVT_RESERVED +}; + +/* Event ring context type */ +struct mhi_dev_ev_ctx { + uint32_t res1:16; + enum mhi_dev_evt_ctx_int_mod_timer intmodt:16; + enum mhi_dev_evt_ctx_event_ring_type ertype; + uint32_t msivec; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* Command context */ +struct mhi_dev_cmd_ctx { + uint32_t res1; + uint32_t res2; + uint32_t res3; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* generic context */ +struct mhi_dev_gen_ctx { + uint32_t res1; + uint32_t res2; + uint32_t res3; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* Transfer ring element */ +struct mhi_dev_transfer_ring_element { + uint64_t data_buf_ptr; + uint32_t len:16; + uint32_t res1:16; + uint32_t chain:1; + uint32_t res2:7; + uint32_t ieob:1; + uint32_t ieot:1; + uint32_t bei:1; + uint32_t res3:5; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res4:8; +} __packed; + +/* Command ring element */ +/* Command ring No op command */ +struct mhi_dev_cmd_ring_op { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring reset channel command */ +struct mhi_dev_cmd_ring_reset_channel_cmd { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring stop channel command */ +struct mhi_dev_cmd_ring_stop_channel_cmd { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring start channel command */ +struct mhi_dev_cmd_ring_start_channel_cmd { + uint64_t res1; + uint32_t seqnum; + uint32_t reliable:1; + uint32_t res2:15; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +enum mhi_dev_cmd_completion_code { + MHI_CMD_COMPL_CODE_INVALID = 0, + MHI_CMD_COMPL_CODE_SUCCESS = 1, + MHI_CMD_COMPL_CODE_EOT = 2, + MHI_CMD_COMPL_CODE_OVERFLOW = 3, + MHI_CMD_COMPL_CODE_EOB = 4, + 
MHI_CMD_COMPL_CODE_UNDEFINED = 16, + MHI_CMD_COMPL_CODE_RING_EL = 17, + MHI_CMD_COMPL_CODE_RES +}; + +/* Event ring elements */ +/* Transfer completion event */ +struct mhi_dev_event_ring_transfer_completion { + uint64_t ptr; + uint32_t len:16; + uint32_t res1:8; + enum mhi_dev_cmd_completion_code code:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command completion event */ +struct mhi_dev_event_ring_cmd_completion { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_cmd_completion_code code:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +/* MHI state change event */ +struct mhi_dev_event_ring_state_change { + uint64_t ptr; + uint32_t res1:24; + uint32_t /*enum mhi_dev_state*/ mhistate:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +enum mhi_dev_execenv { + MHI_DEV_SBL_EE = 1, + MHI_DEV_AMSS_EE = 2, + MHI_DEV_UNRESERVED +}; + +/* EE state change event */ +struct mhi_dev_event_ring_ee_state_change { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_execenv execenv:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +/* Generic cmd to parse common details like type and channel id */ +struct mhi_dev_ring_generic { + uint64_t ptr; + uint32_t res1:24; + uint32_t /*enum mhi_dev_state*/ mhistate:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +struct mhi_config { + uint32_t mhi_reg_len; + uint32_t version; + uint32_t event_rings; + uint32_t channels; + uint32_t chdb_offset; + uint32_t erdb_offset; +}; + +/* Possible ring element types */ +union mhi_dev_ring_element_type { + struct mhi_dev_cmd_ring_op cmd_no_op; + struct mhi_dev_cmd_ring_reset_channel_cmd cmd_reset; + struct mhi_dev_cmd_ring_stop_channel_cmd cmd_stop; + struct mhi_dev_cmd_ring_start_channel_cmd cmd_start; + struct mhi_dev_transfer_ring_element cmd_transfer; + struct mhi_dev_event_ring_transfer_completion evt_tr_comp; + struct mhi_dev_event_ring_cmd_completion evt_cmd_comp; + struct mhi_dev_event_ring_state_change evt_state_change; + struct mhi_dev_event_ring_ee_state_change evt_ee_state; + struct mhi_dev_ring_generic generic; +}; + +struct bhi_vec_entry { + u64 dma_addr; + u64 size; +}; + +enum mhi_cmd_type { + MHI_CMD_TYPE_NOP = 1, + MHI_CMD_TYPE_RESET = 16, + MHI_CMD_TYPE_STOP = 17, + MHI_CMD_TYPE_START = 18, + MHI_CMD_TYPE_TSYNC = 24, +}; + +/* no operation command */ +#define MHI_TRE_CMD_NOOP_PTR (0) +#define MHI_TRE_CMD_NOOP_DWORD0 (0) +#define MHI_TRE_CMD_NOOP_DWORD1 (MHI_CMD_TYPE_NOP << 16) + +/* channel reset command */ +#define MHI_TRE_CMD_RESET_PTR (0) +#define MHI_TRE_CMD_RESET_DWORD0 (0) +#define MHI_TRE_CMD_RESET_DWORD1(chid) ((chid << 24) | \ + (MHI_CMD_TYPE_RESET << 16)) + +/* channel stop command */ +#define MHI_TRE_CMD_STOP_PTR (0) +#define MHI_TRE_CMD_STOP_DWORD0 (0) +#define MHI_TRE_CMD_STOP_DWORD1(chid) ((chid << 24) | (MHI_CMD_TYPE_STOP << 16)) + +/* channel start command */ +#define MHI_TRE_CMD_START_PTR (0) +#define MHI_TRE_CMD_START_DWORD0 (0) +#define MHI_TRE_CMD_START_DWORD1(chid) ((chid << 24) | \ + (MHI_CMD_TYPE_START << 16)) + +/* time sync cfg command */ +#define MHI_TRE_CMD_TSYNC_CFG_PTR (0) +#define MHI_TRE_CMD_TSYNC_CFG_DWORD0 (0) +#define MHI_TRE_CMD_TSYNC_CFG_DWORD1(er) ((MHI_CMD_TYPE_TSYNC << 16) | \ + (er << 24)) + +#define MHI_TRE_GET_CMD_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF) +#define MHI_TRE_GET_CMD_TYPE(tre) 
(((tre)->dword[1] >> 16) & 0xFF) + +/* event descriptor macros */ +#define MHI_TRE_EV_PTR(ptr) (ptr) +#define MHI_TRE_EV_DWORD0(code, len) ((code << 24) | len) +#define MHI_TRE_EV_DWORD1(chid, type) ((chid << 24) | (type << 16)) +#define MHI_TRE_GET_EV_PTR(tre) ((tre)->ptr) +#define MHI_TRE_GET_EV_CODE(tre) (((tre)->dword[0] >> 24) & 0xFF) +#define MHI_TRE_GET_EV_LEN(tre) ((tre)->dword[0] & 0xFFFF) +#define MHI_TRE_GET_EV_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF) +#define MHI_TRE_GET_EV_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) +#define MHI_TRE_GET_EV_STATE(tre) (((tre)->dword[0] >> 24) & 0xFF) +#define MHI_TRE_GET_EV_EXECENV(tre) (((tre)->dword[0] >> 24) & 0xFF) +#define MHI_TRE_GET_EV_SEQ(tre) ((tre)->dword[0]) +#define MHI_TRE_GET_EV_TIME(tre) ((tre)->ptr) +#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits((tre)->ptr) +#define MHI_TRE_GET_EV_VEID(tre) (((tre)->dword[0] >> 16) & 0xFF) + +/* transfer descriptor macros */ +#define MHI_TRE_DATA_PTR(ptr) (ptr) +#define MHI_TRE_DATA_DWORD0(len) (len & MHI_MAX_MTU) +#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) ((2 << 16) | (bei << 10) \ + | (ieot << 9) | (ieob << 8) | chain) + +/* rsc transfer descriptor macros */ +#define MHI_RSCTRE_DATA_PTR(ptr, len) (((u64)len << 48) | ptr) +#define MHI_RSCTRE_DATA_DWORD0(cookie) (cookie) +#define MHI_RSCTRE_DATA_DWORD1 (MHI_PKT_TYPE_COALESCING << 16) + +enum MHI_CMD { + MHI_CMD_RESET_CHAN, + MHI_CMD_START_CHAN, + MHI_CMD_TIMSYNC_CFG, +}; + +enum MHI_PKT_TYPE { + MHI_PKT_TYPE_INVALID = 0x0, + MHI_PKT_TYPE_NOOP_CMD = 0x1, + MHI_PKT_TYPE_TRANSFER = 0x2, + MHI_PKT_TYPE_COALESCING = 0x8, + MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10, + MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11, + MHI_PKT_TYPE_START_CHAN_CMD = 0x12, + MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20, + MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21, + MHI_PKT_TYPE_TX_EVENT = 0x22, + MHI_PKT_TYPE_RSC_TX_EVENT = 0x28, + MHI_PKT_TYPE_EE_EVENT = 0x40, + MHI_PKT_TYPE_TSYNC_EVENT = 0x48, + MHI_PKT_TYPE_STALE_EVENT, /* internal event */ +}; + +/* MHI transfer completion events */ +enum MHI_EV_CCS { + MHI_EV_CC_INVALID = 0x0, + MHI_EV_CC_SUCCESS = 0x1, + MHI_EV_CC_EOT = 0x2, + MHI_EV_CC_OVERFLOW = 0x3, + MHI_EV_CC_EOB = 0x4, + MHI_EV_CC_OOB = 0x5, + MHI_EV_CC_DB_MODE = 0x6, + MHI_EV_CC_UNDEFINED_ERR = 0x10, + MHI_EV_CC_BAD_TRE = 0x11, +}; + +enum MHI_CH_STATE { + MHI_CH_STATE_DISABLED = 0x0, + MHI_CH_STATE_ENABLED = 0x1, + MHI_CH_STATE_RUNNING = 0x2, + MHI_CH_STATE_SUSPENDED = 0x3, + MHI_CH_STATE_STOP = 0x4, + MHI_CH_STATE_ERROR = 0x5, +}; + +enum MHI_BRSTMODE { + MHI_BRSTMODE_DISABLE = 0x2, + MHI_BRSTMODE_ENABLE = 0x3, +}; + +#define MHI_INVALID_BRSTMODE(mode) (mode != MHI_BRSTMODE_DISABLE && \ + mode != MHI_BRSTMODE_ENABLE) + +#define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \ + ee == MHI_EE_EDL) + +#define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW) + +enum MHI_ST_TRANSITION { + MHI_ST_TRANSITION_PBL, + MHI_ST_TRANSITION_READY, + MHI_ST_TRANSITION_SBL, + MHI_ST_TRANSITION_MISSION_MODE, + MHI_ST_TRANSITION_FP, + MHI_ST_TRANSITION_MAX, +}; + +extern const char * const mhi_state_tran_str[MHI_ST_TRANSITION_MAX]; +#define TO_MHI_STATE_TRANS_STR(state) (((state) >= MHI_ST_TRANSITION_MAX) ? \ + "INVALID_STATE" : mhi_state_tran_str[state]) + +extern const char * const mhi_state_str[MHI_STATE_MAX]; +#define TO_MHI_STATE_STR(state) ((state >= MHI_STATE_MAX || \ + !mhi_state_str[state]) ? 
\ + "INVALID_STATE" : mhi_state_str[state]) + +enum { + MHI_PM_BIT_DISABLE, + MHI_PM_BIT_POR, + MHI_PM_BIT_M0, + MHI_PM_BIT_M2, + MHI_PM_BIT_M3_ENTER, + MHI_PM_BIT_M3, + MHI_PM_BIT_M3_EXIT, + MHI_PM_BIT_FW_DL_ERR, + MHI_PM_BIT_SYS_ERR_DETECT, + MHI_PM_BIT_SYS_ERR_PROCESS, + MHI_PM_BIT_SHUTDOWN_PROCESS, + MHI_PM_BIT_LD_ERR_FATAL_DETECT, + MHI_PM_BIT_MAX +}; + +/* internal power states */ +enum MHI_PM_STATE { + MHI_PM_DISABLE = BIT(MHI_PM_BIT_DISABLE), /* MHI is not enabled */ + MHI_PM_POR = BIT(MHI_PM_BIT_POR), /* reset state */ + MHI_PM_M0 = BIT(MHI_PM_BIT_M0), + MHI_PM_M2 = BIT(MHI_PM_BIT_M2), + MHI_PM_M3_ENTER = BIT(MHI_PM_BIT_M3_ENTER), + MHI_PM_M3 = BIT(MHI_PM_BIT_M3), + MHI_PM_M3_EXIT = BIT(MHI_PM_BIT_M3_EXIT), + /* firmware download failure state */ + MHI_PM_FW_DL_ERR = BIT(MHI_PM_BIT_FW_DL_ERR), + MHI_PM_SYS_ERR_DETECT = BIT(MHI_PM_BIT_SYS_ERR_DETECT), + MHI_PM_SYS_ERR_PROCESS = BIT(MHI_PM_BIT_SYS_ERR_PROCESS), + MHI_PM_SHUTDOWN_PROCESS = BIT(MHI_PM_BIT_SHUTDOWN_PROCESS), + /* link not accessible */ + MHI_PM_LD_ERR_FATAL_DETECT = BIT(MHI_PM_BIT_LD_ERR_FATAL_DETECT), +}; + +#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \ + MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \ + MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \ + MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR))) +#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR) +#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT) +#define MHI_DB_ACCESS_VALID(pm_state) (pm_state & MHI_PM_M0) +#define MHI_WAKE_DB_CLEAR_VALID(pm_state) (pm_state & (MHI_PM_M0 | \ + MHI_PM_M2 | MHI_PM_M3_EXIT)) +#define MHI_WAKE_DB_SET_VALID(pm_state) (pm_state & MHI_PM_M2) +#define MHI_WAKE_DB_FORCE_SET_VALID(pm_state) MHI_WAKE_DB_CLEAR_VALID(pm_state) +#define MHI_EVENT_ACCESS_INVALID(pm_state) (pm_state == MHI_PM_DISABLE || \ + MHI_PM_IN_ERROR_STATE(pm_state)) +#define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \ + (MHI_PM_M3_ENTER | MHI_PM_M3)) + +/* accepted buffer type for the channel */ +enum MHI_XFER_TYPE { + MHI_XFER_BUFFER, + MHI_XFER_SKB, + MHI_XFER_SCLIST, + MHI_XFER_NOP, /* CPU offload channel, host does not accept transfer */ + MHI_XFER_DMA, /* receive dma address, already mapped by client */ + MHI_XFER_RSC_DMA, /* RSC type, accept premapped buffer */ +}; + +#define NR_OF_CMD_RINGS (1) +#define CMD_EL_PER_RING (128) +#define PRIMARY_CMD_RING (0) +#define MHI_DEV_WAKE_DB (127) +#define MHI_MAX_MTU (0xffff) + +enum MHI_ER_TYPE { + MHI_ER_TYPE_INVALID = 0x0, + MHI_ER_TYPE_VALID = 0x1, +}; + +enum mhi_er_data_type { + MHI_ER_DATA_ELEMENT_TYPE, + MHI_ER_CTRL_ELEMENT_TYPE, + MHI_ER_TSYNC_ELEMENT_TYPE, + MHI_ER_DATA_TYPE_MAX = MHI_ER_TSYNC_ELEMENT_TYPE, +}; + +enum mhi_ch_ee_mask { + MHI_CH_EE_PBL = BIT(MHI_EE_PBL), + MHI_CH_EE_SBL = BIT(MHI_EE_SBL), + MHI_CH_EE_AMSS = BIT(MHI_EE_AMSS), + MHI_CH_EE_RDDM = BIT(MHI_EE_RDDM), + MHI_CH_EE_PTHRU = BIT(MHI_EE_PTHRU), + MHI_CH_EE_WFW = BIT(MHI_EE_WFW), + MHI_CH_EE_EDL = BIT(MHI_EE_EDL), +}; + +enum mhi_ch_type { + MHI_CH_TYPE_INVALID = 0, + MHI_CH_TYPE_OUTBOUND = DMA_TO_DEVICE, + MHI_CH_TYPE_INBOUND = DMA_FROM_DEVICE, + MHI_CH_TYPE_INBOUND_COALESCED = 3, +}; + +struct db_cfg { + bool reset_req; + bool db_mode; + u32 pollcfg; + enum MHI_BRSTMODE brstmode; + dma_addr_t db_val; + void (*process_db)(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_cfg, void __iomem *io_addr, + dma_addr_t db_val); +}; + +struct mhi_pm_transitions { + enum MHI_PM_STATE from_state; + u32 to_states; +}; + +struct state_transition { + struct 
list_head node; + enum MHI_ST_TRANSITION state; +}; + +/* Control Segment */ +struct mhi_ctrl_seg +{ + struct mhi_tre hw_in_chan_ring[NUM_MHI_IPA_IN_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_IN_RING_ELEMENTS*16); + struct mhi_tre hw_out_chan_ring[NUM_MHI_IPA_OUT_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_OUT_RING_ELEMENTS*16); +#ifdef ENABLE_IP_SW0 + struct mhi_tre sw_in_chan_ring[NUM_MHI_SW_IP_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_IN_RING_ELEMENTS*16); + struct mhi_tre sw_out_chan_ring[NUM_MHI_SW_IP_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_OUT_RING_ELEMENTS*16); +#endif + struct mhi_tre diag_in_chan_ring[NUM_MHI_DIAG_IN_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_OUT_RING_ELEMENTS*16); + struct mhi_tre chan_ring[NUM_MHI_CHAN_RING_ELEMENTS*2*12] __packed __aligned(NUM_MHI_CHAN_RING_ELEMENTS*16); + struct mhi_tre event_ring[NUM_MHI_EVT_RINGS][NUM_MHI_EVT_RING_ELEMENTS] __packed __aligned(NUM_MHI_EVT_RING_ELEMENTS*16); + struct mhi_tre cmd_ring[NR_OF_CMD_RINGS][CMD_EL_PER_RING] __packed __aligned(CMD_EL_PER_RING*16); + + struct mhi_chan_ctxt chan_ctxt[NUM_MHI_XFER_RINGS] __aligned(128); + struct mhi_event_ctxt er_ctxt[NUM_MHI_EVT_RINGS] __aligned(128); + struct mhi_cmd_ctxt cmd_ctxt[NR_OF_CMD_RINGS] __aligned(128); +} __aligned(4096); + +struct mhi_ctxt { + struct mhi_event_ctxt *er_ctxt; + struct mhi_chan_ctxt *chan_ctxt; + struct mhi_cmd_ctxt *cmd_ctxt; + dma_addr_t er_ctxt_addr; + dma_addr_t chan_ctxt_addr; + dma_addr_t cmd_ctxt_addr; + struct mhi_ctrl_seg *ctrl_seg; + dma_addr_t ctrl_seg_addr; +}; + +struct mhi_ring { + dma_addr_t dma_handle; + dma_addr_t iommu_base; + u64 *ctxt_wp; /* point to ctxt wp */ + void *pre_aligned; + void *base; + void *rp; + void *wp; + size_t el_size; + size_t len; + size_t elements; + size_t alloc_size; + void __iomem *db_addr; +}; + +struct mhi_cmd { + struct mhi_ring ring; + spinlock_t lock; +}; + +struct mhi_buf_info { + dma_addr_t p_addr; + void *v_addr; + void *bb_addr; + void *wp; + size_t len; + void *cb_buf; + bool used; /* indicate element is free to use */ + bool pre_mapped; /* already pre-mapped by client */ + enum dma_data_direction dir; +}; + +struct mhi_event { + u32 er_index; + u32 intmod; + u32 msi; + int chan; /* this event ring is dedicated to a channel */ + u32 priority; + enum mhi_er_data_type data_type; + struct mhi_ring ring; + struct db_cfg db_cfg; + u32 used_elements; + bool hw_ring; + bool cl_manage; + bool offload_ev; /* managed by a device driver */ + spinlock_t lock; + struct mhi_chan *mhi_chan; /* dedicated to channel */ + struct tasklet_struct task; + int (*process_event)(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota); + struct mhi_controller *mhi_cntrl; +}; + +struct mhi_chan { + u32 chan; + const char *name; + /* + * important, when consuming increment tre_ring first, when releasing + * decrement buf_ring first. If tre_ring has space, buf_ring + * guranteed to have space so we do not need to check both rings. 
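+	 * (both rings hold the same number of elements and are advanced in
+	 * lockstep by the queue paths, so a fullness check on tre_ring alone
+	 * also covers buf_ring)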
+ */ + struct mhi_ring buf_ring; + struct mhi_ring tre_ring; + + u32 used_elements; + u32 used_events[MHI_EV_CC_DB_MODE+1]; + + u32 er_index; + u32 intmod; + enum mhi_ch_type type; + enum dma_data_direction dir; + struct db_cfg db_cfg; + u32 ee_mask; + enum MHI_XFER_TYPE xfer_type; + enum MHI_CH_STATE ch_state; + enum MHI_EV_CCS ccs; + bool lpm_notify; + bool configured; + bool offload_ch; + bool pre_alloc; + bool auto_start; + bool wake_capable; /* channel should wake up system */ + /* functions that generate the transfer ring elements */ + int (*gen_tre)(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan, void *buf, void *cb, + size_t len, enum MHI_FLAGS flags); + int (*queue_xfer)(struct mhi_device *mhi_dev, + struct mhi_chan *mhi_chan, void *buf, + size_t len, enum MHI_FLAGS flags); + /* xfer call back */ + struct mhi_device *mhi_dev; + void (*xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *result); + struct mutex mutex; + struct completion completion; + rwlock_t lock; + u32 ring; + u32 tiocm; + struct list_head node; +}; + +struct tsync_node { + struct list_head node; + u32 sequence; + u64 local_time; + u64 remote_time; + struct mhi_device *mhi_dev; + void (*cb_func)(struct mhi_device *mhi_dev, u32 sequence, + u64 local_time, u64 remote_time); +}; + +struct mhi_timesync { + u32 er_index; + void __iomem *db; + void __iomem *time_reg; + enum MHI_EV_CCS ccs; + struct completion completion; + spinlock_t lock; /* list protection */ + struct mutex lpm_mutex; /* lpm protection */ + struct list_head head; +}; + +struct mhi_bus { + struct list_head controller_list; + struct mutex lock; + struct dentry *dentry; +}; + +/* default MHI timeout */ +#define MHI_TIMEOUT_MS (3000) +extern struct mhi_bus mhi_bus; + +/* debug fs related functions */ +int mhi_debugfs_mhi_chan_show(struct seq_file *m, void *d); +int mhi_debugfs_mhi_event_show(struct seq_file *m, void *d); +int mhi_debugfs_mhi_states_show(struct seq_file *m, void *d); +int mhi_debugfs_trigger_reset(void *data, u64 val); + +void mhi_deinit_debugfs(struct mhi_controller *mhi_cntrl); +void mhi_init_debugfs(struct mhi_controller *mhi_cntrl); + +/* power management apis */ +enum MHI_PM_STATE __must_check mhi_tryset_pm_state( + struct mhi_controller *mhi_cntrl, + enum MHI_PM_STATE state); +const char *to_mhi_pm_state_str(enum MHI_PM_STATE state); +void mhi_reset_chan(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +enum mhi_ee mhi_get_exec_env(struct mhi_controller *mhi_cntrl); +int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl, + enum MHI_ST_TRANSITION state); +void mhi_pm_st_worker(struct work_struct *work); +void mhi_fw_load_worker(struct work_struct *work); +void mhi_pm_sys_err_worker(struct work_struct *work); +void mhi_pm_ready_worker(struct work_struct *work); +int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl); +void mhi_ctrl_ev_task(unsigned long data); +int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl); +void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl); +int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl); +void mhi_notify(struct mhi_device *mhi_dev, enum MHI_CB cb_reason); +int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, u32 event_quota); +int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, u32 event_quota); +int mhi_process_tsync_event_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, u32 event_quota); +int mhi_send_cmd(struct 
mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, + enum MHI_CMD cmd); +int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl); + +/* queue transfer buffer */ +int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, + void *buf, void *cb, size_t buf_len, enum MHI_FLAGS flags); +int mhi_queue_buf(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t len, enum MHI_FLAGS mflags); +int mhi_queue_skb(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t len, enum MHI_FLAGS mflags); +int mhi_queue_sclist(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t len, enum MHI_FLAGS mflags); +int mhi_queue_nop(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t len, enum MHI_FLAGS mflags); +int mhi_queue_dma(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t len, enum MHI_FLAGS mflags); + +/* register access methods */ +void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, struct db_cfg *db_cfg, + void __iomem *db_addr, dma_addr_t wp); +void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_mode, void __iomem *db_addr, + dma_addr_t wp); +int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 *out); +int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 mask, + u32 shift, u32 *out); +void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, + u32 offset, u32 val); +void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, + u32 offset, u32 mask, u32 shift, u32 val); +void mhi_ring_er_db(struct mhi_event *mhi_event); +void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr, + dma_addr_t wp); +void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd); +void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +int mhi_get_capability_offset(struct mhi_controller *mhi_cntrl, u32 capability, + u32 *offset); +int mhi_init_timesync(struct mhi_controller *mhi_cntrl); +int mhi_create_timesync_sysfs(struct mhi_controller *mhi_cntrl); +void mhi_destroy_timesync(struct mhi_controller *mhi_cntrl); + +/* memory allocation methods */ +static inline void *mhi_alloc_coherent(struct mhi_controller *mhi_cntrl, + size_t size, + dma_addr_t *dma_handle, + gfp_t gfp) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 5,0,0 )) + void *buf = dma_zalloc_coherent(mhi_cntrl->dev, size, dma_handle, gfp); +#else + void *buf = dma_alloc_coherent(mhi_cntrl->dev, size, dma_handle, gfp | __GFP_ZERO); +#endif + + MHI_LOG("size = %zd, dma_handle = %llx\n", size, (u64)*dma_handle); + if (buf) + atomic_add(size, &mhi_cntrl->alloc_size); + + return buf; +} +static inline void mhi_free_coherent(struct mhi_controller *mhi_cntrl, + size_t size, + void *vaddr, + dma_addr_t dma_handle) +{ + atomic_sub(size, &mhi_cntrl->alloc_size); + dma_free_coherent(mhi_cntrl->dev, size, vaddr, dma_handle); +} +struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl); +static inline void mhi_dealloc_device(struct mhi_controller *mhi_cntrl, + struct mhi_device *mhi_dev) +{ + kfree(mhi_dev); +} +int mhi_destroy_device(struct device *dev, void *data); +void mhi_create_devices(struct mhi_controller *mhi_cntrl); +int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl, + struct image_info **image_info, size_t alloc_size); +void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl, + 
struct image_info *image_info); + +int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); +int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); +void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); +void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); + +/* initialization methods */ +int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +int mhi_init_mmio(struct mhi_controller *mhi_cntrl); +int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl); +void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl); +int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl); +void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl); +int mhi_dtr_init(void); + +/* isr handlers */ +irqreturn_t mhi_one_msi_handlr(int irq_number, void *dev); +irqreturn_t mhi_msi_handlr(int irq_number, void *dev); +irqreturn_t mhi_intvec_threaded_handlr(int irq_number, void *dev); +irqreturn_t mhi_intvec_handlr(int irq_number, void *dev); +void mhi_ev_task(unsigned long data); + +#ifdef CONFIG_MHI_DEBUG + +#define MHI_ASSERT(cond, msg) do { \ + if (cond) \ + panic(msg); \ +} while (0) + +#else + +#define MHI_ASSERT(cond, msg) do { \ + if (cond) { \ + MHI_ERR(msg); \ + WARN_ON(cond); \ + } \ +} while (0) + +#endif + +#endif /* _MHI_INT_H */ diff --git a/package/wwan/driver/quectel_MHI/src/core/mhi_main.c b/package/wwan/driver/quectel_MHI/src/core/mhi_main.c new file mode 100644 index 000000000..335b9454e --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/core/mhi_main.c @@ -0,0 +1,2722 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 
*/
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/wait.h>
+#include "mhi.h"
+#include "mhi_internal.h"
+
+static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
+				    struct mhi_chan *mhi_chan);
+
+int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
+			      void __iomem *base,
+			      u32 offset,
+			      u32 *out)
+{
+	u32 tmp = readl_relaxed(base + offset);
+
+	/* unexpected value, query the link status */
+	if (PCI_INVALID_READ(tmp) &&
+	    mhi_cntrl->link_status(mhi_cntrl, mhi_cntrl->priv_data))
+		return -EIO;
+
+	*out = tmp;
+
+	return 0;
+}
+
+int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
+				    void __iomem *base,
+				    u32 offset,
+				    u32 mask,
+				    u32 shift,
+				    u32 *out)
+{
+	u32 tmp;
+	int ret;
+
+	ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
+	if (ret)
+		return ret;
+
+	*out = (tmp & mask) >> shift;
+
+	return 0;
+}
+
+int mhi_get_capability_offset(struct mhi_controller *mhi_cntrl,
+			      u32 capability,
+			      u32 *offset)
+{
+	u32 cur_cap, next_offset;
+	int ret;
+
+	/* get the 1st supported capability offset */
+	ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MISC_OFFSET,
+				 MISC_CAP_MASK, MISC_CAP_SHIFT, offset);
+	if (ret)
+		return ret;
+	if (*offset >= 0x1000)
+		return -ENXIO;
+	do {
+		ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, *offset,
+					 CAP_CAPID_MASK, CAP_CAPID_SHIFT,
+					 &cur_cap);
+		if (ret)
+			return ret;
+
+		if (cur_cap == capability)
+			return 0;
+
+		ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, *offset,
+					 CAP_NEXT_CAP_MASK, CAP_NEXT_CAP_SHIFT,
+					 &next_offset);
+		if (ret)
+			return ret;
+
+		*offset += next_offset;
+	} while (next_offset);
+
+	return -ENXIO;
+}
+
+void mhi_write_reg(struct mhi_controller *mhi_cntrl,
+		   void __iomem *base,
+		   u32 offset,
+		   u32 val)
+{
+	writel_relaxed(val, base + offset);
+}
+
+void mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
+			 void __iomem *base,
+			 u32 offset,
+			 u32 mask,
+			 u32 shift,
+			 u32 val)
+{
+	int ret;
+	u32 tmp;
+
+	ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
+	if (ret)
+		return;
+
+	tmp &= ~mask;
+	tmp |= (val << shift);
+	mhi_write_reg(mhi_cntrl, base, offset, tmp);
+}
+
+void mhi_write_db(struct mhi_controller *mhi_cntrl,
+		  void __iomem *db_addr,
+		  dma_addr_t wp)
+{
+	mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(wp));
+	mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(wp));
+#if 0 //carl.yin 20190527 for debug
+	if ((lower_32_bits(db_addr)&0xFFF) != 0x620)
+	{
+		u32 out = 0;
+		int ret = mhi_read_reg(mhi_cntrl, db_addr, 0, &out);
+		if (out != lower_32_bits(wp))
+			MHI_ERR("%s db=%x, wp=w:%x - r:%x, ret=%d\n", __func__, lower_32_bits(db_addr), lower_32_bits(wp), out, ret);
+	}
+#endif
+}
+
+void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
+		     struct db_cfg *db_cfg,
+		     void __iomem *db_addr,
+		     dma_addr_t wp)
+{
+	if (db_cfg->db_mode) {
+		db_cfg->db_val = wp;
+		mhi_write_db(mhi_cntrl, db_addr, wp);
+		db_cfg->db_mode = false;
+	}
+}
+
+void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
+			     struct db_cfg *db_cfg,
+			     void __iomem *db_addr,
+			     dma_addr_t wp)
+{
+	db_cfg->db_val = wp;
+	mhi_write_db(mhi_cntrl, db_addr, wp);
+}
+
+void mhi_ring_er_db(struct mhi_event *mhi_event)
+{
+	struct mhi_ring *ring = &mhi_event->ring;
+
+	mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
+				     ring->db_addr, *ring->ctxt_wp);
+}
+
+void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
+{
+	dma_addr_t db;
+	struct mhi_ring *ring = &mhi_cmd->ring;
+
+	db =
ring->iommu_base + (ring->wp - ring->base); + *ring->ctxt_wp = db; + mhi_write_db(mhi_cntrl, ring->db_addr, db); +} + +//#define DEBUG_CHAN100_DB +#ifdef DEBUG_CHAN100_DB +static atomic_t chan100_seq = ATOMIC_INIT(0); +#define CHAN100_SIZE 0x1000 +static unsigned int chan100_t[CHAN100_SIZE]; +#endif + +void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *ring = &mhi_chan->tre_ring; + dma_addr_t db; + + db = ring->iommu_base + (ring->wp - ring->base); + /* + * Writes to the new ring element must be visible to the hardware + * before letting h/w know there is new element to fetch. + */ + dma_wmb(); + *ring->ctxt_wp = db; + mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg, ring->db_addr, + db); +} + +enum mhi_ee mhi_get_exec_env(struct mhi_controller *mhi_cntrl) +{ + u32 exec; + int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec); + + return (ret) ? MHI_EE_MAX : exec; +} + +enum mhi_dev_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl) +{ + u32 state; + int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, + MHISTATUS_MHISTATE_MASK, + MHISTATUS_MHISTATE_SHIFT, &state); + return ret ? MHI_STATE_MAX : state; +} + +int mhi_queue_sclist(struct mhi_device *mhi_dev, + struct mhi_chan *mhi_chan, + void *buf, + size_t len, + enum MHI_FLAGS mflags) +{ + return -EINVAL; +} + +int mhi_queue_nop(struct mhi_device *mhi_dev, + struct mhi_chan *mhi_chan, + void *buf, + size_t len, + enum MHI_FLAGS mflags) +{ + return -EINVAL; +} + +static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + void *wp = ring->wp; + wp += ring->el_size; + if (wp >= (ring->base + ring->len)) + wp = ring->base; + ring->wp = wp; + /* smp update */ + smp_wmb(); +} + +static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + void *rp = ring->rp; + rp += ring->el_size; + if (rp >= (ring->base + ring->len)) + rp = ring->base; + ring->rp = rp; + /* smp update */ + smp_wmb(); +} + +static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + int nr_el; + + if (ring->wp < ring->rp) + nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1; + else { + nr_el = (ring->rp - ring->base) / ring->el_size; + nr_el += ((ring->base + ring->len - ring->wp) / + ring->el_size) - 1; + } + return nr_el; +} + +static u32 get_used_ring_elements(void *rp, void *wp, u32 el_num) +{ + u32 nr_el; + + if (wp >= rp) + nr_el = (wp - rp)/sizeof(struct mhi_tre); + else { + nr_el = (rp - wp)/sizeof(struct mhi_tre); + nr_el = el_num - nr_el; + } + return nr_el; +} + +static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr) +{ + return (addr - ring->iommu_base) + ring->base; +} + +dma_addr_t mhi_to_physical(struct mhi_ring *ring, void *addr) +{ + return (addr - ring->base) + ring->iommu_base; +} + +static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + void *rp, *wp; + + /* update the WP */ + wp = ring->wp; + wp += ring->el_size; + if (wp >= (ring->base + ring->len)) { + wp = ring->base; + } + ring->wp = wp; + + *ring->ctxt_wp = ring->iommu_base + (ring->wp - ring->base); + + /* update the RP */ + rp = ring->rp; + rp += ring->el_size; + if (rp >= (ring->base + ring->len)) + rp = ring->base; + ring->rp = rp; + + /* visible to other cores */ + smp_wmb(); +} + +static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + void *tmp = ring->wp + ring->el_size; + + if 
(tmp >= (ring->base + ring->len)) + tmp = ring->base; + + return (tmp == ring->rp); +} + +int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + buf_info->p_addr = dma_map_single(mhi_cntrl->dev, buf_info->v_addr, + buf_info->len, buf_info->dir); + if (dma_mapping_error(mhi_cntrl->dev, buf_info->p_addr)) + return -ENOMEM; + + return 0; +} + +int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + void *buf = mhi_alloc_coherent(mhi_cntrl, buf_info->len, + &buf_info->p_addr, GFP_ATOMIC); + + if (!buf) + return -ENOMEM; + + if (buf_info->dir == DMA_TO_DEVICE) + memcpy(buf, buf_info->v_addr, buf_info->len); + + buf_info->bb_addr = buf; + + return 0; +} + +void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + dma_unmap_single(mhi_cntrl->dev, buf_info->p_addr, buf_info->len, + buf_info->dir); +} + +void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + if (buf_info->dir == DMA_FROM_DEVICE) + memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len); + + mhi_free_coherent(mhi_cntrl, buf_info->len, buf_info->bb_addr, + buf_info->p_addr); +} + +#ifdef ENABLE_MHI_MON +static void mon_bus_submit(struct mhi_controller *mbus, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len) +{ + unsigned long flags; + struct list_head *pos; + struct mon_reader *r; + + spin_lock_irqsave(&mbus->lock, flags); + mbus->cnt_events++; + list_for_each (pos, &mbus->r_list) { + r = list_entry(pos, struct mon_reader, r_link); + r->rnf_submit(r->r_data, chan, wp, mhi_tre, buf, len); + } + spin_unlock_irqrestore(&mbus->lock, flags); +} + +static void mon_bus_receive(struct mhi_controller *mbus, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len) +{ + unsigned long flags; + struct list_head *pos; + struct mon_reader *r; + + spin_lock_irqsave(&mbus->lock, flags); + mbus->cnt_events++; + list_for_each (pos, &mbus->r_list) { + r = list_entry(pos, struct mon_reader, r_link); + r->rnf_receive(r->r_data, chan, wp, mhi_tre, buf, len); + } + spin_unlock_irqrestore(&mbus->lock, flags); +} + +static void mon_bus_complete(struct mhi_controller *mbus, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre) +{ + unsigned long flags; + struct list_head *pos; + struct mon_reader *r; + + spin_lock_irqsave(&mbus->lock, flags); + mbus->cnt_events++; + list_for_each (pos, &mbus->r_list) { + r = list_entry(pos, struct mon_reader, r_link); + r->rnf_complete(r->r_data, chan, wp, mhi_tre); + } + spin_unlock_irqrestore(&mbus->lock, flags); +} +#endif + +int mhi_queue_skb(struct mhi_device *mhi_dev, + struct mhi_chan *mhi_chan, + void *buf, + size_t len, + enum MHI_FLAGS mflags) +{ + struct sk_buff *skb = buf; + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_ring *tre_ring = &mhi_chan->tre_ring; + struct mhi_ring *buf_ring = &mhi_chan->buf_ring; + struct mhi_buf_info *buf_info; + struct mhi_tre *mhi_tre; + bool assert_wake = false; + int ret; + + if (mhi_is_ring_full(mhi_cntrl, tre_ring)) + return -ENOMEM; + + read_lock_bh(&mhi_cntrl->pm_lock); + if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) { + MHI_VERB("MHI is not in activate state, pm_state:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return -EIO; + } + + /* we're in M3 or transitioning to M3 */ + if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) { + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + 
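+		/*
+		 * cycling the usage count through the controller's
+		 * runtime_get/runtime_put callbacks kicks off a resume from
+		 * M3 without holding a reference for the whole transfer
+		 */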
mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + } + + /* + * For UL channels always assert WAKE until work is done, + * For DL channels only assert if MHI is in a LPM + */ + if (mhi_chan->dir == DMA_TO_DEVICE || + (mhi_chan->dir == DMA_FROM_DEVICE && + mhi_cntrl->pm_state != MHI_PM_M0)) { + assert_wake = true; + mhi_cntrl->wake_get(mhi_cntrl, false); + } + + /* generate the tre */ + buf_info = buf_ring->wp; + buf_info->v_addr = skb->data; + buf_info->cb_buf = skb; + buf_info->wp = tre_ring->wp; + buf_info->dir = mhi_chan->dir; + buf_info->len = len; + ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); + if (ret) + goto map_error; + + mhi_tre = tre_ring->wp; + + mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr); + mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len); + mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0); + +#ifdef ENABLE_MHI_MON + if (mhi_cntrl->nreaders) { + mon_bus_submit(mhi_cntrl, mhi_chan->chan, + mhi_to_physical(tre_ring, mhi_tre), mhi_tre, buf_info->v_addr, mhi_chan->chan&0x1 ? 0 : buf_info->len); + } +#endif + + MHI_VERB("chan:%d WP:0x%llx TRE:0x%llx 0x%08x 0x%08x\n", mhi_chan->chan, + (u64)mhi_to_physical(tre_ring, mhi_tre), mhi_tre->ptr, + mhi_tre->dword[0], mhi_tre->dword[1]); + + if (mhi_chan->dir == DMA_TO_DEVICE) { + if (atomic_inc_return(&mhi_cntrl->pending_pkts) == 1) + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + } + + read_lock_bh(&mhi_chan->lock); + /* increment WP */ + mhi_add_ring_element(mhi_cntrl, tre_ring); + mhi_add_ring_element(mhi_cntrl, buf_ring); + +#ifdef DEBUG_CHAN100_DB + if (mhi_chan->chan == 100) { + chan100_t[atomic_inc_return(&chan100_seq)&(CHAN100_SIZE-1)] = (((unsigned long)tre_ring->wp)&0xffff) | (mhi_chan->db_cfg.db_mode<<31) | (0<<30); + } +#endif + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) { + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + } + read_unlock_bh(&mhi_chan->lock); + + if (mhi_chan->dir == DMA_FROM_DEVICE && assert_wake) + mhi_cntrl->wake_put(mhi_cntrl, true); + + read_unlock_bh(&mhi_cntrl->pm_lock); + + return 0; + +map_error: + if (assert_wake) + mhi_cntrl->wake_put(mhi_cntrl, false); + + read_unlock_bh(&mhi_cntrl->pm_lock); + + return ret; +} + +int mhi_queue_dma(struct mhi_device *mhi_dev, + struct mhi_chan *mhi_chan, + void *buf, + size_t len, + enum MHI_FLAGS mflags) +{ + struct mhi_buf *mhi_buf = buf; + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_ring *tre_ring = &mhi_chan->tre_ring; + struct mhi_ring *buf_ring = &mhi_chan->buf_ring; + struct mhi_buf_info *buf_info; + struct mhi_tre *mhi_tre; + bool assert_wake = false; + + if (mhi_is_ring_full(mhi_cntrl, tre_ring)) + return -ENOMEM; + + read_lock_bh(&mhi_cntrl->pm_lock); + if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) { + MHI_VERB("MHI is not in activate state, pm_state:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return -EIO; + } + + /* we're in M3 or transitioning to M3 */ + if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) { + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + } + + /* + * For UL channels always assert WAKE until work is done, + * For DL channels only assert if MHI is in a LPM + */ + if (mhi_chan->dir == DMA_TO_DEVICE || + (mhi_chan->dir == DMA_FROM_DEVICE && + mhi_cntrl->pm_state != MHI_PM_M0)) { + assert_wake = true; + mhi_cntrl->wake_get(mhi_cntrl, false); + } + + /* generate the tre */ + buf_info = buf_ring->wp; + MHI_ASSERT(buf_info->used, "TRE Not 
Freed\n"); + buf_info->p_addr = mhi_buf->dma_addr; + buf_info->pre_mapped = true; + buf_info->cb_buf = mhi_buf; + buf_info->wp = tre_ring->wp; + buf_info->dir = mhi_chan->dir; + buf_info->len = len; + + mhi_tre = tre_ring->wp; + + if (mhi_chan->xfer_type == MHI_XFER_RSC_DMA) { + buf_info->used = true; + mhi_tre->ptr = + MHI_RSCTRE_DATA_PTR(buf_info->p_addr, buf_info->len); + mhi_tre->dword[0] = + MHI_RSCTRE_DATA_DWORD0(buf_ring->wp - buf_ring->base); + mhi_tre->dword[1] = MHI_RSCTRE_DATA_DWORD1; + } else { + mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr); + mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len); + mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0); + } + +#ifdef ENABLE_MHI_MON + if (mhi_cntrl->nreaders) { + mon_bus_submit(mhi_cntrl, mhi_chan->chan, + mhi_to_physical(tre_ring, mhi_tre), mhi_tre, buf_info->v_addr, mhi_chan->chan&0x1 ? 0: buf_info->len); + } +#endif + + MHI_VERB("chan:%d WP:0x%llx TRE:0x%llx 0x%08x 0x%08x\n", mhi_chan->chan, + (u64)mhi_to_physical(tre_ring, mhi_tre), mhi_tre->ptr, + mhi_tre->dword[0], mhi_tre->dword[1]); + + if (mhi_chan->dir == DMA_TO_DEVICE) { + if (atomic_inc_return(&mhi_cntrl->pending_pkts) == 1) + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + } + + read_lock_bh(&mhi_chan->lock); + /* increment WP */ + mhi_add_ring_element(mhi_cntrl, tre_ring); + mhi_add_ring_element(mhi_cntrl, buf_ring); + + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) { + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + } + read_unlock_bh(&mhi_chan->lock); + + if (mhi_chan->dir == DMA_FROM_DEVICE && assert_wake) + mhi_cntrl->wake_put(mhi_cntrl, true); + + read_unlock_bh(&mhi_cntrl->pm_lock); + + return 0; +} + +int mhi_gen_tre(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan, + void *buf, + void *cb, + size_t buf_len, + enum MHI_FLAGS flags) +{ + struct mhi_ring *buf_ring, *tre_ring; + struct mhi_tre *mhi_tre; + struct mhi_buf_info *buf_info; + int eot, eob, chain, bei; + int ret; + + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + + buf_info = buf_ring->wp; + buf_info->v_addr = buf; + buf_info->cb_buf = cb; + buf_info->wp = tre_ring->wp; + buf_info->dir = mhi_chan->dir; + buf_info->len = buf_len; + + ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); + if (ret) + return ret; + + eob = !!(flags & MHI_EOB); + eot = !!(flags & MHI_EOT); + chain = !!(flags & MHI_CHAIN); + bei = !!(mhi_chan->intmod); + + mhi_tre = tre_ring->wp; + mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr); + mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_len); + mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain); + +#ifdef ENABLE_MHI_MON + if (mhi_cntrl->nreaders) { + mon_bus_submit(mhi_cntrl, mhi_chan->chan, + mhi_to_physical(tre_ring, mhi_tre), mhi_tre, buf_info->v_addr, mhi_chan->chan&0x1 ? 
0 : buf_info->len); + } +#endif + MHI_VERB("chan:%d WP:0x%llx TRE:0x%llx 0x%08x 0x%08x\n", mhi_chan->chan, + (u64)mhi_to_physical(tre_ring, mhi_tre), mhi_tre->ptr, + mhi_tre->dword[0], mhi_tre->dword[1]); + + /* increment WP */ + mhi_add_ring_element(mhi_cntrl, tre_ring); + mhi_add_ring_element(mhi_cntrl, buf_ring); + + return 0; +} + +int mhi_queue_buf(struct mhi_device *mhi_dev, + struct mhi_chan *mhi_chan, + void *buf, + size_t len, + enum MHI_FLAGS mflags) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_ring *tre_ring; + unsigned long flags; + bool assert_wake = false; + int ret; + + /* + * this check here only as a guard, it's always + * possible mhi can enter error while executing rest of function, + * which is not fatal so we do not need to hold pm_lock + */ + if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) { + MHI_VERB("MHI is not in active state, pm_state:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + + return -EIO; + } + + tre_ring = &mhi_chan->tre_ring; + if (mhi_is_ring_full(mhi_cntrl, tre_ring)) + return -ENOMEM; + + ret = mhi_chan->gen_tre(mhi_cntrl, mhi_chan, buf, buf, len, mflags); + if (unlikely(ret)) + return ret; + + read_lock_irqsave(&mhi_cntrl->pm_lock, flags); + + /* we're in M3 or transitioning to M3 */ + if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) { + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + } + + /* + * For UL channels always assert WAKE until work is done, + * For DL channels only assert if MHI is in a LPM + */ + if (mhi_chan->dir == DMA_TO_DEVICE || + (mhi_chan->dir == DMA_FROM_DEVICE && + mhi_cntrl->pm_state != MHI_PM_M0)) { + assert_wake = true; + mhi_cntrl->wake_get(mhi_cntrl, false); + } + + if (mhi_chan->dir == DMA_TO_DEVICE) { + if (atomic_inc_return(&mhi_cntrl->pending_pkts) == 1) + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + } + + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) { + unsigned long flags; + + read_lock_irqsave(&mhi_chan->lock, flags); + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + read_unlock_irqrestore(&mhi_chan->lock, flags); + } + + if (mhi_chan->dir == DMA_FROM_DEVICE && assert_wake) + mhi_cntrl->wake_put(mhi_cntrl, true); + + read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); + + return 0; +} + +static ssize_t ul_chan_id_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct mhi_device *mhi_dev = to_mhi_device(dev); + + return snprintf(buf, PAGE_SIZE, "%d\n", mhi_dev->ul_chan_id); +} + +static DEVICE_ATTR_RO(ul_chan_id); + +static struct attribute *mhi_dev_attrs[] = { + &dev_attr_ul_chan_id.attr, + NULL, +}; + +static struct attribute_group mhi_dev_attr_group = { + .attrs = mhi_dev_attrs, +}; + +/* destroy specific device */ +int mhi_destroy_device(struct device *dev, void *data) +{ + struct mhi_device *mhi_dev; + struct mhi_controller *mhi_cntrl; + + if (dev->bus != &mhi_bus_type) + return 0; + + mhi_dev = to_mhi_device(dev); + mhi_cntrl = mhi_dev->mhi_cntrl; + + /* only destroying virtual devices thats attached to bus */ + if (mhi_dev->dev_type == MHI_CONTROLLER_TYPE) + return 0; + + MHI_LOG("destroy device for chan:%s\n", mhi_dev->chan_name); + + sysfs_remove_group(&mhi_dev->dev.kobj, &mhi_dev_attr_group); + /* notify the client and remove the device from mhi bus */ + device_del(dev); + put_device(dev); + + return 0; +} + +void mhi_notify(struct mhi_device *mhi_dev, enum MHI_CB cb_reason) +{ + struct mhi_driver *mhi_drv; + + if (!mhi_dev->dev.driver) + return; + + 
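+	/* forward the callback reason to the client driver, if one is bound */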
mhi_drv = to_mhi_driver(mhi_dev->dev.driver); + + if (mhi_drv->status_cb) + mhi_drv->status_cb(mhi_dev, cb_reason); +} + +static void mhi_assign_of_node(struct mhi_controller *mhi_cntrl, + struct mhi_device *mhi_dev) +{ + struct device_node *controller, *node; + const char *dt_name; + int ret; + + controller = of_find_node_by_name(mhi_cntrl->of_node, "mhi_devices"); + if (!controller) + return; + + for_each_available_child_of_node(controller, node) { + ret = of_property_read_string(node, "mhi,chan", &dt_name); + if (ret) + continue; + if (!strcmp(mhi_dev->chan_name, dt_name)) { + mhi_dev->dev.of_node = node; + break; + } + } +} + +static ssize_t time_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + u64 t_host, t_device; + int ret; + + ret = mhi_get_remote_time_sync(mhi_dev, &t_host, &t_device); + if (ret) { + MHI_ERR("Failed to obtain time, ret:%d\n", ret); + return ret; + } + + return scnprintf(buf, PAGE_SIZE, "local: %llu remote: %llu (ticks)\n", + t_host, t_device); +} +static DEVICE_ATTR_RO(time); + +static ssize_t time_us_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + u64 t_host, t_device; + int ret; + + ret = mhi_get_remote_time_sync(mhi_dev, &t_host, &t_device); + if (ret) { + MHI_ERR("Failed to obtain time, ret:%d\n", ret); + return ret; + } + + return scnprintf(buf, PAGE_SIZE, "local: %llu remote: %llu (us)\n", + TIME_TICKS_TO_US(t_host), TIME_TICKS_TO_US(t_device)); +} +static DEVICE_ATTR_RO(time_us); + +static struct attribute *mhi_tsync_attrs[] = { + &dev_attr_time.attr, + &dev_attr_time_us.attr, + NULL, +}; + +static const struct attribute_group mhi_tsync_group = { + .attrs = mhi_tsync_attrs, +}; + +void mhi_destroy_timesync(struct mhi_controller *mhi_cntrl) +{ + if (mhi_cntrl->mhi_tsync) { + sysfs_remove_group(&mhi_cntrl->mhi_dev->dev.kobj, + &mhi_tsync_group); + kfree(mhi_cntrl->mhi_tsync); + mhi_cntrl->mhi_tsync = NULL; + } +} + +int mhi_create_timesync_sysfs(struct mhi_controller *mhi_cntrl) +{ + return sysfs_create_group(&mhi_cntrl->mhi_dev->dev.kobj, + &mhi_tsync_group); +} + +static void mhi_create_time_sync_dev(struct mhi_controller *mhi_cntrl) +{ + struct mhi_device *mhi_dev; + int ret; + + if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee)) + return; + + mhi_dev = mhi_alloc_device(mhi_cntrl); + if (!mhi_dev) + return; + + mhi_dev->dev_type = MHI_TIMESYNC_TYPE; + mhi_dev->chan_name = "TIME_SYNC"; + dev_set_name(&mhi_dev->dev, "%04x_%02u.%02u.%02u_%s", mhi_dev->dev_id, + mhi_dev->domain, mhi_dev->bus, mhi_dev->slot, + mhi_dev->chan_name); + + /* add if there is a matching DT node */ + mhi_assign_of_node(mhi_cntrl, mhi_dev); + + ret = device_add(&mhi_dev->dev); + if (ret) { + MHI_ERR("Failed to register dev for chan:%s\n", + mhi_dev->chan_name); + mhi_dealloc_device(mhi_cntrl, mhi_dev); + return; + } + + mhi_cntrl->tsync_dev = mhi_dev; +} + +/* bind mhi channels into mhi devices */ +void mhi_create_devices(struct mhi_controller *mhi_cntrl) +{ + int i; + struct mhi_chan *mhi_chan; + struct mhi_device *mhi_dev; + int ret; + + /* + * we need to create time sync device before creating other + * devices, because client may try to capture time during + * clint probe. 
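+	 * (i.e. the TIME_SYNC device must already be registered by the time
+	 * any dependent client binds).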
+ */ + mhi_create_time_sync_dev(mhi_cntrl); + + mhi_chan = mhi_cntrl->mhi_chan; + for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { + if (!mhi_chan->configured || mhi_chan->mhi_dev || + !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee))) + continue; + mhi_dev = mhi_alloc_device(mhi_cntrl); + if (!mhi_dev) + return; + + mhi_dev->dev_type = MHI_XFER_TYPE; + switch (mhi_chan->dir) { + case DMA_TO_DEVICE: + mhi_dev->ul_chan = mhi_chan; + mhi_dev->ul_chan_id = mhi_chan->chan; + mhi_dev->ul_xfer = mhi_chan->queue_xfer; + mhi_dev->ul_event_id = mhi_chan->er_index; + break; + case DMA_NONE: + case DMA_BIDIRECTIONAL: + mhi_dev->ul_chan_id = mhi_chan->chan; + mhi_dev->ul_event_id = mhi_chan->er_index; + mhi_dev->dl_chan = mhi_chan; + mhi_dev->dl_chan_id = mhi_chan->chan; + mhi_dev->dl_xfer = mhi_chan->queue_xfer; + mhi_dev->dl_event_id = mhi_chan->er_index; + break; + case DMA_FROM_DEVICE: + /* we use dl_chan for offload channels */ + mhi_dev->dl_chan = mhi_chan; + mhi_dev->dl_chan_id = mhi_chan->chan; + mhi_dev->dl_xfer = mhi_chan->queue_xfer; + mhi_dev->dl_event_id = mhi_chan->er_index; + break; + } + + mhi_chan->mhi_dev = mhi_dev; + + /* check next channel if it matches */ + if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) { + if (!strcmp(mhi_chan[1].name, mhi_chan->name)) { + i++; + mhi_chan++; + if (mhi_chan->dir == DMA_TO_DEVICE) { + mhi_dev->ul_chan = mhi_chan; + mhi_dev->ul_chan_id = mhi_chan->chan; + mhi_dev->ul_xfer = mhi_chan->queue_xfer; + mhi_dev->ul_event_id = + mhi_chan->er_index; + } else { + mhi_dev->dl_chan = mhi_chan; + mhi_dev->dl_chan_id = mhi_chan->chan; + mhi_dev->dl_xfer = mhi_chan->queue_xfer; + mhi_dev->dl_event_id = + mhi_chan->er_index; + } + mhi_chan->mhi_dev = mhi_dev; + } + } + + mhi_dev->chan_name = mhi_chan->name; + dev_set_name(&mhi_dev->dev, "%04x_%02u.%02u.%02u_%s", + mhi_dev->dev_id, mhi_dev->domain, mhi_dev->bus, + mhi_dev->slot, mhi_dev->chan_name); + + /* add if there is a matching DT node */ + mhi_assign_of_node(mhi_cntrl, mhi_dev); + + /* init wake source */ + if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable) + device_init_wakeup(&mhi_dev->dev, true); + + ret = device_add(&mhi_dev->dev); + if (ret) { + MHI_ERR("Failed to register dev for chan:%s\n", + mhi_dev->chan_name); + mhi_dealloc_device(mhi_cntrl, mhi_dev); + } + ret = sysfs_create_group(&mhi_dev->dev.kobj, &mhi_dev_attr_group); + } +} + +static void mhi_dump_tre(struct mhi_controller *mhi_cntrl, struct mhi_tre *_ev) { + union mhi_dev_ring_element_type *ev = (union mhi_dev_ring_element_type *)_ev; + + switch (ev->generic.type) { + case MHI_DEV_RING_EL_INVALID: { + MHI_ERR("carl_ev cmd_invalid, ptr=%llx, %x, %x\n", _ev->ptr, _ev->dword[0], _ev->dword[1]); + } + break; + case MHI_DEV_RING_EL_NOOP: { + MHI_LOG("carl_ev cmd_no_op chan=%u\n", ev->cmd_no_op.chid); + } + break; + case MHI_DEV_RING_EL_TRANSFER: { + MHI_LOG("carl_ev cmd_transfer data=%llx, len=%u, chan=%u\n", + ev->cmd_transfer.data_buf_ptr, ev->cmd_transfer.len, ev->cmd_transfer.chain); + } + break; + case MHI_DEV_RING_EL_RESET: { + MHI_LOG("carl_ev cmd_reset chan=%u\n", ev->cmd_reset.chid); + } + break; + case MHI_DEV_RING_EL_STOP: { + MHI_LOG("carl_ev cmd_stop chan=%u\n", ev->cmd_stop.chid); + } + break; + case MHI_DEV_RING_EL_START: { + MHI_LOG("carl_ev cmd_start chan=%u\n", ev->cmd_start.chid); + } + break; + case MHI_DEV_RING_EL_MHI_STATE_CHG: { + MHI_LOG("carl_ev evt_state_change mhistate=%u\n", ev->evt_state_change.mhistate); + } + break; + case MHI_DEV_RING_EL_CMD_COMPLETION_EVT:{ + MHI_LOG("carl_ev evt_cmd_comp code=%u, 
type=%u\n", ev->evt_cmd_comp.code, ev->evt_cmd_comp.type); + } + break; + case MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT:{ + MHI_VERB("carl_ev evt_tr_comp ptr=%llx, len=%u, code=%u, chan=%u\n", + ev->evt_tr_comp.ptr, ev->evt_tr_comp.len, ev->evt_tr_comp.code, ev->evt_tr_comp.chid); + } + break; + case MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY:{ + MHI_LOG("carl_ev evt_ee_state execenv=%u\n", ev->evt_ee_state.execenv); + } + break; + case MHI_DEV_RING_EL_UNDEF: + default: { + MHI_ERR("carl_ev el_undef type=%d\n", ev->generic.type); + }; + break; + } +} + +static int parse_xfer_event(struct mhi_controller *mhi_cntrl, + struct mhi_tre *event, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring, *tre_ring; + u32 ev_code; + struct mhi_result result; + unsigned long flags = 0; + + ev_code = MHI_TRE_GET_EV_CODE(event); + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + + result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ? + -EOVERFLOW : 0; + + /* + * if it's a DB Event then we need to grab the lock + * with preemption disable and as a write because we + * have to update db register and another thread could + * be doing same. + */ + if (ev_code >= MHI_EV_CC_OOB) + write_lock_irqsave(&mhi_chan->lock, flags); + else + read_lock_bh(&mhi_chan->lock); + + if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) + goto end_process_tx_event; + + switch (ev_code) { + case MHI_EV_CC_OVERFLOW: + case MHI_EV_CC_EOB: + case MHI_EV_CC_EOT: + { + dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event); + struct mhi_tre *local_rp, *ev_tre; + void *dev_rp; + struct mhi_buf_info *buf_info; + u16 xfer_len; + + /* Get the TRB this event points to */ + ev_tre = mhi_to_virtual(tre_ring, ptr); + + /* device rp after servicing the TREs */ + dev_rp = ev_tre + 1; + if (dev_rp >= (tre_ring->base + tre_ring->len)) + dev_rp = tre_ring->base; + + mhi_chan->used_events[ev_code]++; + + result.dir = mhi_chan->dir; + + /* local rp */ + local_rp = tre_ring->rp; + while (local_rp != dev_rp) { + buf_info = buf_ring->rp; + /* Always get the get len from the event */ + xfer_len = MHI_TRE_GET_EV_LEN(event); + + /* unmap if it's not premapped by client */ + if (likely(!buf_info->pre_mapped)) + mhi_cntrl->unmap_single(mhi_cntrl, buf_info); + + result.buf_addr = buf_info->cb_buf; + result.bytes_xferd = xfer_len; +#ifdef ENABLE_MHI_MON + if (mhi_cntrl->nreaders) { + void *buf = NULL; + size_t len = 0; + + if (mhi_chan->queue_xfer == mhi_queue_skb) { + struct sk_buff *skb = result.buf_addr; + buf = skb->data; + len = result.bytes_xferd; + } + else if (CHAN_INBOUND(mhi_chan->chan)) { + buf = result.buf_addr; + len = result.bytes_xferd; + } + mon_bus_receive(mhi_cntrl, mhi_chan->chan, + mhi_to_physical(tre_ring, local_rp), local_rp, buf, len); + } +#endif + mhi_del_ring_element(mhi_cntrl, buf_ring); + mhi_del_ring_element(mhi_cntrl, tre_ring); + local_rp = tre_ring->rp; + + /* notify client */ + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + + if (mhi_chan->dir == DMA_TO_DEVICE) { + if (atomic_dec_return(&mhi_cntrl->pending_pkts) == 0) + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + } + + if (mhi_chan->dir == DMA_TO_DEVICE) { + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + } + + /* + * recycle the buffer if buffer is pre-allocated, + * if there is error, not much we can do apart from + * dropping the packet + */ + if (mhi_chan->pre_alloc) { + if (mhi_queue_buf(mhi_chan->mhi_dev, mhi_chan, + buf_info->cb_buf, + buf_info->len, MHI_EOT)) { + MHI_ERR( 
+ "Error recycling buffer for chan:%d\n", + mhi_chan->chan); + kfree(buf_info->cb_buf); + } + } + } + break; + } /* CC_EOT */ + case MHI_EV_CC_OOB: + case MHI_EV_CC_DB_MODE: + { + unsigned long flags; + + mhi_chan->used_events[ev_code]++; + MHI_VERB("DB_MODE/OOB Detected chan %d.\n", mhi_chan->chan); + mhi_chan->db_cfg.db_mode = true; + read_lock_irqsave(&mhi_cntrl->pm_lock, flags); +#ifdef DEBUG_CHAN100_DB + if (mhi_chan->chan == 100) { + chan100_t[atomic_inc_return(&chan100_seq)&(CHAN100_SIZE-1)] = (((unsigned long)tre_ring->rp)&0xffff) | (0xf0000); + chan100_t[atomic_inc_return(&chan100_seq)&(CHAN100_SIZE-1)] = (((unsigned long)tre_ring->wp)&0xffff) | (mhi_chan->db_cfg.db_mode<<31) | (1<<30); + } +#endif + if (tre_ring->wp != tre_ring->rp && + MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state)) { + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + } + read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); + break; + } + case MHI_EV_CC_BAD_TRE: + MHI_ASSERT(1, "Received BAD TRE event for ring"); + break; + default: + MHI_CRITICAL("Unknown TX completion.\n"); + + break; + } /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */ + +end_process_tx_event: + if (ev_code >= MHI_EV_CC_OOB) + write_unlock_irqrestore(&mhi_chan->lock, flags); + else + read_unlock_bh(&mhi_chan->lock); + + return 0; +} + +static int parse_rsc_event(struct mhi_controller *mhi_cntrl, + struct mhi_tre *event, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring, *tre_ring; + struct mhi_buf_info *buf_info; + struct mhi_result result; + int ev_code; + u32 cookie; /* offset to local descriptor */ + u16 xfer_len; + + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + + ev_code = MHI_TRE_GET_EV_CODE(event); + cookie = MHI_TRE_GET_EV_COOKIE(event); + xfer_len = MHI_TRE_GET_EV_LEN(event); + + /* received out of bound cookie */ + MHI_ASSERT(cookie >= buf_ring->len, "Invalid Cookie\n"); + + buf_info = buf_ring->base + cookie; + + result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ? + -EOVERFLOW : 0; + result.bytes_xferd = xfer_len; + result.buf_addr = buf_info->cb_buf; + result.dir = mhi_chan->dir; + + read_lock_bh(&mhi_chan->lock); + + if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) + goto end_process_rsc_event; + + MHI_ASSERT(!buf_info->used, "TRE already Freed\n"); + + /* notify the client */ + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + + /* + * Note: We're arbitrarily incrementing RP even though, completion + * packet we processed might not be the same one, reason we can do this + * is because device guaranteed to cache descriptors in order it + * receive, so even though completion event is different we can re-use + * all descriptors in between. + * Example: + * Transfer Ring has descriptors: A, B, C, D + * Last descriptor host queue is D (WP) and first descriptor + * host queue is A (RP). + * The completion event we just serviced is descriptor C. + * Then we can safely queue descriptors to replace A, B, and C + * even though host did not receive any completions. 
+ */ + mhi_del_ring_element(mhi_cntrl, tre_ring); + buf_info->used = false; + +end_process_rsc_event: + read_unlock_bh(&mhi_chan->lock); + + return 0; +} + +static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl, + struct mhi_tre *tre) +{ + dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre); + struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; + struct mhi_ring *mhi_ring = &cmd_ring->ring; + struct mhi_tre *cmd_pkt; + struct mhi_chan *mhi_chan; + struct mhi_timesync *mhi_tsync; + enum mhi_cmd_type type; + u32 chan; + + cmd_pkt = mhi_to_virtual(mhi_ring, ptr); + + /* out of order completion received */ + MHI_ASSERT(cmd_pkt != mhi_ring->rp, "Out of order cmd completion"); + + type = MHI_TRE_GET_CMD_TYPE(cmd_pkt); + + if (type == MHI_CMD_TYPE_TSYNC) { + mhi_tsync = mhi_cntrl->mhi_tsync; + mhi_tsync->ccs = MHI_TRE_GET_EV_CODE(tre); + complete(&mhi_tsync->completion); + } else { + chan = MHI_TRE_GET_CMD_CHID(cmd_pkt); + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + write_lock_bh(&mhi_chan->lock); + mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre); + complete(&mhi_chan->completion); + write_unlock_bh(&mhi_chan->lock); + } + + mhi_del_ring_element(mhi_cntrl, mhi_ring); +} + +int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota) +{ + struct mhi_tre *dev_rp, *local_rp; + struct mhi_ring *ev_ring = &mhi_event->ring; + struct mhi_event_ctxt *er_ctxt = + &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; + int count = 0; + + /* + * this is a quick check to avoid unnecessary event processing + * in case we already in error state, but it's still possible + * to transition to error state while processing events + */ + if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) { + MHI_ERR("No EV access, PM_STATE:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + local_rp = ev_ring->rp; + + while (dev_rp != local_rp) { + enum MHI_PKT_TYPE type = MHI_TRE_GET_EV_TYPE(local_rp); + +//#define QL_READ_EVENT_WA //from Quectel Windows driver +#ifdef QL_READ_EVENT_WA + if (mhi_event->er_index == 0) { + if (local_rp->ptr == 0 && local_rp->dword[0] == 0 && local_rp->dword[1] == 0) { + // event content no sync to memory, just break and wait next event. 
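+					// an all-zero TRE means the device's DMA write of this
+					// event has not landed in host memory yet; rp is left in
+					// place so the element is re-read on the next interrupt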
+ MHI_ERR("Zero Event!\n"); + break; + } + } +#endif + + mhi_dump_tre(mhi_cntrl, local_rp); + MHI_VERB("Processing Event:0x%llx 0x%08x 0x%08x\n", + local_rp->ptr, local_rp->dword[0], local_rp->dword[1]); +#ifdef ENABLE_MHI_MON + if (mhi_cntrl->nreaders) { + mon_bus_complete(mhi_cntrl, mhi_event->er_index, mhi_to_physical(ev_ring, local_rp), local_rp); + } +#endif + + switch (type) { + case MHI_PKT_TYPE_STATE_CHANGE_EVENT: + { + enum mhi_dev_state new_state; + + new_state = MHI_TRE_GET_EV_STATE(local_rp); + + MHI_LOG("MHI state change event to state:%s\n", + TO_MHI_STATE_STR(new_state)); + + switch (new_state) { + case MHI_STATE_M0: + mhi_pm_m0_transition(mhi_cntrl); + break; + case MHI_STATE_M1: + mhi_pm_m1_transition(mhi_cntrl); + break; + case MHI_STATE_M3: + mhi_pm_m3_transition(mhi_cntrl); + break; + case MHI_STATE_SYS_ERR: + { + enum MHI_PM_STATE new_state; + + MHI_ERR("MHI system error detected\n"); + write_lock_irq(&mhi_cntrl->pm_lock); + new_state = mhi_tryset_pm_state(mhi_cntrl, + MHI_PM_SYS_ERR_DETECT); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (new_state == MHI_PM_SYS_ERR_DETECT) + schedule_work( + &mhi_cntrl->syserr_worker); + break; + } + default: + MHI_ERR("Unsupported STE:%s\n", + TO_MHI_STATE_STR(new_state)); + } + + break; + } + case MHI_PKT_TYPE_CMD_COMPLETION_EVENT: + mhi_process_cmd_completion(mhi_cntrl, local_rp); + break; + case MHI_PKT_TYPE_EE_EVENT: + { + enum MHI_ST_TRANSITION st = MHI_ST_TRANSITION_MAX; + enum mhi_ee event = MHI_TRE_GET_EV_EXECENV(local_rp); + + MHI_LOG("MHI EE received event:%s\n", + TO_MHI_EXEC_STR(event)); + switch (event) { + case MHI_EE_SBL: + st = MHI_ST_TRANSITION_SBL; + break; + case MHI_EE_FP: + st = MHI_ST_TRANSITION_FP; + break; + case MHI_EE_WFW: + case MHI_EE_AMSS: + st = MHI_ST_TRANSITION_MISSION_MODE; + break; + case MHI_EE_RDDM: + mhi_cntrl->status_cb(mhi_cntrl, + mhi_cntrl->priv_data, + MHI_CB_EE_RDDM); + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->ee = event; + write_unlock_irq(&mhi_cntrl->pm_lock); + wake_up_all(&mhi_cntrl->state_event); + break; + default: + MHI_ERR("Unhandled EE event:%s\n", + TO_MHI_EXEC_STR(event)); + } + if (st != MHI_ST_TRANSITION_MAX) + mhi_queue_state_transition(mhi_cntrl, st); + break; + } +#if 1 //Add by Quectel + case MHI_PKT_TYPE_TX_EVENT: + case MHI_PKT_TYPE_RSC_TX_EVENT: + { + u32 chan = MHI_TRE_GET_EV_CHID(local_rp); + struct mhi_chan *mhi_chan = &mhi_cntrl->mhi_chan[chan]; + + if (likely(type == MHI_PKT_TYPE_TX_EVENT)) { + parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); + } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) { + parse_rsc_event(mhi_cntrl, local_rp, mhi_chan); + } + break; + } +#endif + default: + MHI_ASSERT(1, "Unsupported ev type"); + break; + } + +#ifdef QL_READ_EVENT_WA + if (mhi_event->er_index == 0) { + local_rp->ptr = 0; + local_rp->dword[0] = local_rp->dword[1] = 0; + } +#endif + + mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); + local_rp = ev_ring->rp; + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + count++; + } + + if (count > mhi_event->used_elements) { + mhi_event->used_elements = count; + } + + read_lock_bh(&mhi_cntrl->pm_lock); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) + mhi_ring_er_db(mhi_event); + read_unlock_bh(&mhi_cntrl->pm_lock); + + MHI_VERB("exit er_index:%u\n", mhi_event->er_index); + + return count; +} + +int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota) +{ + struct mhi_tre *dev_rp, *local_rp; + struct mhi_ring *ev_ring = &mhi_event->ring; + struct mhi_event_ctxt 
*er_ctxt =
+ &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+ int count = 0;
+ u32 chan;
+ struct mhi_chan *mhi_chan = NULL;
+ u32 chan_count = 0;
+ void *chan_local_rp = NULL;
+
+ if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) {
+ MHI_ERR("No EV access, PM_STATE:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+ local_rp = ev_ring->rp;
+
+ while (dev_rp != local_rp && event_quota > 0) {
+ enum MHI_PKT_TYPE type = MHI_TRE_GET_EV_TYPE(local_rp);
+
+ mhi_dump_tre(mhi_cntrl, local_rp);
+ MHI_VERB("Processing Event:0x%llx 0x%08x 0x%08x\n",
+ local_rp->ptr, local_rp->dword[0], local_rp->dword[1]);
+
+ chan = MHI_TRE_GET_EV_CHID(local_rp);
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ chan_local_rp = mhi_chan->tre_ring.rp;
+
+#ifdef ENABLE_MHI_MON
+ if (mhi_cntrl->nreaders) {
+ mon_bus_complete(mhi_cntrl, mhi_event->er_index, mhi_to_physical(ev_ring, local_rp), local_rp);
+ }
+#endif
+ if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
+ parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
+ event_quota--;
+ } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
+ parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
+ event_quota--;
+ }
+
+ chan_count += get_used_ring_elements(chan_local_rp, mhi_chan->tre_ring.rp, mhi_chan->tre_ring.elements);
+ mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
+ local_rp = ev_ring->rp;
+ if (local_rp == dev_rp || event_quota == 0) {
+ if (chan_count > mhi_chan->used_elements)
+ mhi_chan->used_elements = chan_count;
+ chan_count = 0;
+ dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+ }
+ count++;
+ }
+
+ if (count > mhi_event->used_elements) {
+ mhi_event->used_elements = count;
+ }
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state)))
+ mhi_ring_er_db(mhi_event);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ MHI_VERB("exit er_index:%u\n", mhi_event->er_index);
+
+ return count;
+}
+
+int mhi_process_tsync_event_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event,
+ u32 event_quota)
+{
+ struct mhi_tre *dev_rp, *local_rp;
+ struct mhi_ring *ev_ring = &mhi_event->ring;
+ struct mhi_event_ctxt *er_ctxt =
+ &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+ struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync;
+ int count = 0;
+ u32 sequence;
+ u64 remote_time;
+
+ if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) {
+ MHI_ERR("No EV access, PM_STATE:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+ local_rp = ev_ring->rp;
+
+ while (dev_rp != local_rp) {
+ enum MHI_PKT_TYPE type = MHI_TRE_GET_EV_TYPE(local_rp);
+ struct tsync_node *tsync_node;
+
+ MHI_VERB("Processing Event:0x%llx 0x%08x 0x%08x\n",
+ local_rp->ptr, local_rp->dword[0], local_rp->dword[1]);
+
+ MHI_ASSERT(type != MHI_PKT_TYPE_TSYNC_EVENT, "!TSYNC event");
+
+ sequence = MHI_TRE_GET_EV_SEQ(local_rp);
+ remote_time = MHI_TRE_GET_EV_TIME(local_rp);
+
+ do {
+ spin_lock_irq(&mhi_tsync->lock);
+ tsync_node = list_first_entry_or_null(&mhi_tsync->head,
+ struct tsync_node, node);
+ MHI_ASSERT(!tsync_node, "Unexpected Event");
+
+ if (unlikely(!tsync_node)) {
+ /* drop the lock before leaving the loop */
+ spin_unlock_irq(&mhi_tsync->lock);
+ break;
+ }
+
+ list_del(&tsync_node->node);
+ spin_unlock_irq(&mhi_tsync->lock);
+
+ /*
+ * the device may not be able to process every time sync
+ * command the host issues; it may only process the last
+ * command it received
+ */
+ if (tsync_node->sequence == sequence) {
+ tsync_node->cb_func(tsync_node->mhi_dev,
+ sequence,
+ tsync_node->local_time,
+ remote_time);
+ kfree(tsync_node);
+ } else {
+ kfree(tsync_node);
+ }
+ } while (true);
+
+ mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
+ local_rp = ev_ring->rp;
+ dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+ count++;
+ }
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state)))
+ mhi_ring_er_db(mhi_event);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ MHI_VERB("exit er_index:%u\n", mhi_event->er_index);
+
+ return count;
+}
+
+void mhi_ev_task(unsigned long data)
+{
+ struct mhi_event *mhi_event = (struct mhi_event *)data;
+ struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+
+ MHI_VERB("Enter for ev_index:%d\n", mhi_event->er_index);
+
+ mhi_cntrl->runtime_mark_last_busy(mhi_cntrl, mhi_cntrl->priv_data);
+ /* process all pending events */
+ spin_lock_bh(&mhi_event->lock);
+ mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
+ spin_unlock_bh(&mhi_event->lock);
+}
+
+void mhi_ctrl_ev_task(unsigned long data)
+{
+ struct mhi_event *mhi_event = (struct mhi_event *)data;
+ struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+ enum mhi_dev_state state;
+ enum MHI_PM_STATE pm_state = 0;
+ int ret;
+
+ MHI_VERB("Enter for ev_index:%d\n", mhi_event->er_index);
+
+ /*
+ * we can check pm_state w/o a lock here because there is no way
+ * pm_state can change from reg access valid to no access while this
+ * thread is being executed.
+ */
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+ return;
+
+ mhi_cntrl->runtime_mark_last_busy(mhi_cntrl, mhi_cntrl->priv_data);
+ /* process ctrl events */
+ ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
+
+ /*
+ * we received an MSI but have no events to process; the device may
+ * have gone to SYS_ERR state, so check the state
+ */
+ if (!ret) {
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ state = mhi_get_mhi_state(mhi_cntrl);
+ if (state == MHI_STATE_SYS_ERR) {
+ MHI_ERR("MHI system error detected\n");
+ pm_state = mhi_tryset_pm_state(mhi_cntrl,
+ MHI_PM_SYS_ERR_DETECT);
+ }
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ if (pm_state == MHI_PM_SYS_ERR_DETECT)
+ schedule_work(&mhi_cntrl->syserr_worker);
+ }
+}
+
+irqreturn_t mhi_msi_handlr(int irq_number, void *dev)
+{
+ struct mhi_event *mhi_event = dev;
+ struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+ struct mhi_event_ctxt *er_ctxt =
+ &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+ struct mhi_ring *ev_ring = &mhi_event->ring;
+ void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+
+ /* confirm ER has pending events to process before scheduling work */
+ if (ev_ring->rp == dev_rp)
+ return IRQ_HANDLED;
+
+ /* client managed event ring, notify pending data */
+ if (mhi_event->cl_manage) {
+ struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
+ struct mhi_device *mhi_dev = mhi_chan->mhi_dev;
+
+ if (mhi_dev)
+ mhi_dev->status_cb(mhi_dev, MHI_CB_PENDING_DATA);
+ } else
+ tasklet_schedule(&mhi_event->task);
+
+ return IRQ_HANDLED;
+}
+
+/* this is the threaded fn */
+irqreturn_t mhi_intvec_threaded_handlr(int irq_number, void *dev)
+{
+ struct mhi_controller *mhi_cntrl = dev;
+ enum mhi_dev_state state = MHI_STATE_MAX;
+ enum MHI_PM_STATE pm_state = 0;
+ enum mhi_ee ee = MHI_EE_MAX;
+ unsigned long flags;
+
+ MHI_VERB("Enter\n");
+
+ write_lock_irqsave(&mhi_cntrl->pm_lock, flags);
+ if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ state = mhi_get_mhi_state(mhi_cntrl);
+ ee = mhi_get_exec_env(mhi_cntrl);
+ if (mhi_cntrl->msi_allocated >= 5 || (mhi_cntrl->msi_allocated == 1 && (mhi_cntrl->dev_state != state ||
mhi_cntrl->ee != ee))) + MHI_LOG("device ee:%s dev_state:%s, pm_state:%s\n", TO_MHI_EXEC_STR(ee), + TO_MHI_STATE_STR(state), to_mhi_pm_state_str(mhi_cntrl->pm_state)); + } + + if (state == MHI_STATE_SYS_ERR) { + MHI_ERR("MHI system error detected\n"); + if (mhi_cntrl->pm_state != MHI_PM_SYS_ERR_DETECT) + pm_state = mhi_tryset_pm_state(mhi_cntrl, + MHI_PM_SYS_ERR_DETECT); + } + write_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); + if (pm_state == MHI_PM_SYS_ERR_DETECT) { + wake_up_all(&mhi_cntrl->state_event); + + if (mhi_cntrl->ee != ee) { + MHI_LOG("device ee:%s -> %s\n", TO_MHI_EXEC_STR(mhi_cntrl->ee), TO_MHI_EXEC_STR(ee)); + schedule_work(&mhi_cntrl->syserr_worker); + } + /* for fatal errors, we let controller decide next step */ + else if (MHI_IN_PBL(ee)) + mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data, + MHI_CB_FATAL_ERROR); + else + schedule_work(&mhi_cntrl->syserr_worker); + } + if (mhi_cntrl->msi_allocated >= 5||(mhi_cntrl->msi_allocated == 1 && (mhi_cntrl->dev_state != state || mhi_cntrl->ee != ee))) + MHI_LOG("device ee:%s dev_state:%s, %s\n", TO_MHI_EXEC_STR(ee), + TO_MHI_STATE_STR(state), TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + if (pm_state == MHI_PM_POR) { + wake_up_all(&mhi_cntrl->state_event); + } + + MHI_VERB("Exit\n"); + + return IRQ_HANDLED; +} + +irqreturn_t mhi_intvec_handlr(int irq_number, void *dev) +{ + + struct mhi_controller *mhi_cntrl = dev; + + /* wake up any events waiting for state change */ + MHI_VERB("Enter\n"); + wake_up_all(&mhi_cntrl->state_event); + MHI_VERB("Exit\n"); + + return IRQ_WAKE_THREAD; +} + +irqreturn_t mhi_one_msi_handlr(int irq_number, void *dev) +{ + struct mhi_controller *mhi_cntrl = dev; + struct mhi_event_ctxt *er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + u32 i; + u32 handle = 0; + + for (i = 0; i < NUM_MHI_EVT_RINGS; i++, er_ctxt++, mhi_event++) { + struct mhi_ring *ev_ring = &mhi_event->ring; + void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + if (ev_ring->rp != dev_rp) { + handle++; + mhi_msi_handlr(irq_number, mhi_event); + } + } + + if (handle ==0) { + mhi_intvec_threaded_handlr(irq_number, dev); + } + + return IRQ_HANDLED; +} + +int mhi_send_cmd(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan, + enum MHI_CMD cmd) +{ + struct mhi_tre *cmd_tre = NULL; + struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; + struct mhi_ring *ring = &mhi_cmd->ring; + int chan = 0; + + MHI_VERB("Entered, MHI pm_state:%s dev_state:%s ee:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + if (mhi_chan) + chan = mhi_chan->chan; + + spin_lock_bh(&mhi_cmd->lock); + if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) { + spin_unlock_bh(&mhi_cmd->lock); + return -ENOMEM; + } + + /* prepare the cmd tre */ + cmd_tre = ring->wp; + switch (cmd) { + case MHI_CMD_RESET_CHAN: + cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR; + cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0; + cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan); + break; + case MHI_CMD_START_CHAN: + cmd_tre->ptr = MHI_TRE_CMD_START_PTR; + cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0; + cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan); + break; + case MHI_CMD_TIMSYNC_CFG: + cmd_tre->ptr = MHI_TRE_CMD_TSYNC_CFG_PTR; + cmd_tre->dword[0] = MHI_TRE_CMD_TSYNC_CFG_DWORD0; + cmd_tre->dword[1] = MHI_TRE_CMD_TSYNC_CFG_DWORD1 + (mhi_cntrl->mhi_tsync->er_index); + break; + } + +#ifdef ENABLE_MHI_MON + if (mhi_cntrl->nreaders) { + mon_bus_submit(mhi_cntrl, 128, 
mhi_to_physical(ring, cmd_tre), cmd_tre, NULL, 0); + } +#endif + MHI_VERB("WP:0x%llx TRE: 0x%llx 0x%08x 0x%08x\n", + (u64)mhi_to_physical(ring, cmd_tre), cmd_tre->ptr, + cmd_tre->dword[0], cmd_tre->dword[1]); + + /* queue to hardware */ + mhi_add_ring_element(mhi_cntrl, ring); + read_lock_bh(&mhi_cntrl->pm_lock); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) + mhi_ring_cmd_db(mhi_cntrl, mhi_cmd); + read_unlock_bh(&mhi_cntrl->pm_lock); + spin_unlock_bh(&mhi_cmd->lock); + + return 0; +} + +static int __mhi_prepare_channel(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + int ret = 0; + + MHI_LOG("Entered: preparing channel:%d\n", mhi_chan->chan); + + if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) { + MHI_ERR("Current EE:%s Required EE Mask:0x%x for chan:%s\n", + TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask, + mhi_chan->name); + return -ENOTCONN; + } + + mutex_lock(&mhi_chan->mutex); + + /* if channel is not disable state do not allow to start */ + if (mhi_chan->ch_state != MHI_CH_STATE_DISABLED) { + ret = -EIO; + MHI_LOG("channel:%d is not in disabled state, ch_state%d\n", + mhi_chan->chan, mhi_chan->ch_state); + goto error_init_chan; + } + + /* client manages channel context for offload channels */ + if (!mhi_chan->offload_ch) { + ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan); + if (ret) { + MHI_ERR("Error with init chan\n"); + goto error_init_chan; + } + } + + reinit_completion(&mhi_chan->completion); + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("MHI host is not in active state\n"); + read_unlock_bh(&mhi_cntrl->pm_lock); + ret = -EIO; + goto error_pm_state; + } + + mhi_cntrl->wake_get(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + + ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN); + if (ret) { + MHI_ERR("Failed to send start chan cmd\n"); + goto error_send_cmd; + } + + ret = wait_for_completion_timeout(&mhi_chan->completion, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) { + MHI_ERR("Failed to receive cmd completion for chan:%d\n", + mhi_chan->chan); + ret = -EIO; + goto error_send_cmd; + } + + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + + write_lock_irq(&mhi_chan->lock); + mhi_chan->ch_state = MHI_CH_STATE_ENABLED; + write_unlock_irq(&mhi_chan->lock); + + /* pre allocate buffer for xfer ring */ + if (mhi_chan->pre_alloc) { + int nr_el = get_nr_avail_ring_elements(mhi_cntrl, + &mhi_chan->tre_ring); + size_t len = mhi_cntrl->buffer_len; + + while (nr_el--) { + void *buf; + + buf = kmalloc(len, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto error_pre_alloc; + } + + /* prepare transfer descriptors */ + ret = mhi_chan->gen_tre(mhi_cntrl, mhi_chan, buf, buf, + len, MHI_EOT); + if (ret) { + MHI_ERR("Chan:%d error prepare buffer\n", + mhi_chan->chan); + kfree(buf); + goto error_pre_alloc; + } + } + + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state)) { + read_lock_irq(&mhi_chan->lock); + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + read_unlock_irq(&mhi_chan->lock); + } + read_unlock_bh(&mhi_cntrl->pm_lock); + } + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + mutex_unlock(&mhi_chan->mutex); + + MHI_LOG("Chan:%d successfully moved to start state\n", mhi_chan->chan); + + return 0; + +error_send_cmd: + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + 
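+ /* drop the device wake vote taken earlier in this function */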
read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + +error_pm_state: + if (!mhi_chan->offload_ch) + mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); + +error_init_chan: + mutex_unlock(&mhi_chan->mutex); + + return ret; + +error_pre_alloc: + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + mutex_unlock(&mhi_chan->mutex); + __mhi_unprepare_channel(mhi_cntrl, mhi_chan); + + return ret; +} + +static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + struct mhi_event_ctxt *er_ctxt, + int chan) +{ + struct mhi_tre *dev_rp, *local_rp; + struct mhi_ring *ev_ring; + unsigned long flags; + + MHI_LOG("Marking all events for chan:%d as stale\n", chan); + + ev_ring = &mhi_event->ring; + + /* mark all stale events related to channel as STALE event */ + spin_lock_irqsave(&mhi_event->lock, flags); + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + + local_rp = ev_ring->rp; + while (dev_rp != local_rp) { + if (MHI_TRE_GET_EV_TYPE(local_rp) == + MHI_PKT_TYPE_TX_EVENT && + chan == MHI_TRE_GET_EV_CHID(local_rp)) + local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan, + MHI_PKT_TYPE_STALE_EVENT); + local_rp++; + if (local_rp == (ev_ring->base + ev_ring->len)) + local_rp = ev_ring->base; + } + + + MHI_LOG("Finished marking events as stale events\n"); + spin_unlock_irqrestore(&mhi_event->lock, flags); +} + +static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring, *tre_ring; + struct mhi_result result; + + /* reset any pending buffers */ + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + while (tre_ring->rp != tre_ring->wp) { + struct mhi_buf_info *buf_info = buf_ring->rp; + + if (mhi_chan->dir == DMA_TO_DEVICE) { + if (atomic_dec_return(&mhi_cntrl->pending_pkts) == 0) + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + } + + if (mhi_chan->dir == DMA_TO_DEVICE) + mhi_cntrl->wake_put(mhi_cntrl, false); + if (!buf_info->pre_mapped) + mhi_cntrl->unmap_single(mhi_cntrl, buf_info); + mhi_del_ring_element(mhi_cntrl, buf_ring); + mhi_del_ring_element(mhi_cntrl, tre_ring); + + if (mhi_chan->pre_alloc) { + kfree(buf_info->cb_buf); + } else { + result.buf_addr = buf_info->cb_buf; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } + } +} + +static void mhi_reset_rsc_chan(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring, *tre_ring; + struct mhi_result result; + struct mhi_buf_info *buf_info; + + /* reset any pending buffers */ + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + + buf_info = buf_ring->base; + for (; (void *)buf_info < buf_ring->base + buf_ring->len; buf_info++) { + if (!buf_info->used) + continue; + + result.buf_addr = buf_info->cb_buf; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + buf_info->used = false; + } +} + +void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan) +{ + + struct mhi_event *mhi_event; + struct mhi_event_ctxt *er_ctxt; + int chan = mhi_chan->chan; + + /* nothing to reset, client don't queue buffers */ + if (mhi_chan->offload_ch) + return; + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; + er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index]; + + 
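+ /*
+ * mark this channel's in-flight events stale before reclaiming the
+ * rings, so the event handler cannot complete a transfer whose
+ * buffer is about to be freed
+ */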
mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan); + + if (mhi_chan->xfer_type == MHI_XFER_RSC_DMA) + mhi_reset_rsc_chan(mhi_cntrl, mhi_chan); + else + mhi_reset_data_chan(mhi_cntrl, mhi_chan); + + read_unlock_bh(&mhi_cntrl->pm_lock); + MHI_LOG("Reset complete.\n"); +} + +static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + int ret; + + MHI_LOG("Entered: unprepare channel:%d\n", mhi_chan->chan); + + /* no more processing events for this channel */ + mutex_lock(&mhi_chan->mutex); + write_lock_irq(&mhi_chan->lock); + if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) { + MHI_LOG("chan:%d is already disabled\n", mhi_chan->chan); + write_unlock_irq(&mhi_chan->lock); + mutex_unlock(&mhi_chan->mutex); + return; + } + + mhi_chan->ch_state = MHI_CH_STATE_DISABLED; + write_unlock_irq(&mhi_chan->lock); + + reinit_completion(&mhi_chan->completion); + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + read_unlock_bh(&mhi_cntrl->pm_lock); + goto error_invalid_state; + } + + mhi_cntrl->wake_get(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_RESET_CHAN); + if (ret) { + MHI_ERR("Failed to send reset chan cmd\n"); + goto error_completion; + } + + /* even if it fails we will still reset */ + ret = wait_for_completion_timeout(&mhi_chan->completion, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) + MHI_ERR("Failed to receive cmd completion, still resetting\n"); + +error_completion: + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + +error_invalid_state: + if (!mhi_chan->offload_ch) { + mhi_reset_chan(mhi_cntrl, mhi_chan); + mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); + } + MHI_LOG("chan:%d successfully resetted\n", mhi_chan->chan); + mutex_unlock(&mhi_chan->mutex); +} + +int mhi_debugfs_mhi_states_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + int reg = 0; + int ret; + u32 val[4]; + + seq_printf(m, + "pm_state:%s dev_state:%s EE:%s M0:%u M2:%u M3:%u wake:%d dev_wake:%u alloc_size:%u\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee), + mhi_cntrl->M0, mhi_cntrl->M2, mhi_cntrl->M3, + mhi_cntrl->wake_set, + atomic_read(&mhi_cntrl->dev_wake), + atomic_read(&mhi_cntrl->alloc_size)); + + seq_printf(m, + "mhi_state:%s exec_env:%s\n", + TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)), TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl))); + + seq_printf(m, "dump mhi reg addr:%p\n", mhi_cntrl->regs); + for (reg = 0; reg < 0x100; reg+=16) { + val[0] = val[1] = val[2] = val[3] = 0xFFFFFFFF; + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, reg+0, &val[0]); + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, reg+4, &val[1]); + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, reg+8, &val[2]); + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, reg+12, &val[3]); + seq_printf(m, "%02x: %08x %08x %08x %08x\n", reg, val[0], val[1], val[2], val[3]); + } + + seq_printf(m, "dump bhi reg addr:%p\n", mhi_cntrl->bhi); + for (reg = 0; reg < 0x100; reg+=16) { + val[0] = val[1] = val[2] = val[3] = 0xFFFFFFFF; + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, reg+0, &val[0]); + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, reg+4, &val[1]); + ret = 
mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, reg+8, &val[2]); + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, reg+12, &val[3]); + seq_printf(m, "%02x: %08x %08x %08x %08x\n", reg, val[0], val[1], val[2], val[3]); + } + + return 0; +} + +int mhi_debugfs_mhi_event_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + struct mhi_event *mhi_event; + struct mhi_event_ctxt *er_ctxt; + + int i; + + er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, + mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + if (mhi_event->offload_ev) { + seq_printf(m, "Index:%d offload event ring\n", i); + } else { + seq_printf(m, + "Index:%d modc:%d modt:%d base:0x%0llx len:0x%llx", + i, er_ctxt->intmodc, er_ctxt->intmodt, + er_ctxt->rbase, er_ctxt->rlen); + seq_printf(m, + " rp:0x%llx wp:0x%llx local_rp:0x%llx local_wp:0x%llx db:0x%llx\n", + er_ctxt->rp, er_ctxt->wp, + (unsigned long long)mhi_to_physical(ring, ring->rp), + (unsigned long long)mhi_to_physical(ring, ring->wp), + (unsigned long long)mhi_event->db_cfg.db_val); + seq_printf(m, "used:%u\n", mhi_event->used_elements); + +#ifdef DEBUG_CHAN100_DB + if (mhi_event->mhi_chan && mhi_event->chan == 100) { + struct mhi_tre *tre = (struct mhi_tre *)ring->base; + size_t j; + + for (j = 0; j < ring->elements; j++, tre++) { + seq_printf(m, + "%08x: %llx, %08x, %08x\n", + (unsigned int)(j*sizeof(struct mhi_tre)), + tre->ptr, tre->dword[0], tre->dword[1]); + } + } +#endif + } + } + + return 0; +} + +int mhi_debugfs_mhi_chan_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + struct mhi_chan *mhi_chan; + struct mhi_chan_ctxt *chan_ctxt; + int i; + + mhi_chan = mhi_cntrl->mhi_chan; + chan_ctxt = mhi_cntrl->mhi_ctxt->chan_ctxt; + for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) { + struct mhi_ring *ring = &mhi_chan->tre_ring; + + if (mhi_chan->ch_state == MHI_CH_STATE_DISABLED) + continue; + + if (mhi_chan->offload_ch) { + seq_printf(m, "%s(%u) offload channel\n", + mhi_chan->name, mhi_chan->chan); + } else if (mhi_chan->mhi_dev) { + seq_printf(m, + "%s(%u) state:0x%x brstmode:0x%x pllcfg:0x%x type:0x%x erindex:%u", + mhi_chan->name, mhi_chan->chan, + chan_ctxt->chstate, chan_ctxt->brstmode, + chan_ctxt->pollcfg, chan_ctxt->chtype, + chan_ctxt->erindex); + seq_printf(m, + " base:0x%llx len:0x%llx rp:%llx wp:0x%llx local_rp:0x%llx local_wp:0x%llx db:0x%llx\n", + chan_ctxt->rbase, chan_ctxt->rlen, + chan_ctxt->rp, chan_ctxt->wp, + (unsigned long long)mhi_to_physical(ring, ring->rp), + (unsigned long long)mhi_to_physical(ring, ring->wp), + (unsigned long long)mhi_chan->db_cfg.db_val); + seq_printf(m, "used:%u, EOB:%u, EOT:%u, OOB:%u, DB_MODE:%u\n", mhi_chan->used_elements, + mhi_chan->used_events[MHI_EV_CC_EOB], mhi_chan->used_events[MHI_EV_CC_EOT], + mhi_chan->used_events[MHI_EV_CC_OOB],mhi_chan->used_events[MHI_EV_CC_DB_MODE]); + +#ifdef DEBUG_CHAN100_DB + if (mhi_chan->chan == 100) { + unsigned int n = 0; + seq_printf(m, "chan100_seq = %04x\n", atomic_read(&chan100_seq)%CHAN100_SIZE); + for (n = 0; n < CHAN100_SIZE; n++) { + seq_printf(m, "%04x: %08x\n", n, chan100_t[n]); + } + } +#endif + +#if 0 + if (ring->base && /*(i&1) &&*/ (i < MHI_CLIENT_IP_HW_0_OUT)) { + struct mhi_tre *tre = (struct mhi_tre *)ring->base; + size_t e; + + for (e = 0; e < ring->elements; e++, tre++) { + seq_printf(m, "[%03d] %llx, %08x, %08x\n", i, tre->ptr, tre->dword[0], tre->dword[1]); + } + } +#endif + } + } + + return 
0; +} + +/* move channel to start state */ +int mhi_prepare_for_transfer(struct mhi_device *mhi_dev) +{ + int ret, dir; + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan; + + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan; + + if (!mhi_chan) + continue; + + ret = __mhi_prepare_channel(mhi_cntrl, mhi_chan); + if (ret) { + MHI_ERR("Error moving chan %s,%d to START state\n", + mhi_chan->name, mhi_chan->chan); + goto error_open_chan; + } + + if (mhi_dev->dl_chan == mhi_dev->ul_chan) { + break; + } + } + + return 0; + +error_open_chan: + for (--dir; dir >= 0; dir--) { + mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan; + + if (!mhi_chan) + continue; + + __mhi_unprepare_channel(mhi_cntrl, mhi_chan); + } + + return ret; +} +EXPORT_SYMBOL(mhi_prepare_for_transfer); + +void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan; + int dir; + + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + __mhi_unprepare_channel(mhi_cntrl, mhi_chan); + + if (mhi_dev->dl_chan == mhi_dev->ul_chan) { + break; + } + } +} +EXPORT_SYMBOL(mhi_unprepare_from_transfer); + +int mhi_get_no_free_descriptors(struct mhi_device *mhi_dev, + enum dma_data_direction dir) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? + mhi_dev->ul_chan : mhi_dev->dl_chan; + struct mhi_ring *tre_ring = &mhi_chan->tre_ring; + + if (mhi_chan->offload_ch) + return 0; + + return get_nr_avail_ring_elements(mhi_cntrl, tre_ring); +} +EXPORT_SYMBOL(mhi_get_no_free_descriptors); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 5,3,0 )) +static int __mhi_bdf_to_controller(struct device *dev, void *tmp) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_device *match = tmp; + + /* return any none-zero value if match */ + if (mhi_dev->dev_type == MHI_CONTROLLER_TYPE && + mhi_dev->domain == match->domain && mhi_dev->bus == match->bus && + mhi_dev->slot == match->slot && mhi_dev->dev_id == match->dev_id) + return 1; + + return 0; +} +#else +static int __mhi_bdf_to_controller(struct device *dev, const void *tmp) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + const struct mhi_device *match = tmp; + + /* return any none-zero value if match */ + if (mhi_dev->dev_type == MHI_CONTROLLER_TYPE && + mhi_dev->domain == match->domain && mhi_dev->bus == match->bus && + mhi_dev->slot == match->slot && mhi_dev->dev_id == match->dev_id) + return 1; + + return 0; +} +#endif + +struct mhi_controller *mhi_bdf_to_controller(u32 domain, + u32 bus, + u32 slot, + u32 dev_id) +{ + struct mhi_device tmp, *mhi_dev; + struct device *dev; + + tmp.domain = domain; + tmp.bus = bus; + tmp.slot = slot; + tmp.dev_id = dev_id; + + dev = bus_find_device(&mhi_bus_type, NULL, &tmp, + __mhi_bdf_to_controller); + if (!dev) + return NULL; + + mhi_dev = to_mhi_device(dev); + + return mhi_dev->mhi_cntrl; +} +EXPORT_SYMBOL(mhi_bdf_to_controller); + +int mhi_poll(struct mhi_device *mhi_dev, + u32 budget) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan = mhi_dev->dl_chan; + struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; + int ret; + + spin_lock_bh(&mhi_event->lock); + ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget); + spin_unlock_bh(&mhi_event->lock); + + return ret; +} +EXPORT_SYMBOL(mhi_poll); + +int 
mhi_get_remote_time_sync(struct mhi_device *mhi_dev,
+ u64 *t_host,
+ u64 *t_dev)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync;
+ int ret;
+
+ /* not all devices support the time feature */
+ if (!mhi_tsync)
+ return -EIO;
+
+ /* bring to M0 state */
+ ret = __mhi_device_get_sync(mhi_cntrl);
+ if (ret)
+ return ret;
+
+ mutex_lock(&mhi_tsync->lpm_mutex);
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
+ MHI_ERR("MHI is not in active state, pm_state:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ ret = -EIO;
+ goto error_invalid_state;
+ }
+
+ /* disable link level low power modes */
+ ret = mhi_cntrl->lpm_disable(mhi_cntrl, mhi_cntrl->priv_data);
+ if (ret)
+ goto error_invalid_state;
+
+ /*
+ * time critical code to fetch device times; the delay between
+ * these two steps should be as deterministic as possible.
+ */
+ preempt_disable();
+ local_irq_disable();
+
+ *t_host = mhi_cntrl->time_get(mhi_cntrl, mhi_cntrl->priv_data);
+ *t_dev = readq_relaxed_no_log(mhi_tsync->time_reg);
+
+ local_irq_enable();
+ preempt_enable();
+
+ mhi_cntrl->lpm_enable(mhi_cntrl, mhi_cntrl->priv_data);
+
+error_invalid_state:
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ mutex_unlock(&mhi_tsync->lpm_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(mhi_get_remote_time_sync);
+
+/**
+ * mhi_get_remote_time - Get external modem time relative to host time
+ * Trigger an event to capture the modem time and also capture the host
+ * time, so the client can do a relative drift comparison.
+ * It is recommended that only the tsync device calls this method, and
+ * that it is not called from atomic context.
+ * @mhi_dev: Device associated with the channels
+ * @sequence: unique sequence id to track the event
+ * @cb_func: callback function to call back
+ */
+int mhi_get_remote_time(struct mhi_device *mhi_dev,
+ u32 sequence,
+ void (*cb_func)(struct mhi_device *mhi_dev,
+ u32 sequence,
+ u64 local_time,
+ u64 remote_time))
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync;
+ struct tsync_node *tsync_node;
+ int ret;
+
+ /* not all devices support the time feature */
+ if (!mhi_tsync)
+ return -EIO;
+
+ /* tsync db can only be rung in M0 state */
+ ret = __mhi_device_get_sync(mhi_cntrl);
+ if (ret)
+ return ret;
+
+ /*
+ * technically we could use GFP_KERNEL, but we want to avoid
+ * being scheduled out
+ */
+ tsync_node = kzalloc(sizeof(*tsync_node), GFP_ATOMIC);
+ if (!tsync_node) {
+ ret = -ENOMEM;
+ goto error_no_mem;
+ }
+
+ tsync_node->sequence = sequence;
+ tsync_node->cb_func = cb_func;
+ tsync_node->mhi_dev = mhi_dev;
+
+ /* disable link level low power modes */
+ mhi_cntrl->lpm_disable(mhi_cntrl, mhi_cntrl->priv_data);
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
+ MHI_ERR("MHI is not in active state, pm_state:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ ret = -EIO;
+ goto error_invalid_state;
+ }
+
+ spin_lock_irq(&mhi_tsync->lock);
+ list_add_tail(&tsync_node->node, &mhi_tsync->head);
+ spin_unlock_irq(&mhi_tsync->lock);
+
+ /*
+ * time critical code; the delay between these two steps should be
+ * as deterministic as possible.
+ */
+ preempt_disable();
+ local_irq_disable();
+
+ tsync_node->local_time =
+ mhi_cntrl->time_get(mhi_cntrl, mhi_cntrl->priv_data);
+ writel_relaxed_no_log(tsync_node->sequence, mhi_tsync->db);
+ /* write must go through immediately */
+ wmb();
+
+ local_irq_enable();
+ preempt_enable();
+
+ ret = 0;
+
+error_invalid_state:
+ if (ret)
+ kfree(tsync_node);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->lpm_enable(mhi_cntrl, mhi_cntrl->priv_data);
+
+error_no_mem:
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(mhi_get_remote_time);
+
+void mhi_debug_reg_dump(struct mhi_controller *mhi_cntrl)
+{
+ enum mhi_dev_state state;
+ enum mhi_ee ee;
+ int i, ret;
+ u32 val = 0;
+ void __iomem *mhi_base = mhi_cntrl->regs;
+ void __iomem *bhi_base = mhi_cntrl->bhi;
+ void __iomem *bhie_base = mhi_cntrl->bhie;
+ void __iomem *wake_db = mhi_cntrl->wake_db;
+ struct {
+ const char *name;
+ int offset;
+ void *base;
+ } debug_reg[] = {
+ { "MHI_CNTRL", MHICTRL, mhi_base},
+ { "MHI_STATUS", MHISTATUS, mhi_base},
+ { "MHI_WAKE_DB", 0, wake_db},
+ { "BHI_EXECENV", BHI_EXECENV, bhi_base},
+ { "BHI_STATUS", BHI_STATUS, bhi_base},
+ { "BHI_ERRCODE", BHI_ERRCODE, bhi_base},
+ { "BHI_ERRDBG1", BHI_ERRDBG1, bhi_base},
+ { "BHI_ERRDBG2", BHI_ERRDBG2, bhi_base},
+ { "BHI_ERRDBG3", BHI_ERRDBG3, bhi_base},
+ { "BHIE_TXVEC_DB", BHIE_TXVECDB_OFFS, bhie_base},
+ { "BHIE_TXVEC_STATUS", BHIE_TXVECSTATUS_OFFS, bhie_base},
+ { "BHIE_RXVEC_DB", BHIE_RXVECDB_OFFS, bhie_base},
+ { "BHIE_RXVEC_STATUS", BHIE_RXVECSTATUS_OFFS, bhie_base},
+ { NULL },
+ };
+
+ MHI_LOG("host pm_state:%s dev_state:%s ee:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+ state = mhi_get_mhi_state(mhi_cntrl);
+ ee = mhi_get_exec_env(mhi_cntrl);
+
+ MHI_LOG("device ee:%s dev_state:%s\n", TO_MHI_EXEC_STR(ee),
+ TO_MHI_STATE_STR(state));
+
+ for (i = 0; debug_reg[i].name; i++) {
+ ret = mhi_read_reg(mhi_cntrl, debug_reg[i].base,
+ debug_reg[i].offset, &val);
+ MHI_LOG("reg:%s val:0x%x, ret:%d\n", debug_reg[i].name, val,
+ ret);
+ }
+}
+EXPORT_SYMBOL(mhi_debug_reg_dump);
diff --git a/package/wwan/driver/quectel_MHI/src/core/mhi_pm.c b/package/wwan/driver/quectel_MHI/src/core/mhi_pm.c
new file mode 100644
index 000000000..fd4bd966f
--- /dev/null
+++ b/package/wwan/driver/quectel_MHI/src/core/mhi_pm.c
@@ -0,0 +1,1253 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "mhi.h"
+#include "mhi_internal.h"
+
+/*
+ * Not all MHI state transitions are synchronous transitions. Linkdown, SSR,
+ * and shutdown can happen asynchronously at any time. This function will
+ * transition to a new state only if the transition is allowed.
+ *
+ * Priority increases as we go down; for example, while in any L0 state, a
+ * state from L1, L2, or L3 can be set. A notable exception to this rule is
+ * the DISABLE state: from DISABLE we can transition only to POR. Also, for
+ * example, while in an L2 state, the user cannot jump back to the L1 or
+ * L0 states.
+ * Valid transitions: + * L0: DISABLE <--> POR + * POR <--> POR + * POR -> M0 -> M2 --> M0 + * POR -> FW_DL_ERR + * FW_DL_ERR <--> FW_DL_ERR + * M0 -> FW_DL_ERR + * M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0 + * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR + * L2: SHUTDOWN_PROCESS -> DISABLE + * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT + * LD_ERR_FATAL_DETECT -> SHUTDOWN_PROCESS + */ +static struct mhi_pm_transitions const mhi_state_transitions[] = { + /* L0 States */ + { + MHI_PM_DISABLE, + MHI_PM_POR + }, + { + MHI_PM_POR, + MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 | + MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR + }, + { + MHI_PM_M0, + MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_SYS_ERR_DETECT | + MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT | + MHI_PM_FW_DL_ERR + }, + { + MHI_PM_M2, + MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_M3_ENTER, + MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_M3, + MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT | + MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_M3_EXIT, + MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_FW_DL_ERR, + MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT | + MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT + }, + /* L1 States */ + { + MHI_PM_SYS_ERR_DETECT, + MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_SYS_ERR_PROCESS, + MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + /* L2 States */ + { + MHI_PM_SHUTDOWN_PROCESS, + MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT + }, + /* L3 States */ + { + MHI_PM_LD_ERR_FATAL_DETECT, + MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_PROCESS + }, +}; + +enum MHI_PM_STATE __must_check mhi_tryset_pm_state( + struct mhi_controller *mhi_cntrl, + enum MHI_PM_STATE state) +{ + unsigned long cur_state = mhi_cntrl->pm_state; + int index = find_last_bit(&cur_state, 32); + + if (unlikely(index >= ARRAY_SIZE(mhi_state_transitions))) { + MHI_CRITICAL("cur_state:%s is not a valid pm_state\n", + to_mhi_pm_state_str(cur_state)); + return cur_state; + } + + if (unlikely(mhi_state_transitions[index].from_state != cur_state)) { + MHI_ERR("index:%u cur_state:%s != actual_state: %s\n", + index, to_mhi_pm_state_str(cur_state), + to_mhi_pm_state_str + (mhi_state_transitions[index].from_state)); + return cur_state; + } + + if (unlikely(!(mhi_state_transitions[index].to_states & state))) { + MHI_LOG( + "Not allowing pm state transition from:%s to:%s state\n", + to_mhi_pm_state_str(cur_state), + to_mhi_pm_state_str(state)); + return cur_state; + } + + MHI_LOG("Transition to pm state from:%s to:%s\n", + to_mhi_pm_state_str(cur_state), to_mhi_pm_state_str(state)); + + mhi_cntrl->pm_state = state; + return mhi_cntrl->pm_state; +} + +void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, + enum mhi_dev_state state) +{ + if (state == MHI_STATE_RESET) { + mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, + MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 1); + } else { + mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, + MHICTRL_MHISTATE_MASK, MHICTRL_MHISTATE_SHIFT, state); + } +} + +/* set device wake */ +void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force) +{ + unsigned long flags; + +#if 1 //carl.yin 20200907, according to WIN driver, only in M2 state, need to assert, and 
do not need to deassert + if (mhi_cntrl->dev_state == MHI_STATE_M2) { + spin_lock_irqsave(&mhi_cntrl->wlock, flags); + mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); + spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); + } + return; +#endif + + /* if set, regardless of count set the bit if not set */ + if (unlikely(force)) { + spin_lock_irqsave(&mhi_cntrl->wlock, flags); + atomic_inc(&mhi_cntrl->dev_wake); + if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) && + !mhi_cntrl->wake_set) { + mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); + mhi_cntrl->wake_set = true; + } + spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); + } else { + /* if resources requested already, then increment and exit */ + if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0))) + return; + + spin_lock_irqsave(&mhi_cntrl->wlock, flags); + if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) && + MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) && + !mhi_cntrl->wake_set) { + mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); + mhi_cntrl->wake_set = true; + } + spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); + } +} + +/* clear device wake */ +void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl, bool override) +{ + unsigned long flags; + +#if 1 //carl.yin 20200907, according to WIN driver, only in M2 state, need to assert, and do not need to deassert + return; +#endif + +#if 1 //Add by Quectel + if (atomic_read(&mhi_cntrl->dev_wake) == 0) + return; +#endif + + MHI_ASSERT(atomic_read(&mhi_cntrl->dev_wake) == 0, "dev_wake == 0"); + + /* resources not dropping to 0, decrement and exit */ + if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1))) + return; + + spin_lock_irqsave(&mhi_cntrl->wlock, flags); + if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) && + MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override && + mhi_cntrl->wake_set) { + mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0); + mhi_cntrl->wake_set = false; + } + spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); +} + +int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl) +{ + void __iomem *base = mhi_cntrl->regs; + u32 reset = 1, ready = 0; + struct mhi_event *mhi_event; + enum MHI_PM_STATE cur_state; + int ret, i; + + MHI_LOG("Waiting to enter READY state\n"); + + /* wait for RESET to be cleared and READY bit to be set */ + wait_event_timeout(mhi_cntrl->state_event, + MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) || + mhi_read_reg_field(mhi_cntrl, base, MHICTRL, + MHICTRL_RESET_MASK, + MHICTRL_RESET_SHIFT, &reset) || + mhi_read_reg_field(mhi_cntrl, base, MHISTATUS, + MHISTATUS_READY_MASK, + MHISTATUS_READY_SHIFT, &ready) || + (!reset && ready), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + /* device enter into error state */ + if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) + return -EIO; + + /* device did not transition to ready state */ + if (reset || !ready) + return -ETIMEDOUT; + + MHI_LOG("Device in READY State\n"); + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR); + mhi_cntrl->dev_state = MHI_STATE_READY; + write_unlock_irq(&mhi_cntrl->pm_lock); + + if (cur_state != MHI_PM_POR) { + MHI_ERR("Error moving to state %s from %s\n", + to_mhi_pm_state_str(MHI_PM_POR), + to_mhi_pm_state_str(cur_state)); + return -EIO; + } + read_lock_bh(&mhi_cntrl->pm_lock); + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + goto error_mmio; + + ret = mhi_init_mmio(mhi_cntrl); + if (ret) { + MHI_ERR("Error programming mmio registers\n"); + goto error_mmio; + } + + /* add elements to all sw event rings */ + 
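+ /*
+ * park the write pointer on the last element so the device sees the
+ * whole ring as free; the smp_wmb() below publishes the context
+ * update before the doorbell is rung
+ */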
mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + if (mhi_event->offload_ev || mhi_event->hw_ring) + continue; + + ring->wp = ring->base + ring->len - ring->el_size; + *ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size; + /* needs to update to all cores */ + smp_wmb(); + + /* ring the db for event rings */ + spin_lock_irq(&mhi_event->lock); + mhi_ring_er_db(mhi_event); + spin_unlock_irq(&mhi_event->lock); + } + + /* set device into M0 state */ + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return 0; + +error_mmio: + read_unlock_bh(&mhi_cntrl->pm_lock); + + return -EIO; +} + +int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl) +{ + enum MHI_PM_STATE cur_state; + struct mhi_chan *mhi_chan; + int i; + + MHI_LOG("Entered With State:%s PM_STATE:%s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->dev_state = MHI_STATE_M0; + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (unlikely(cur_state != MHI_PM_M0)) { + MHI_ERR("Failed to transition to state %s from %s\n", + to_mhi_pm_state_str(MHI_PM_M0), + to_mhi_pm_state_str(cur_state)); + return -EIO; + } + mhi_cntrl->M0++; + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, false); + + /* ring all event rings and CMD ring only if we're in mission mode */ + if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) { + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + struct mhi_cmd *mhi_cmd = + &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; + + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + spin_lock_irq(&mhi_event->lock); + mhi_ring_er_db(mhi_event); + spin_unlock_irq(&mhi_event->lock); + } + + /* only ring primary cmd ring */ + spin_lock_irq(&mhi_cmd->lock); + if (mhi_cmd->ring.rp != mhi_cmd->ring.wp) + mhi_ring_cmd_db(mhi_cntrl, mhi_cmd); + spin_unlock_irq(&mhi_cmd->lock); + } + + /* ring channel db registers */ + mhi_chan = mhi_cntrl->mhi_chan; + for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { + struct mhi_ring *tre_ring = &mhi_chan->tre_ring; + + write_lock_irq(&mhi_chan->lock); + if (mhi_chan->db_cfg.reset_req) + mhi_chan->db_cfg.db_mode = true; + + /* only ring DB if ring is not empty */ + if (tre_ring->base && tre_ring->wp != tre_ring->rp) + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + write_unlock_irq(&mhi_chan->lock); + } + + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + wake_up_all(&mhi_cntrl->state_event); + MHI_VERB("Exited\n"); + + return 0; +} + +void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl) +{ + enum MHI_PM_STATE state; + + write_lock_irq(&mhi_cntrl->pm_lock); + /* if it fails, means we transition to M3 */ + state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2); + if (state == MHI_PM_M2) { + MHI_VERB("Entered M2 State\n"); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2); + mhi_cntrl->dev_state = MHI_STATE_M2; + mhi_cntrl->M2++; + + write_unlock_irq(&mhi_cntrl->pm_lock); + wake_up_all(&mhi_cntrl->state_event); + + /* transfer pending, exit M2 immediately */ + if (unlikely(atomic_read(&mhi_cntrl->dev_wake))) { + MHI_VERB("Exiting M2 Immediately, count:%d\n", + atomic_read(&mhi_cntrl->dev_wake)); + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, true); + mhi_cntrl->wake_put(mhi_cntrl, false); + 
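+ /*
+ * the forced wake_get/wake_put pair above rings the wake doorbell,
+ * nudging the device out of M2 back toward M0
+ */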
read_unlock_bh(&mhi_cntrl->pm_lock); + } else { + mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data, + MHI_CB_IDLE); + } + } else { + write_unlock_irq(&mhi_cntrl->pm_lock); + } +} + +int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl) +{ + enum MHI_PM_STATE state; + + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->dev_state = MHI_STATE_M3; + state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (state != MHI_PM_M3) { + MHI_ERR("Failed to transition to state %s from %s\n", + to_mhi_pm_state_str(MHI_PM_M3), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + wake_up_all(&mhi_cntrl->state_event); + mhi_cntrl->M3++; + + MHI_LOG("Entered mhi_state:%s pm_state:%s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return 0; +} + +static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl) +{ + int i, ret; + struct mhi_event *mhi_event; + + MHI_LOG("Processing Mission Mode Transition\n"); + mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data, MHI_CB_EE_MISSION_MODE); + + /* force MHI to be in M0 state before continuing */ + ret = __mhi_device_get_sync(mhi_cntrl); + if (ret) + return ret; + + ret = -EIO; + + write_lock_irq(&mhi_cntrl->pm_lock); + if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl); + write_unlock_irq(&mhi_cntrl->pm_lock); + + read_lock_bh(&mhi_cntrl->pm_lock); + if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee)) + goto error_mission_mode; + + wake_up_all(&mhi_cntrl->state_event); + + /* add elements to all HW event rings */ + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + goto error_mission_mode; + + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + if (mhi_event->offload_ev || !mhi_event->hw_ring) + continue; + + ring->wp = ring->base + ring->len - ring->el_size; + *ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size; + /* all ring updates must get updated immediately */ + smp_wmb(); + + spin_lock_irq(&mhi_event->lock); + if (MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state)) + mhi_ring_er_db(mhi_event); + spin_unlock_irq(&mhi_event->lock); + + } + + read_unlock_bh(&mhi_cntrl->pm_lock); + + /* setup support for time sync */ + mhi_init_timesync(mhi_cntrl); + + MHI_LOG("Adding new devices\n"); + + /* add supported devices */ + mhi_create_devices(mhi_cntrl); + + ret = 0; + + read_lock_bh(&mhi_cntrl->pm_lock); + +error_mission_mode: + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + MHI_LOG("Exit with ret:%d\n", ret); + + return ret; +} + +/* handles both sys_err and shutdown transitions */ +static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl, + enum MHI_PM_STATE transition_state) +{ + enum MHI_PM_STATE cur_state, prev_state; + struct mhi_event *mhi_event; + struct mhi_cmd_ctxt *cmd_ctxt; + struct mhi_cmd *mhi_cmd; + struct mhi_event_ctxt *er_ctxt; + int ret, i; + + MHI_LOG("Enter with from pm_state:%s MHI_STATE:%s to pm_state:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(transition_state)); + + /* We must notify MHI control driver so it can clean up first */ + if (transition_state == MHI_PM_SYS_ERR_PROCESS) + mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data, + MHI_CB_SYS_ERROR); + + mutex_lock(&mhi_cntrl->pm_mutex); + write_lock_irq(&mhi_cntrl->pm_lock); + prev_state = mhi_cntrl->pm_state; + cur_state = 
mhi_tryset_pm_state(mhi_cntrl, transition_state); + if (cur_state == transition_state) { + mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION; + mhi_cntrl->dev_state = MHI_STATE_RESET; + } + write_unlock_irq(&mhi_cntrl->pm_lock); + + /* wake up any threads waiting for state transitions */ + wake_up_all(&mhi_cntrl->state_event); + + /* not handling sys_err, could be middle of shut down */ + if (cur_state != transition_state) { + MHI_LOG("Failed to transition to state:0x%x from:0x%x\n", + transition_state, cur_state); + mutex_unlock(&mhi_cntrl->pm_mutex); + return; + } + + /* trigger MHI RESET so device will not access host ddr */ + if (MHI_REG_ACCESS_VALID(prev_state)) { + u32 in_reset = -1; + unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms); + + MHI_LOG("Trigger device into MHI_RESET\n"); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); + + /* wait for reset to be cleared */ + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_read_reg_field(mhi_cntrl, + mhi_cntrl->regs, MHICTRL, + MHICTRL_RESET_MASK, + MHICTRL_RESET_SHIFT, &in_reset) + || !in_reset, timeout); + if ((!ret || in_reset) && cur_state == MHI_PM_SYS_ERR_PROCESS) { + MHI_CRITICAL("Device failed to exit RESET state\n"); + mutex_unlock(&mhi_cntrl->pm_mutex); + return; + } + + /* Set the numbers of Event Rings supported */ + mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT, NUM_MHI_EVT_RINGS); + mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT, NUM_MHI_HW_EVT_RINGS); + + /* + * device cleares INTVEC as part of RESET processing, + * re-program it + */ + mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, mhi_cntrl->msi_irq_base); + } + + MHI_LOG("Waiting for all pending event ring processing to complete\n"); + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + tasklet_kill(&mhi_event->task); + } + + mutex_unlock(&mhi_cntrl->pm_mutex); + + MHI_LOG("Reset all active channels and remove mhi devices\n"); + mhi_cntrl->klog_slient = 1; + device_for_each_child(mhi_cntrl->dev, NULL, mhi_destroy_device); + mhi_cntrl->klog_slient = 0; + + MHI_LOG("Finish resetting channels\n"); + + MHI_LOG("Waiting for all pending threads to complete\n"); + wake_up_all(&mhi_cntrl->state_event); + flush_delayed_work(&mhi_cntrl->ready_worker); + flush_work(&mhi_cntrl->st_worker); + flush_work(&mhi_cntrl->fw_worker); + + mutex_lock(&mhi_cntrl->pm_mutex); + + MHI_ASSERT(atomic_read(&mhi_cntrl->dev_wake), "dev_wake != 0"); + + /* reset the ev rings and cmd rings */ + MHI_LOG("Resetting EV CTXT and CMD CTXT\n"); + mhi_cmd = mhi_cntrl->mhi_cmd; + cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { + struct mhi_ring *ring = &mhi_cmd->ring; + + ring->rp = ring->base; + ring->wp = ring->base; + cmd_ctxt->rp = cmd_ctxt->rbase; + cmd_ctxt->wp = cmd_ctxt->rbase; + } + + mhi_event = mhi_cntrl->mhi_event; + er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, + mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + /* do not touch offload er */ + if (mhi_event->offload_ev) + continue; + + ring->rp = ring->base; + ring->wp = ring->base; + er_ctxt->rp = er_ctxt->rbase; + er_ctxt->wp = er_ctxt->rbase; + } + + /* remove support for time sync */ + mhi_destroy_timesync(mhi_cntrl); + + if (cur_state == MHI_PM_SYS_ERR_PROCESS) { + if (mhi_get_exec_env(mhi_cntrl) == MHI_EE_EDL && 
mhi_get_mhi_state(mhi_cntrl) == MHI_STATE_RESET) { + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->ee = MHI_EE_EDL; + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR); + write_unlock_irq(&mhi_cntrl->pm_lock); + } + else + mhi_ready_state_transition(mhi_cntrl); + } else { + /* move to disable state */ + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (unlikely(cur_state != MHI_PM_DISABLE)) + MHI_ERR("Error moving from pm state:%s to state:%s\n", + to_mhi_pm_state_str(cur_state), + to_mhi_pm_state_str(MHI_PM_DISABLE)); + } + + MHI_LOG("Exit with pm_state:%s mhi_state:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state)); + + mutex_unlock(&mhi_cntrl->pm_mutex); +} + +int mhi_debugfs_trigger_reset(void *data, u64 val) +{ + struct mhi_controller *mhi_cntrl = data; + enum MHI_PM_STATE cur_state; + int ret; + + MHI_LOG("Trigger MHI Reset\n"); + + /* exit lpm first */ + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M0 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("Did not enter M0 state, cur_state:%s pm_state:%s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_DETECT); + write_unlock_irq(&mhi_cntrl->pm_lock); + + if (cur_state == MHI_PM_SYS_ERR_DETECT) + schedule_work(&mhi_cntrl->syserr_worker); + + return 0; +} + +/* queue a new work item and scheduler work */ +int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl, + enum MHI_ST_TRANSITION state) +{ + struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC); + unsigned long flags; + + if (!item) + return -ENOMEM; + + item->state = state; + spin_lock_irqsave(&mhi_cntrl->transition_lock, flags); + list_add_tail(&item->node, &mhi_cntrl->transition_list); + spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags); + + schedule_work(&mhi_cntrl->st_worker); + + return 0; +} + +void mhi_pm_sys_err_worker(struct work_struct *work) +{ + struct mhi_controller *mhi_cntrl = container_of(work, + struct mhi_controller, + syserr_worker); + + MHI_LOG("Enter with pm_state:%s MHI_STATE:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state)); + + mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS); +} + +void mhi_pm_ready_worker(struct work_struct *work) +{ + struct mhi_controller *mhi_cntrl = container_of(work, + struct mhi_controller, + ready_worker.work); + enum mhi_ee ee = MHI_EE_MAX; + + if (mhi_cntrl->dev_state != MHI_STATE_RESET) + return; + + write_lock_irq(&mhi_cntrl->pm_lock); + if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + ee = mhi_get_exec_env(mhi_cntrl); + write_unlock_irq(&mhi_cntrl->pm_lock); + + if (ee == MHI_EE_PTHRU) + schedule_delayed_work(&mhi_cntrl->ready_worker, msecs_to_jiffies(10)); + else if (ee == MHI_EE_AMSS || ee == MHI_EE_SBL) + mhi_queue_state_transition(mhi_cntrl, MHI_ST_TRANSITION_READY); +} + +void mhi_pm_st_worker(struct work_struct *work) +{ + struct state_transition *itr, *tmp; + LIST_HEAD(head); + struct mhi_controller *mhi_cntrl = container_of(work, + struct mhi_controller, + 
st_worker); + spin_lock_irq(&mhi_cntrl->transition_lock); + list_splice_tail_init(&mhi_cntrl->transition_list, &head); + spin_unlock_irq(&mhi_cntrl->transition_lock); + + list_for_each_entry_safe(itr, tmp, &head, node) { + list_del(&itr->node); + MHI_LOG("Transition to state:%s\n", + TO_MHI_STATE_TRANS_STR(itr->state)); + + if (mhi_cntrl->ee != mhi_get_exec_env(mhi_cntrl)) { + MHI_LOG("%s -> %s\n", TO_MHI_EXEC_STR(mhi_cntrl->ee), + TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl))); + } + + switch (itr->state) { + case MHI_ST_TRANSITION_PBL: + write_lock_irq(&mhi_cntrl->pm_lock); + if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (MHI_IN_PBL(mhi_cntrl->ee)) + wake_up_all(&mhi_cntrl->state_event); + break; + case MHI_ST_TRANSITION_SBL: + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->ee = MHI_EE_SBL; + write_unlock_irq(&mhi_cntrl->pm_lock); + wake_up_all(&mhi_cntrl->state_event); + mhi_create_devices(mhi_cntrl); + break; + case MHI_ST_TRANSITION_MISSION_MODE: + mhi_pm_mission_mode_transition(mhi_cntrl); + break; + case MHI_ST_TRANSITION_READY: + mhi_ready_state_transition(mhi_cntrl); + break; + case MHI_ST_TRANSITION_FP: + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->ee = MHI_EE_FP; + write_unlock_irq(&mhi_cntrl->pm_lock); + wake_up(&mhi_cntrl->state_event); + mhi_create_devices(mhi_cntrl); + break; + default: + break; + } + kfree(itr); + } +} + +int mhi_async_power_up(struct mhi_controller *mhi_cntrl) +{ + int ret; + u32 val, regVal; + enum mhi_ee current_ee; + enum MHI_ST_TRANSITION next_state; + + MHI_LOG("Requested to power on\n"); + +#if 0 + if (mhi_cntrl->msi_allocated < mhi_cntrl->total_ev_rings) + return -EINVAL; +#endif + + if (mhi_get_mhi_state(mhi_cntrl) >= MHI_STATE_M0) { + MHI_LOG("mhi_state = %s\n", mhi_state_str[mhi_get_mhi_state(mhi_cntrl)]); + MHI_LOG("Trigger device into MHI_RESET\n"); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); + msleep(50); + MHI_LOG("mhi_state = %s\n", mhi_state_str[mhi_get_mhi_state(mhi_cntrl)]); + } + +#if 1 //GLUE.SDX55_LE.1.0-00098-NOOP_TEST-1\common\hostdrivers\win\MhiHost MhiInitNewDev() + /* Check device Channels support */ + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, MHICFG, ®Val); +#if 0 + val = MHI_READ_REG_FIELD(regVal, MHICFG, NCH); + MHI_LOG("Device CHs: %d\n", val); + val = MHI_READ_REG_FIELD(regVal, MHICFG, NHWCH); + MHI_LOG("Device HW CHs: %d\n", val); + val = MHI_READ_REG_FIELD(regVal, MHICFG, NER); + MHI_LOG("Device ERs: %d\n", val); + val = MHI_READ_REG_FIELD(regVal, MHICFG, NHWER); + MHI_LOG("Device HW ERs: %d\n", val); +#endif + /* Set the numbers of Event Rings supported */ + MHI_WRITE_REG_FIELD(regVal, MHICFG, NER, NUM_MHI_EVT_RINGS); + MHI_WRITE_REG_FIELD(regVal, MHICFG, NHWER, NUM_MHI_HW_EVT_RINGS); + mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHICFG, regVal); +#endif + + /* set to default wake if not set */ + if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put) { + mhi_cntrl->wake_get = mhi_assert_dev_wake; + mhi_cntrl->wake_put = mhi_deassert_dev_wake; + } + + mutex_lock(&mhi_cntrl->pm_mutex); + mhi_cntrl->pm_state = MHI_PM_DISABLE; + mhi_cntrl->dev_state = MHI_STATE_RESET; + + if (!mhi_cntrl->pre_init) { + /* setup device context */ + ret = mhi_init_dev_ctxt(mhi_cntrl); + if (ret) { + MHI_ERR("Error setting dev_context\n"); + goto error_dev_ctxt; + } + + ret = mhi_init_irq_setup(mhi_cntrl); + if (ret) { + MHI_ERR("Error setting up irq\n"); + goto error_setup_irq; + } + } + + /* setup bhi offset & intvec */ + 
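+ /*
+ * BHIOFF (and BHIEOFF below) holds the offset of the BHI (BHIE)
+ * register block within the controller's MMIO space
+ */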
write_lock_irq(&mhi_cntrl->pm_lock); + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &val); + if (ret) { + write_unlock_irq(&mhi_cntrl->pm_lock); + MHI_ERR("Error getting bhi offset\n"); + goto error_bhi_offset; + } + + mhi_cntrl->bhi = mhi_cntrl->regs + val; + + /* setup bhie offset */ + if (mhi_cntrl->fbc_download || true) { + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, &val); + if (ret) { + write_unlock_irq(&mhi_cntrl->pm_lock); + MHI_ERR("Error getting bhie offset\n"); + goto error_bhi_offset; + } + + mhi_cntrl->bhie = mhi_cntrl->regs + val; + } + + mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, mhi_cntrl->msi_irq_base); + mhi_cntrl->pm_state = MHI_PM_POR; + mhi_cntrl->ee = MHI_EE_MAX; + current_ee = mhi_get_exec_env(mhi_cntrl); + write_unlock_irq(&mhi_cntrl->pm_lock); + + MHI_LOG("dev_state:%s ee:%s\n", + TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)), + TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl))); + + /* confirm device is in valid exec env */ + if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) { + //MHI_ERR("Not a valid ee for power on\n"); + //ret = -EIO; + //goto error_bhi_offset; + } + + /* transition to next state */ + next_state = MHI_IN_PBL(current_ee) ? + MHI_ST_TRANSITION_PBL : MHI_ST_TRANSITION_READY; + + //if (next_state == MHI_ST_TRANSITION_PBL) + // schedule_work(&mhi_cntrl->fw_worker); + + if (next_state == MHI_ST_TRANSITION_PBL) + schedule_delayed_work(&mhi_cntrl->ready_worker, msecs_to_jiffies(10)); + else + mhi_queue_state_transition(mhi_cntrl, next_state); + + mhi_init_debugfs(mhi_cntrl); + + mutex_unlock(&mhi_cntrl->pm_mutex); + + MHI_LOG("Power on setup success\n"); + + return 0; + +error_bhi_offset: + if (!mhi_cntrl->pre_init) + mhi_deinit_free_irq(mhi_cntrl); + +error_setup_irq: + if (!mhi_cntrl->pre_init) + mhi_deinit_dev_ctxt(mhi_cntrl); + +error_dev_ctxt: + mutex_unlock(&mhi_cntrl->pm_mutex); + + return ret; +} +EXPORT_SYMBOL(mhi_async_power_up); + +void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful) +{ + enum MHI_PM_STATE cur_state; + + /* if it's not graceful shutdown, force MHI to a linkdown state */ + if (!graceful) { + mutex_lock(&mhi_cntrl->pm_mutex); + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, + MHI_PM_LD_ERR_FATAL_DETECT); + write_unlock_irq(&mhi_cntrl->pm_lock); + mutex_unlock(&mhi_cntrl->pm_mutex); + if (cur_state != MHI_PM_LD_ERR_FATAL_DETECT) + MHI_ERR("Failed to move to state:%s from:%s\n", + to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + } + mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS); + + mhi_deinit_debugfs(mhi_cntrl); + + if (!mhi_cntrl->pre_init) { + /* free all allocated resources */ + if (mhi_cntrl->fbc_image) { + mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image); + mhi_cntrl->fbc_image = NULL; + } + mhi_deinit_free_irq(mhi_cntrl); + mhi_deinit_dev_ctxt(mhi_cntrl); + } +} +EXPORT_SYMBOL(mhi_power_down); + +int mhi_sync_power_up(struct mhi_controller *mhi_cntrl) +{ + int ret = mhi_async_power_up(mhi_cntrl); + + if (ret) + return ret; + + wait_event_timeout(mhi_cntrl->state_event, + MHI_IN_MISSION_MODE(mhi_cntrl->ee) || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + return (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 
0 : -EIO; +} +EXPORT_SYMBOL(mhi_sync_power_up); + +int mhi_pm_suspend(struct mhi_controller *mhi_cntrl) +{ + int ret; + enum MHI_PM_STATE new_state; + struct mhi_chan *itr, *tmp; + + if (mhi_cntrl->pm_state == MHI_PM_DISABLE) + return -EINVAL; + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + return -EIO; + + /* do a quick check to see if there is any pending data, then exit */ + if (atomic_read(&mhi_cntrl->dev_wake)) { + MHI_VERB("Busy, aborting M3\n"); + return -EBUSY; + } + + /* bring MHI out of the M2 state */ + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M0 || + mhi_cntrl->dev_state == MHI_STATE_M1 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR( + "Did not enter M0||M1 state, cur_state:%s pm_state:%s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + ret = -EIO; + goto error_m0_entry; + } + + write_lock_irq(&mhi_cntrl->pm_lock); + + /* we're asserting wake, so the count should be at least 1 */ + if (atomic_read(&mhi_cntrl->dev_wake) > 1) { + MHI_VERB("Busy, aborting M3\n"); + write_unlock_irq(&mhi_cntrl->pm_lock); + ret = -EBUSY; + goto error_m0_entry; + } + + /* any time after this, we will resume through the runtime PM framework */ + MHI_LOG("Allowing M3 transition\n"); + new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER); + if (new_state != MHI_PM_M3_ENTER) { + write_unlock_irq(&mhi_cntrl->pm_lock); + MHI_ERR("Error setting to pm_state:%s from pm_state:%s\n", + to_mhi_pm_state_str(MHI_PM_M3_ENTER), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + + ret = -EIO; + goto error_m0_entry; + } + + /* set dev to M3 and wait for completion */ + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3); + mhi_cntrl->wake_put(mhi_cntrl, false); + write_unlock_irq(&mhi_cntrl->pm_lock); + MHI_LOG("Wait for M3 completion\n"); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M3 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("Did not enter M3 state, cur_state:%s pm_state:%s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + /* notify any clients we enter lpm */ + list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) { + mutex_lock(&itr->mutex); + if (itr->mhi_dev) + mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER); + mutex_unlock(&itr->mutex); + } + + return 0; + +error_m0_entry: + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return ret; +} +EXPORT_SYMBOL(mhi_pm_suspend); + +int mhi_pm_resume(struct mhi_controller *mhi_cntrl) +{ + enum MHI_PM_STATE cur_state; + int ret; + struct mhi_chan *itr, *tmp; + + MHI_LOG("Entered with pm_state:%s dev_state:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state)); + + if (mhi_cntrl->pm_state == MHI_PM_DISABLE) + return 0; + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + return -EIO; + + MHI_ASSERT(mhi_cntrl->pm_state != MHI_PM_M3, "mhi_pm_state != M3"); + + /* notify any clients we exit lpm */ + list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) { + mutex_lock(&itr->mutex); + if (itr->mhi_dev) + mhi_notify(itr->mhi_dev,
MHI_CB_LPM_EXIT); + mutex_unlock(&itr->mutex); + } + + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT); + if (cur_state != MHI_PM_M3_EXIT) { + write_unlock_irq(&mhi_cntrl->pm_lock); + MHI_ERR("Error setting to pm_state:%s from pm_state:%s\n", + to_mhi_pm_state_str(MHI_PM_M3_EXIT), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + /* set dev to M0 and wait for completion */ + mhi_cntrl->wake_get(mhi_cntrl, true); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0); + write_unlock_irq(&mhi_cntrl->pm_lock); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M0 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("Did not enter M0 state, cur_state:%s pm_state:%s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + + /* + * It's possible device already in error state and we didn't + * process it due to low power mode, force a check + */ + mhi_intvec_threaded_handlr(0, mhi_cntrl); + return -EIO; + } + + return 0; +} + +int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl) +{ + int ret; + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, true); + if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) { + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + } + read_unlock_bh(&mhi_cntrl->pm_lock); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->pm_state == MHI_PM_M0 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("Did not enter M0 state, cur_state:%s pm_state:%s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + return -EIO; + } + + return 0; +} + +void mhi_device_get(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + atomic_inc(&mhi_dev->dev_wake); + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, true); + read_unlock_bh(&mhi_cntrl->pm_lock); +} +EXPORT_SYMBOL(mhi_device_get); + +int mhi_device_get_sync(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + int ret; + + ret = __mhi_device_get_sync(mhi_cntrl); + if (!ret) + atomic_inc(&mhi_dev->dev_wake); + + return ret; +} +EXPORT_SYMBOL(mhi_device_get_sync); + +void mhi_device_put(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + atomic_dec(&mhi_dev->dev_wake); + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); +} +EXPORT_SYMBOL(mhi_device_put); + +int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl) +{ + int ret; + + MHI_LOG("Enter with pm_state:%s ee:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + MHI_LOG("Triggering SYS_ERR to force rddm state\n"); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR); + + /* wait for rddm event */ + MHI_LOG("Waiting for device to enter RDDM state\n"); + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->ee == 
MHI_EE_RDDM, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + ret = ret ? 0 : -EIO; + + MHI_LOG("Exiting with pm_state:%s ee:%s ret:%d\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee), ret); + + return ret; +} +EXPORT_SYMBOL(mhi_force_rddm_mode); diff --git a/package/wwan/driver/quectel_MHI/src/core/mhi_sdx20.h b/package/wwan/driver/quectel_MHI/src/core/mhi_sdx20.h new file mode 100644 index 000000000..5a92efa4c --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/core/mhi_sdx20.h @@ -0,0 +1,362 @@ +#ifndef __SDX20_MHI_H +#define __SDX20_MHI_H + +#include + +/* MHI control data structures alloted by the host, including + * channel context array, event context array, command context and rings */ + +/* Channel context state */ +enum mhi_dev_ch_ctx_state { + MHI_DEV_CH_STATE_DISABLED, + MHI_DEV_CH_STATE_ENABLED, + MHI_DEV_CH_STATE_RUNNING, + MHI_DEV_CH_STATE_SUSPENDED, + MHI_DEV_CH_STATE_STOP, + MHI_DEV_CH_STATE_ERROR, + MHI_DEV_CH_STATE_RESERVED, + MHI_DEV_CH_STATE_32BIT = 0x7FFFFFFF +}; + +/* Channel type */ +enum mhi_dev_ch_ctx_type { + MHI_DEV_CH_TYPE_NONE, + MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL, + MHI_DEV_CH_TYPE_INBOUND_CHANNEL, + MHI_DEV_CH_RESERVED +}; + +/* Channel context type */ +struct mhi_dev_ch_ctx { + enum mhi_dev_ch_ctx_state ch_state; + enum mhi_dev_ch_ctx_type ch_type; + uint32_t err_indx; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +enum mhi_dev_ring_element_type_id { + MHI_DEV_RING_EL_INVALID = 0, + MHI_DEV_RING_EL_NOOP = 1, + MHI_DEV_RING_EL_TRANSFER = 2, + MHI_DEV_RING_EL_RESET = 16, + MHI_DEV_RING_EL_STOP = 17, + MHI_DEV_RING_EL_START = 18, + MHI_DEV_RING_EL_MHI_STATE_CHG = 32, + MHI_DEV_RING_EL_CMD_COMPLETION_EVT = 33, + MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT = 34, + MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY = 64, + MHI_DEV_RING_EL_UNDEF +}; + +enum mhi_dev_ring_state { + RING_STATE_UINT = 0, + RING_STATE_IDLE, + RING_STATE_PENDING, +}; + +enum mhi_dev_ring_type { + RING_TYPE_CMD = 0, + RING_TYPE_ER, + RING_TYPE_CH, + RING_TYPE_INVAL +}; + +/* Event context interrupt moderation */ +enum mhi_dev_evt_ctx_int_mod_timer { + MHI_DEV_EVT_INT_MODERATION_DISABLED +}; + +/* Event ring type */ +enum mhi_dev_evt_ctx_event_ring_type { + MHI_DEV_EVT_TYPE_DEFAULT, + MHI_DEV_EVT_TYPE_VALID, + MHI_DEV_EVT_RESERVED +}; + +/* Event ring context type */ +struct mhi_dev_ev_ctx { + uint32_t res1:16; + enum mhi_dev_evt_ctx_int_mod_timer intmodt:16; + enum mhi_dev_evt_ctx_event_ring_type ertype; + uint32_t msivec; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* Command context */ +struct mhi_dev_cmd_ctx { + uint32_t res1; + uint32_t res2; + uint32_t res3; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* generic context */ +struct mhi_dev_gen_ctx { + uint32_t res1; + uint32_t res2; + uint32_t res3; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* Transfer ring element */ +struct mhi_dev_transfer_ring_element { + uint64_t data_buf_ptr; + uint32_t len:16; + uint32_t res1:16; + uint32_t chain:1; + uint32_t res2:7; + uint32_t ieob:1; + uint32_t ieot:1; + uint32_t bei:1; + uint32_t res3:5; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res4:8; +} __packed; + +/* Command ring element */ +/* Command ring No op command */ +struct mhi_dev_cmd_ring_op { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring reset channel command */ 
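+/*
+ * Added note (not in the original header): the command ring elements below
+ * all share the same 16-byte TRE layout -- reserved words followed by a
+ * "type" field taken from enum mhi_dev_ring_element_type_id and the target
+ * channel id. As a hypothetical illustration, host-side code could build a
+ * reset command through the union defined later in this file:
+ *
+ *	union mhi_dev_ring_element_type el = {0};
+ *	el.cmd_reset.type = MHI_DEV_RING_EL_RESET;
+ *	el.cmd_reset.chid = chan_id;	/* chan_id: caller-supplied channel */
+ */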
+struct mhi_dev_cmd_ring_reset_channel_cmd { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring stop channel command */ +struct mhi_dev_cmd_ring_stop_channel_cmd { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring start channel command */ +struct mhi_dev_cmd_ring_start_channel_cmd { + uint64_t res1; + uint32_t seqnum; + uint32_t reliable:1; + uint32_t res2:15; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +enum mhi_dev_cmd_completion_code { + MHI_CMD_COMPL_CODE_INVALID = 0, + MHI_CMD_COMPL_CODE_SUCCESS = 1, + MHI_CMD_COMPL_CODE_EOT = 2, + MHI_CMD_COMPL_CODE_OVERFLOW = 3, + MHI_CMD_COMPL_CODE_EOB = 4, + MHI_CMD_COMPL_CODE_UNDEFINED = 16, + MHI_CMD_COMPL_CODE_RING_EL = 17, + MHI_CMD_COMPL_CODE_RES +}; + +/* Event ring elements */ +/* Transfer completion event */ +struct mhi_dev_event_ring_transfer_completion { + uint64_t ptr; + uint32_t len:16; + uint32_t res1:8; + enum mhi_dev_cmd_completion_code code:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command completion event */ +struct mhi_dev_event_ring_cmd_completion { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_cmd_completion_code code:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +enum mhi_dev_state { + MHI_DEV_RESET_STATE = 0, + MHI_DEV_READY_STATE, + MHI_DEV_M0_STATE, + MHI_DEV_M1_STATE, + MHI_DEV_M2_STATE, + MHI_DEV_M3_STATE, + MHI_DEV_MAX_STATE, + MHI_DEV_SYSERR_STATE = 0xff +}; + +/* MHI state change event */ +struct mhi_dev_event_ring_state_change { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_state mhistate:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +enum mhi_dev_execenv { + MHI_DEV_SBL_EE = 1, + MHI_DEV_AMSS_EE = 2, + MHI_DEV_UNRESERVED +}; + +/* EE state change event */ +struct mhi_dev_event_ring_ee_state_change { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_execenv execenv:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +/* Generic cmd to parse common details like type and channel id */ +struct mhi_dev_ring_generic { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_state mhistate:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +struct mhi_config { + uint32_t mhi_reg_len; + uint32_t version; + uint32_t event_rings; + uint32_t channels; + uint32_t chdb_offset; + uint32_t erdb_offset; +}; + +#define NUM_CHANNELS 128 +#define HW_CHANNEL_BASE 100 +#define HW_CHANNEL_END 107 +#define MHI_ENV_VALUE 2 +#define MHI_MASK_ROWS_CH_EV_DB 4 +#define TRB_MAX_DATA_SIZE 8192 +#define MHI_CTRL_STATE 25 +#define IPA_DMA_SYNC 1 +#define IPA_DMA_ASYNC 0 + +/*maximum trasnfer completion events buffer*/ +#define MAX_TR_EVENTS 50 +/*maximum event requests */ +#define MHI_MAX_EVT_REQ 50 + +/* Possible ring element types */ +union mhi_dev_ring_element_type { + struct mhi_dev_cmd_ring_op cmd_no_op; + struct mhi_dev_cmd_ring_reset_channel_cmd cmd_reset; + struct mhi_dev_cmd_ring_stop_channel_cmd cmd_stop; + struct mhi_dev_cmd_ring_start_channel_cmd cmd_start; + struct mhi_dev_transfer_ring_element cmd_transfer; + struct mhi_dev_event_ring_transfer_completion evt_tr_comp; + struct mhi_dev_event_ring_cmd_completion evt_cmd_comp; + struct 
mhi_dev_event_ring_state_change evt_state_change; + struct mhi_dev_event_ring_ee_state_change evt_ee_state; + struct mhi_dev_ring_generic generic; +}; + +/* Transfer ring element type */ +union mhi_dev_ring_ctx { + struct mhi_dev_cmd_ctx cmd; + struct mhi_dev_ev_ctx ev; + struct mhi_dev_ch_ctx ch; + struct mhi_dev_gen_ctx generic; +}; + +/* MHI host Control and data address region */ +struct mhi_host_addr { + uint32_t ctrl_base_lsb; + uint32_t ctrl_base_msb; + uint32_t ctrl_limit_lsb; + uint32_t ctrl_limit_msb; + uint32_t data_base_lsb; + uint32_t data_base_msb; + uint32_t data_limit_lsb; + uint32_t data_limit_msb; +}; + +/* MHI physical and virtual address region */ +struct mhi_meminfo { + struct device *dev; + uintptr_t pa_aligned; + uintptr_t pa_unaligned; + uintptr_t va_aligned; + uintptr_t va_unaligned; + uintptr_t size; +}; + +struct mhi_addr { + uint64_t host_pa; + uintptr_t device_pa; + uintptr_t device_va; + size_t size; + dma_addr_t phy_addr; + void *virt_addr; + bool use_ipa_dma; +}; + +struct mhi_interrupt_state { + uint32_t mask; + uint32_t status; +}; + +enum mhi_dev_channel_state { + MHI_DEV_CH_UNINT, + MHI_DEV_CH_STARTED, + MHI_DEV_CH_PENDING_START, + MHI_DEV_CH_PENDING_STOP, + MHI_DEV_CH_STOPPED, + MHI_DEV_CH_CLOSED, +}; + +enum mhi_dev_ch_operation { + MHI_DEV_OPEN_CH, + MHI_DEV_CLOSE_CH, + MHI_DEV_READ_CH, + MHI_DEV_READ_WR, + MHI_DEV_POLL, +}; + +enum mhi_ctrl_info { + MHI_STATE_CONFIGURED = 0, + MHI_STATE_CONNECTED = 1, + MHI_STATE_DISCONNECTED = 2, + MHI_STATE_INVAL, +}; + +enum mhi_dev_tr_compl_evt_type { + SEND_EVENT_BUFFER, + SEND_EVENT_RD_OFFSET, +}; + +enum mhi_dev_transfer_type { + MHI_DEV_DMA_SYNC, + MHI_DEV_DMA_ASYNC, +}; +#endif /* _SDX20_MHI_H_ */ diff --git a/package/wwan/driver/quectel_MHI/src/core/sdx20_mhi.h b/package/wwan/driver/quectel_MHI/src/core/sdx20_mhi.h new file mode 100644 index 000000000..a7d37839f --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/core/sdx20_mhi.h @@ -0,0 +1,426 @@ +#ifndef __SDX20_MHI_H +#define __SDX20_MHI_H + +#include + +/* MHI control data structures alloted by the host, including + * channel context array, event context array, command context and rings */ + +/* Channel context state */ +enum mhi_dev_ch_ctx_state { + MHI_DEV_CH_STATE_DISABLED, + MHI_DEV_CH_STATE_ENABLED, + MHI_DEV_CH_STATE_RUNNING, + MHI_DEV_CH_STATE_SUSPENDED, + MHI_DEV_CH_STATE_STOP, + MHI_DEV_CH_STATE_ERROR, + MHI_DEV_CH_STATE_RESERVED, + MHI_DEV_CH_STATE_32BIT = 0x7FFFFFFF +}; + +/* Channel type */ +enum mhi_dev_ch_ctx_type { + MHI_DEV_CH_TYPE_NONE, + MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL, + MHI_DEV_CH_TYPE_INBOUND_CHANNEL, + MHI_DEV_CH_RESERVED +}; + +/* Channel context type */ +struct mhi_dev_ch_ctx { + enum mhi_dev_ch_ctx_state ch_state; + enum mhi_dev_ch_ctx_type ch_type; + uint32_t err_indx; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +enum mhi_dev_ring_element_type_id { + MHI_DEV_RING_EL_INVALID = 0, + MHI_DEV_RING_EL_NOOP = 1, + MHI_DEV_RING_EL_TRANSFER = 2, + MHI_DEV_RING_EL_RESET = 16, + MHI_DEV_RING_EL_STOP = 17, + MHI_DEV_RING_EL_START = 18, + MHI_DEV_RING_EL_MHI_STATE_CHG = 32, + MHI_DEV_RING_EL_CMD_COMPLETION_EVT = 33, + MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT = 34, + MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY = 64, + MHI_DEV_RING_EL_UNDEF +}; + +enum mhi_dev_ring_state { + RING_STATE_UINT = 0, + RING_STATE_IDLE, + RING_STATE_PENDING, +}; + +enum mhi_dev_ring_type { + RING_TYPE_CMD = 0, + RING_TYPE_ER, + RING_TYPE_CH, + RING_TYPE_INVAL +}; + +/* Event context interrupt moderation */ +enum 
mhi_dev_evt_ctx_int_mod_timer { + MHI_DEV_EVT_INT_MODERATION_DISABLED +}; + +/* Event ring type */ +enum mhi_dev_evt_ctx_event_ring_type { + MHI_DEV_EVT_TYPE_DEFAULT, + MHI_DEV_EVT_TYPE_VALID, + MHI_DEV_EVT_RESERVED +}; + +/* Event ring context type */ +struct mhi_dev_ev_ctx { + uint32_t res1:16; + enum mhi_dev_evt_ctx_int_mod_timer intmodt:16; + enum mhi_dev_evt_ctx_event_ring_type ertype; + uint32_t msivec; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* Command context */ +struct mhi_dev_cmd_ctx { + uint32_t res1; + uint32_t res2; + uint32_t res3; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* generic context */ +struct mhi_dev_gen_ctx { + uint32_t res1; + uint32_t res2; + uint32_t res3; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* Transfer ring element */ +struct mhi_dev_transfer_ring_element { + uint64_t data_buf_ptr; + uint32_t len:16; + uint32_t res1:16; + uint32_t chain:1; + uint32_t res2:7; + uint32_t ieob:1; + uint32_t ieot:1; + uint32_t bei:1; + uint32_t res3:5; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res4:8; +} __packed; + +/* Command ring element */ +/* Command ring No op command */ +struct mhi_dev_cmd_ring_op { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring reset channel command */ +struct mhi_dev_cmd_ring_reset_channel_cmd { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring stop channel command */ +struct mhi_dev_cmd_ring_stop_channel_cmd { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring start channel command */ +struct mhi_dev_cmd_ring_start_channel_cmd { + uint64_t res1; + uint32_t seqnum; + uint32_t reliable:1; + uint32_t res2:15; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +enum mhi_dev_cmd_completion_code { + MHI_CMD_COMPL_CODE_INVALID = 0, + MHI_CMD_COMPL_CODE_SUCCESS = 1, + MHI_CMD_COMPL_CODE_EOT = 2, + MHI_CMD_COMPL_CODE_OVERFLOW = 3, + MHI_CMD_COMPL_CODE_EOB = 4, + MHI_CMD_COMPL_CODE_UNDEFINED = 16, + MHI_CMD_COMPL_CODE_RING_EL = 17, + MHI_CMD_COMPL_CODE_RES +}; + +/* Event ring elements */ +/* Transfer completion event */ +struct mhi_dev_event_ring_transfer_completion { + uint64_t ptr; + uint32_t len:16; + uint32_t res1:8; + enum mhi_dev_cmd_completion_code code:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command completion event */ +struct mhi_dev_event_ring_cmd_completion { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_cmd_completion_code code:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +enum mhi_dev_state { + MHI_DEV_RESET_STATE = 0, + MHI_DEV_READY_STATE, + MHI_DEV_M0_STATE, + MHI_DEV_M1_STATE, + MHI_DEV_M2_STATE, + MHI_DEV_M3_STATE, + MHI_DEV_MAX_STATE, + MHI_DEV_SYSERR_STATE = 0xff +}; + +/* MHI state change event */ +struct mhi_dev_event_ring_state_change { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_state mhistate:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +enum mhi_dev_execenv { + MHI_DEV_SBL_EE = 1, + MHI_DEV_AMSS_EE = 2, + MHI_DEV_UNRESERVED +}; + +/* EE state change event */ +struct 
mhi_dev_event_ring_ee_state_change { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_execenv execenv:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +/* Generic cmd to parse common details like type and channel id */ +struct mhi_dev_ring_generic { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_state mhistate:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +struct mhi_config { + uint32_t mhi_reg_len; + uint32_t version; + uint32_t event_rings; + uint32_t channels; + uint32_t chdb_offset; + uint32_t erdb_offset; +}; + +#define NUM_CHANNELS 128 +#define HW_CHANNEL_BASE 100 +#define HW_CHANNEL_END 107 +#define MHI_ENV_VALUE 2 +#define MHI_MASK_ROWS_CH_EV_DB 4 +#define TRB_MAX_DATA_SIZE 8192 +#define MHI_CTRL_STATE 25 +#define IPA_DMA_SYNC 1 +#define IPA_DMA_ASYNC 0 + +/*maximum trasnfer completion events buffer*/ +#define MAX_TR_EVENTS 50 +/*maximum event requests */ +#define MHI_MAX_EVT_REQ 50 + +/* Possible ring element types */ +union mhi_dev_ring_element_type { + struct mhi_dev_cmd_ring_op cmd_no_op; + struct mhi_dev_cmd_ring_reset_channel_cmd cmd_reset; + struct mhi_dev_cmd_ring_stop_channel_cmd cmd_stop; + struct mhi_dev_cmd_ring_start_channel_cmd cmd_start; + struct mhi_dev_transfer_ring_element tre; + struct mhi_dev_event_ring_transfer_completion evt_tr_comp; + struct mhi_dev_event_ring_cmd_completion evt_cmd_comp; + struct mhi_dev_event_ring_state_change evt_state_change; + struct mhi_dev_event_ring_ee_state_change evt_ee_state; + struct mhi_dev_ring_generic generic; +}; + +/* Transfer ring element type */ +union mhi_dev_ring_ctx { + struct mhi_dev_cmd_ctx cmd; + struct mhi_dev_ev_ctx ev; + struct mhi_dev_ch_ctx ch; + struct mhi_dev_gen_ctx generic; +}; + +/* MHI host Control and data address region */ +struct mhi_host_addr { + uint32_t ctrl_base_lsb; + uint32_t ctrl_base_msb; + uint32_t ctrl_limit_lsb; + uint32_t ctrl_limit_msb; + uint32_t data_base_lsb; + uint32_t data_base_msb; + uint32_t data_limit_lsb; + uint32_t data_limit_msb; +}; + +/* MHI physical and virtual address region */ +struct mhi_meminfo { + struct device *dev; + uintptr_t pa_aligned; + uintptr_t pa_unaligned; + uintptr_t va_aligned; + uintptr_t va_unaligned; + uintptr_t size; +}; + +struct mhi_addr { + uint64_t host_pa; + uintptr_t device_pa; + uintptr_t device_va; + size_t size; + dma_addr_t phy_addr; + void *virt_addr; + bool use_ipa_dma; +}; + +struct mhi_interrupt_state { + uint32_t mask; + uint32_t status; +}; + +enum mhi_dev_channel_state { + MHI_DEV_CH_UNINT, + MHI_DEV_CH_STARTED, + MHI_DEV_CH_PENDING_START, + MHI_DEV_CH_PENDING_STOP, + MHI_DEV_CH_STOPPED, + MHI_DEV_CH_CLOSED, +}; + +enum mhi_dev_ch_operation { + MHI_DEV_OPEN_CH, + MHI_DEV_CLOSE_CH, + MHI_DEV_READ_CH, + MHI_DEV_READ_WR, + MHI_DEV_POLL, +}; + +enum mhi_ctrl_info { + MHI_STATE_CONFIGURED = 0, + MHI_STATE_CONNECTED = 1, + MHI_STATE_DISCONNECTED = 2, + MHI_STATE_INVAL, +}; + +enum mhi_dev_tr_compl_evt_type { + SEND_EVENT_BUFFER, + SEND_EVENT_RD_OFFSET, +}; + +enum mhi_dev_transfer_type { + MHI_DEV_DMA_SYNC, + MHI_DEV_DMA_ASYNC, +}; + +#if 0 +/* SW channel client list */ +enum mhi_client_channel { + MHI_CLIENT_LOOPBACK_OUT = 0, + MHI_CLIENT_LOOPBACK_IN = 1, + MHI_CLIENT_SAHARA_OUT = 2, + MHI_CLIENT_SAHARA_IN = 3, + MHI_CLIENT_DIAG_OUT = 4, + MHI_CLIENT_DIAG_IN = 5, + MHI_CLIENT_SSR_OUT = 6, + MHI_CLIENT_SSR_IN = 7, + MHI_CLIENT_QDSS_OUT = 8, + MHI_CLIENT_QDSS_IN = 9, + MHI_CLIENT_EFS_OUT = 10, + MHI_CLIENT_EFS_IN = 11, + 
MHI_CLIENT_MBIM_OUT = 12, + MHI_CLIENT_MBIM_IN = 13, + MHI_CLIENT_QMI_OUT = 14, + MHI_CLIENT_QMI_IN = 15, + MHI_CLIENT_IP_CTRL_0_OUT = 16, + MHI_CLIENT_IP_CTRL_0_IN = 17, + MHI_CLIENT_IP_CTRL_1_OUT = 18, + MHI_CLIENT_IP_CTRL_1_IN = 19, + MHI_CLIENT_DCI_OUT = 20, + MHI_CLIENT_DCI_IN = 21, + MHI_CLIENT_IP_CTRL_3_OUT = 22, + MHI_CLIENT_IP_CTRL_3_IN = 23, + MHI_CLIENT_IP_CTRL_4_OUT = 24, + MHI_CLIENT_IP_CTRL_4_IN = 25, + MHI_CLIENT_IP_CTRL_5_OUT = 26, + MHI_CLIENT_IP_CTRL_5_IN = 27, + MHI_CLIENT_IP_CTRL_6_OUT = 28, + MHI_CLIENT_IP_CTRL_6_IN = 29, + MHI_CLIENT_IP_CTRL_7_OUT = 30, + MHI_CLIENT_IP_CTRL_7_IN = 31, + MHI_CLIENT_DUN_OUT = 32, + MHI_CLIENT_DUN_IN = 33, + MHI_CLIENT_IP_SW_0_OUT = 34, + MHI_CLIENT_IP_SW_0_IN = 35, + MHI_CLIENT_IP_SW_1_OUT = 36, + MHI_CLIENT_IP_SW_1_IN = 37, + MHI_CLIENT_IP_SW_2_OUT = 38, + MHI_CLIENT_IP_SW_2_IN = 39, + MHI_CLIENT_IP_SW_3_OUT = 40, + MHI_CLIENT_IP_SW_3_IN = 41, + MHI_CLIENT_CSVT_OUT = 42, + MHI_CLIENT_CSVT_IN = 43, + MHI_CLIENT_SMCT_OUT = 44, + MHI_CLIENT_SMCT_IN = 45, + MHI_CLIENT_IP_SW_4_OUT = 46, + MHI_CLIENT_IP_SW_4_IN = 47, + MHI_MAX_SOFTWARE_CHANNELS = 48, + MHI_CLIENT_TEST_OUT = 60, + MHI_CLIENT_TEST_IN = 61, + MHI_CLIENT_RESERVED_1_LOWER = 62, + MHI_CLIENT_RESERVED_1_UPPER = 99, + MHI_CLIENT_IP_HW_0_OUT = 100, + MHI_CLIENT_IP_HW_0_IN = 101, + MHI_CLIENT_RESERVED_2_LOWER = 102, + MHI_CLIENT_RESERVED_2_UPPER = 127, + MHI_MAX_CHANNELS = 102, +}; +#endif +#endif /* _SDX20_MHI_H_ */ diff --git a/package/wwan/driver/quectel_MHI/src/devices/Kconfig b/package/wwan/driver/quectel_MHI/src/devices/Kconfig new file mode 100644 index 000000000..d92e95b3f --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/devices/Kconfig @@ -0,0 +1,33 @@ +menu "MHI device support" + +config MHI_NETDEV + tristate "MHI NETDEV" + depends on MHI_BUS + help + MHI-based net device driver for transferring IP traffic + between the host and the modem. By enabling this driver, clients + can transfer data using a standard network interface. Over-the-air + traffic goes through the mhi netdev interface. + +config MHI_UCI + tristate "MHI UCI" + depends on MHI_BUS + help + The MHI-based UCI driver transfers data between the host and the + modem using standard file operations from user space. Open, read, + write, ioctl, and close operations are supported by this driver. + Please check mhi_uci_match_table for all supported channels that + are exposed to userspace. + +config MHI_SATELLITE + tristate "MHI SATELLITE" + depends on MHI_BUS + help + The MHI proxy satellite device driver enables NON-HLOS MHI satellite + drivers to communicate with the device over the PCIe link without host + involvement. The host facilitates propagation of events from the device + to NON-HLOS MHI satellite drivers, as well as channel states and power + management, over IPC communication. This helps with HLOS power + savings.
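+# Example (added illustration, not part of the original patch): once this
+# menu is wired into the kernel Kconfig tree, the drivers above could be
+# enabled from a kernel .config roughly as follows, assuming MHI_BUS is
+# already selected:
+#
+#   CONFIG_MHI_BUS=y
+#   CONFIG_MHI_NETDEV=m
+#   CONFIG_MHI_UCI=m
+#   CONFIG_MHI_SATELLITE=m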
+ +endmenu diff --git a/package/wwan/driver/quectel_MHI/src/devices/Makefile b/package/wwan/driver/quectel_MHI/src/devices/Makefile new file mode 100644 index 000000000..e720069fd --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/devices/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_MHI_NETDEV) +=mhi_netdev.o +obj-$(CONFIG_MHI_UCI) +=mhi_uci.o +obj-$(CONFIG_MHI_SATELLITE) +=mhi_satellite.o diff --git a/package/wwan/driver/quectel_MHI/src/devices/mhi_netdev.c b/package/wwan/driver/quectel_MHI/src/devices/mhi_netdev.c new file mode 100644 index 000000000..ed7b24ba4 --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/devices/mhi_netdev.c @@ -0,0 +1,1063 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.*/ + +#include +#include +#include +#include +//#include +#include +#include +#include +//#include +#if 1 +static inline void *ipc_log_context_create(int max_num_pages, + const char *modname, uint16_t user_version) +{ return NULL; } +static inline int ipc_log_string(void *ilctxt, const char *fmt, ...) +{ return -EINVAL; } +#endif +#include +#include +#include +#include +#include +#include "../core/mhi.h" + +#define QUECTEL_NO_DTS + +extern void rmnet_data_init(struct net_device *real_dev, u32 nr_rmnet_devs); +extern void rmnet_data_deinit(struct net_device *real_dev, u32 nr_rmnet_devs); + +static const unsigned char node_id[ETH_ALEN] = {0x02, 0x50, 0xf4, 0x00, 0x00, 0x00}; +static const unsigned char default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3, 0x00, 0x00, 0x00}; +static void qmap_hex_dump(const char *tag, unsigned char *data, unsigned len) { + uint i; + uint *d = (uint *)data; + + printk(KERN_DEBUG "%s data=%p, len=%x\n", tag, data, len); + len = (len+3)/4; + for (i = 0; i < len; i+=4) { + printk(KERN_DEBUG "%08x %08x %08x %08x %08x\n", i*4, d[i+0], d[i+1], d[i+2], d[i+3]); + } +} + +#define MHI_NETDEV_DRIVER_NAME "mhi_netdev" +#define WATCHDOG_TIMEOUT (30 * HZ) +#define IPC_LOG_PAGES (100) +#define MAX_NETBUF_SIZE (128) + +#ifdef CONFIG_MHI_DEBUG + +#define IPC_LOG_LVL (MHI_MSG_LVL_VERBOSE) + +#define MHI_ASSERT(cond, msg) do { \ + if (cond) \ + panic(msg); \ +} while (0) + +#define MSG_VERB(fmt, ...) do { \ + if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_VERBOSE) \ + pr_err("[D][%s] " fmt, __func__, ##__VA_ARGS__);\ + if (mhi_netdev->ipc_log && (mhi_netdev->ipc_log_lvl <= \ + MHI_MSG_LVL_VERBOSE)) \ + ipc_log_string(mhi_netdev->ipc_log, "[D][%s] " fmt, \ + __func__, ##__VA_ARGS__); \ +} while (0) + +#else + +#define IPC_LOG_LVL (MHI_MSG_LVL_ERROR) + +#define MHI_ASSERT(cond, msg) do { \ + if (cond) { \ + MSG_ERR(msg); \ + WARN_ON(cond); \ + } \ +} while (0) + +#define MSG_VERB(fmt, ...) + +#endif + +#define MSG_LOG(fmt, ...) do { \ + if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_INFO) \ + pr_err("[I][%s] " fmt, __func__, ##__VA_ARGS__);\ + if (mhi_netdev->ipc_log && (mhi_netdev->ipc_log_lvl <= \ + MHI_MSG_LVL_INFO)) \ + ipc_log_string(mhi_netdev->ipc_log, "[I][%s] " fmt, \ + __func__, ##__VA_ARGS__); \ +} while (0) + +#define MSG_ERR(fmt, ...) 
do { \ + if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_ERROR) \ + pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \ + if (mhi_netdev->ipc_log && (mhi_netdev->ipc_log_lvl <= \ + MHI_MSG_LVL_ERROR)) \ + ipc_log_string(mhi_netdev->ipc_log, "[E][%s] " fmt, \ + __func__, ##__VA_ARGS__); \ +} while (0) + +struct mhi_net_chain { + struct sk_buff *head, *tail; /* chained skb */ +}; + +struct mhi_netdev { + int alias; + struct mhi_device *mhi_dev; + struct mhi_netdev *rsc_dev; /* rsc linked node */ + bool is_rsc_dev; + int wake; + + u32 mru; + u32 order; + const char *interface_name; + struct napi_struct *napi; + struct net_device *ndev; + bool ethernet_interface; + + struct mhi_netbuf **netbuf_pool; + int pool_size; /* must be power of 2 */ + int current_index; + bool chain_skb; + struct mhi_net_chain *chain; + + struct dentry *dentry; + enum MHI_DEBUG_LEVEL msg_lvl; + enum MHI_DEBUG_LEVEL ipc_log_lvl; + void *ipc_log; + + //struct rmnet_port port; +}; + +struct mhi_netdev_priv { + struct mhi_netdev *mhi_netdev; +}; + +/* Try not to make this structure bigger than 128 bytes, since this take space + * in payload packet. + * Example: If MRU = 16K, effective MRU = 16K - sizeof(mhi_netbuf) + */ +struct mhi_netbuf { + struct mhi_buf mhi_buf; /* this must be first element */ + void (*unmap)(struct device *dev, dma_addr_t addr, size_t size, + enum dma_data_direction dir); +}; + +static struct mhi_driver mhi_netdev_driver; +static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev); + +static __be16 mhi_netdev_ip_type_trans(u8 data) +{ + __be16 protocol = 0; + + /* determine L3 protocol */ + switch (data & 0xf0) { + case 0x40: + protocol = htons(ETH_P_IP); + break; + case 0x60: + protocol = htons(ETH_P_IPV6); + break; + default: + /* default is QMAP */ + protocol = htons(ETH_P_MAP); + break; + } + protocol = htons(ETH_P_MAP); //carl.yin fix set + return protocol; +} + +static struct mhi_netbuf *mhi_netdev_alloc(struct device *dev, + gfp_t gfp, + unsigned int order) +{ + struct page *page; + struct mhi_netbuf *netbuf; + struct mhi_buf *mhi_buf; + void *vaddr; + + page = __dev_alloc_pages(gfp, order); + if (!page) + return NULL; + + vaddr = page_address(page); + + /* we going to use the end of page to store cached data */ + netbuf = vaddr + (PAGE_SIZE << order) - sizeof(*netbuf); + + mhi_buf = (struct mhi_buf *)netbuf; + mhi_buf->page = page; + mhi_buf->buf = vaddr; + mhi_buf->len = (void *)netbuf - vaddr; + mhi_buf->dma_addr = dma_map_page(dev, page, 0, mhi_buf->len, + DMA_FROM_DEVICE); + if (dma_mapping_error(dev, mhi_buf->dma_addr)) { + __free_pages(mhi_buf->page, order); + return NULL; + } + + return netbuf; +} + +static void mhi_netdev_unmap_page(struct device *dev, + dma_addr_t dma_addr, + size_t len, + enum dma_data_direction dir) +{ + dma_unmap_page(dev, dma_addr, len, dir); +} + +static int mhi_netdev_tmp_alloc(struct mhi_netdev *mhi_netdev, int nr_tre) +{ + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + struct device *dev = mhi_dev->dev.parent; + const u32 order = mhi_netdev->order; + int i, ret; + + for (i = 0; i < nr_tre; i++) { + struct mhi_buf *mhi_buf; + struct mhi_netbuf *netbuf = mhi_netdev_alloc(dev, GFP_ATOMIC, + order); + if (!netbuf) + return -ENOMEM; + + mhi_buf = (struct mhi_buf *)netbuf; + netbuf->unmap = mhi_netdev_unmap_page; + + ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, mhi_buf, + mhi_buf->len, MHI_EOT); + if (unlikely(ret)) { + MSG_ERR("Failed to queue transfer, ret:%d\n", ret); + mhi_netdev_unmap_page(dev, mhi_buf->dma_addr, + mhi_buf->len, DMA_FROM_DEVICE); + 
__free_pages(mhi_buf->page, order); + return ret; + } + } + + return 0; +} + +static void mhi_netdev_queue(struct mhi_netdev *mhi_netdev) +{ + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + struct device *dev = mhi_dev->dev.parent; + struct mhi_netbuf *netbuf; + struct mhi_buf *mhi_buf; + struct mhi_netbuf **netbuf_pool = mhi_netdev->netbuf_pool; + int nr_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE); + int i, peak, cur_index, ret; + const int pool_size = mhi_netdev->pool_size - 1, max_peak = 4; + + MSG_VERB("Enter free_desc:%d\n", nr_tre); + + if (!nr_tre) + return; + + /* try going through the reclaim pool first */ + for (i = 0; i < nr_tre; i++) { + /* peek at the next buffer; we are going to peek several times, + * and we are going to give up if the buffers are not yet free + */ + cur_index = mhi_netdev->current_index; + netbuf = NULL; + for (peak = 0; peak < max_peak; peak++) { + struct mhi_netbuf *tmp = netbuf_pool[cur_index]; + + mhi_buf = &tmp->mhi_buf; + + cur_index = (cur_index + 1) & pool_size; + + /* a page count of 1 means idle, the buffer is free to reclaim */ + if (page_count(mhi_buf->page) == 1) { + netbuf = tmp; + break; + } + } + + /* could not find a free buffer */ + if (!netbuf) + break; + + /* increment the reference count so that when the network stack + * is done with the buffer, it won't be freed + */ + get_page(mhi_buf->page); + dma_sync_single_for_device(dev, mhi_buf->dma_addr, mhi_buf->len, + DMA_FROM_DEVICE); + ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, mhi_buf, + mhi_buf->len, MHI_EOT); + if (unlikely(ret)) { + MSG_ERR("Failed to queue buffer, ret:%d\n", ret); + netbuf->unmap(dev, mhi_buf->dma_addr, mhi_buf->len, + DMA_FROM_DEVICE); + put_page(mhi_buf->page); + return; + } + mhi_netdev->current_index = cur_index; + } + + /* recycling did not work, buffers are still busy; allocate temp pkts */ + if (i < nr_tre) + mhi_netdev_tmp_alloc(mhi_netdev, nr_tre - i); +} + +/* allocate the pool of memory */ +static int mhi_netdev_alloc_pool(struct mhi_netdev *mhi_netdev) +{ + int i; + struct mhi_netbuf *netbuf, **netbuf_pool; + struct mhi_buf *mhi_buf; + const u32 order = mhi_netdev->order; + struct device *dev = mhi_netdev->mhi_dev->dev.parent; + + netbuf_pool = kmalloc_array(mhi_netdev->pool_size, sizeof(*netbuf_pool), + GFP_KERNEL); + if (!netbuf_pool) + return -ENOMEM; + + for (i = 0; i < mhi_netdev->pool_size; i++) { + /* allocate paged data */ + netbuf = mhi_netdev_alloc(dev, GFP_KERNEL, order); + if (!netbuf) + goto error_alloc_page; + + netbuf->unmap = dma_sync_single_for_cpu; + netbuf_pool[i] = netbuf; + } + + mhi_netdev->netbuf_pool = netbuf_pool; + + return 0; + +error_alloc_page: + for (--i; i >= 0; i--) { + netbuf = netbuf_pool[i]; + mhi_buf = &netbuf->mhi_buf; + dma_unmap_page(dev, mhi_buf->dma_addr, mhi_buf->len, + DMA_FROM_DEVICE); + __free_pages(mhi_buf->page, order); + } + + kfree(netbuf_pool); + + return -ENOMEM; +} + +static void mhi_netdev_free_pool(struct mhi_netdev *mhi_netdev) +{ + int i; + struct mhi_netbuf *netbuf, **netbuf_pool = mhi_netdev->netbuf_pool; + struct device *dev = mhi_netdev->mhi_dev->dev.parent; + struct mhi_buf *mhi_buf; + + for (i = 0; i < mhi_netdev->pool_size; i++) { + netbuf = netbuf_pool[i]; + mhi_buf = &netbuf->mhi_buf; + dma_unmap_page(dev, mhi_buf->dma_addr, mhi_buf->len, + DMA_FROM_DEVICE); + __free_pages(mhi_buf->page, mhi_netdev->order); + } + + kfree(mhi_netdev->netbuf_pool); + mhi_netdev->netbuf_pool = NULL; +} + +static int mhi_netdev_poll(struct napi_struct *napi, int budget) +{ + struct net_device *dev = napi->dev; + struct
mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev); + struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + struct mhi_netdev *rsc_dev = mhi_netdev->rsc_dev; + struct mhi_net_chain *chain = mhi_netdev->chain; + int rx_work = 0; + + MSG_VERB("Entered\n"); + + rx_work = mhi_poll(mhi_dev, budget); + + /* chained skb, push it to stack */ + if (chain && chain->head) { + netif_receive_skb(chain->head); + chain->head = NULL; + } + + if (rx_work < 0) { + MSG_ERR("Error polling ret:%d\n", rx_work); + napi_complete(napi); + return 0; + } + + /* queue new buffers */ + mhi_netdev_queue(mhi_netdev); + + if (rsc_dev) + mhi_netdev_queue(rsc_dev); + + /* complete work if # of packet processed less than allocated budget */ + if (rx_work < budget) + napi_complete(napi); + + MSG_VERB("polled %d pkts\n", rx_work); + + return rx_work; +} + +static int mhi_netdev_open(struct net_device *dev) +{ + struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev); + struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + + MSG_LOG("Opened net dev interface\n"); + + /* tx queue may not necessarily be stopped already + * so stop the queue if tx path is not enabled + */ + if (!mhi_dev->ul_chan) + netif_stop_queue(dev); + else + netif_start_queue(dev); + + return 0; + +} + +static int mhi_netdev_change_mtu(struct net_device *dev, int new_mtu) +{ + struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev); + struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + + if (new_mtu < 0 || mhi_dev->mtu < new_mtu) + return -EINVAL; + + dev->mtu = new_mtu; + return 0; +} + +static int mhi_netdev_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev); + struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + int res = 0; + + MSG_VERB("Entered\n"); + + //printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + //qmap_hex_dump(__func__, skb->data, 32); + if (skb->protocol != htons(ETH_P_MAP)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + res = mhi_queue_transfer(mhi_dev, DMA_TO_DEVICE, skb, skb->len, + MHI_EOT); + if (res) { + MSG_VERB("Failed to queue with reason:%d\n", res); + netif_stop_queue(dev); + res = NETDEV_TX_BUSY; + } + + MSG_VERB("Exited\n"); + + return res; +} + +#if 0 +static int mhi_netdev_ioctl_extended(struct net_device *dev, struct ifreq *ifr) +{ + struct rmnet_ioctl_extended_s ext_cmd; + int rc = 0; + struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev); + struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + + rc = copy_from_user(&ext_cmd, ifr->ifr_ifru.ifru_data, + sizeof(struct rmnet_ioctl_extended_s)); + if (rc) + return rc; + + switch (ext_cmd.extended_ioctl) { + case RMNET_IOCTL_GET_SUPPORTED_FEATURES: + ext_cmd.u.data = 0; + break; + case RMNET_IOCTL_GET_DRIVER_NAME: + strlcpy(ext_cmd.u.if_name, mhi_netdev->interface_name, + sizeof(ext_cmd.u.if_name)); + break; + case RMNET_IOCTL_SET_SLEEP_STATE: + if (ext_cmd.u.data && mhi_netdev->wake) { + /* Request to enable LPM */ + MSG_VERB("Enable MHI LPM"); + mhi_netdev->wake--; + mhi_device_put(mhi_dev); + } else if (!ext_cmd.u.data && !mhi_netdev->wake) { + /* Request to disable LPM */ + MSG_VERB("Disable MHI LPM"); + mhi_netdev->wake++; + 
mhi_device_get(mhi_dev); + } + break; + default: + rc = -EINVAL; + break; + } + + rc = copy_to_user(ifr->ifr_ifru.ifru_data, &ext_cmd, + sizeof(struct rmnet_ioctl_extended_s)); + return rc; +} + +static int mhi_netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + int rc = 0; + struct rmnet_ioctl_data_s ioctl_data; + + switch (cmd) { + case RMNET_IOCTL_SET_LLP_IP: /* set RAWIP protocol */ + break; + case RMNET_IOCTL_GET_LLP: /* get link protocol state */ + ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP; + if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data, + sizeof(struct rmnet_ioctl_data_s))) + rc = -EFAULT; + break; + case RMNET_IOCTL_GET_OPMODE: /* get operation mode */ + ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP; + if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data, + sizeof(struct rmnet_ioctl_data_s))) + rc = -EFAULT; + break; + case RMNET_IOCTL_SET_QOS_ENABLE: + rc = -EINVAL; + break; + case RMNET_IOCTL_SET_QOS_DISABLE: + rc = 0; + break; + case RMNET_IOCTL_OPEN: + case RMNET_IOCTL_CLOSE: + /* we just ignore them and return success */ + rc = 0; + break; + case RMNET_IOCTL_EXTENDED: + rc = mhi_netdev_ioctl_extended(dev, ifr); + break; + default: + /* don't fail any IOCTL right now */ + rc = 0; + break; + } + + return rc; +} +#endif + +static void mhi_netdev_get_drvinfo (struct net_device *ndev, struct ethtool_drvinfo *info) +{ + //struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev); + + strlcpy (info->driver, "pcie_mhi", sizeof info->driver); + strlcpy (info->version, PCIE_MHI_DRIVER_VERSION, sizeof info->version); +} + +static const struct ethtool_ops mhi_netdev_ethtool_ops = { + .get_drvinfo = mhi_netdev_get_drvinfo, +}; + +static const struct net_device_ops mhi_netdev_ops_ip = { + .ndo_open = mhi_netdev_open, + .ndo_start_xmit = mhi_netdev_xmit, + //.ndo_do_ioctl = mhi_netdev_ioctl, + .ndo_change_mtu = mhi_netdev_change_mtu, + .ndo_set_mac_address = 0, + .ndo_validate_addr = 0, +}; + +static void mhi_netdev_setup(struct net_device *dev) +{ + dev->netdev_ops = &mhi_netdev_ops_ip; + ether_setup(dev); + + /* set this after calling ether_setup */ + dev->header_ops = 0; /* No header */ + dev->type = ARPHRD_RAWIP; + dev->hard_header_len = 0; + dev->addr_len = 0; + dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); + dev->watchdog_timeo = WATCHDOG_TIMEOUT; + + dev->ethtool_ops = &mhi_netdev_ethtool_ops; + memcpy (dev->dev_addr, node_id, sizeof node_id); + dev->flags |= IFF_NOARP; +} + +/* enable mhi_netdev netdev, call only after grabbing mhi_netdev.mutex */ +static int mhi_netdev_enable_iface(struct mhi_netdev *mhi_netdev) +{ + int ret = 0; + char ifalias[IFALIASZ]; + char ifname[IFNAMSIZ]; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; +#ifndef QUECTEL_NO_DTS + struct device_node *of_node = mhi_dev->dev.of_node; +#endif + struct mhi_netdev_priv *mhi_netdev_priv; + + mhi_netdev->alias = 0;//of_alias_get_id(of_node, "mhi-netdev"); + if (mhi_netdev->alias < 0) + mhi_netdev->alias = 0; + +#ifdef QUECTEL_NO_DTS + mhi_netdev->interface_name = "rmnet_mhi"; +#else + + ret = of_property_read_string(of_node, "mhi,interface-name", + &mhi_netdev->interface_name); +#endif + if (ret) + mhi_netdev->interface_name = mhi_netdev_driver.driver.name; + + snprintf(ifalias, sizeof(ifalias), "%s_%04x_%02u.%02u.%02u_%u", + mhi_netdev->interface_name, mhi_dev->dev_id, mhi_dev->domain, + mhi_dev->bus, mhi_dev->slot, mhi_netdev->alias); + + snprintf(ifname, sizeof(ifname), "%s%%d", mhi_netdev->interface_name); + +#ifdef QUECTEL_NO_DTS + mhi_netdev->ethernet_interface = 0; +#else + 
mhi_netdev->ethernet_interface = of_property_read_bool(of_node, + "mhi,ethernet-interface"); +#endif + rtnl_lock(); + mhi_netdev->ndev = alloc_netdev(sizeof(*mhi_netdev_priv), + ifname, NET_NAME_PREDICTABLE, + mhi_netdev_setup); + if (!mhi_netdev->ndev) { + rtnl_unlock(); + return -ENOMEM; + } + + mhi_netdev->ndev->mtu = mhi_dev->mtu; + SET_NETDEV_DEV(mhi_netdev->ndev, &mhi_dev->dev); + //dev_set_alias(mhi_netdev->ndev, ifalias, strlen(ifalias)); + mhi_netdev_priv = netdev_priv(mhi_netdev->ndev); + mhi_netdev_priv->mhi_netdev = mhi_netdev; + rtnl_unlock(); + + mhi_netdev->napi = devm_kzalloc(&mhi_dev->dev, + sizeof(*mhi_netdev->napi), GFP_KERNEL); + if (!mhi_netdev->napi) { + ret = -ENOMEM; + goto napi_alloc_fail; + } + + netif_napi_add(mhi_netdev->ndev, mhi_netdev->napi, + mhi_netdev_poll, NAPI_POLL_WEIGHT); + ret = register_netdev(mhi_netdev->ndev); + if (ret) { + MSG_ERR("Network device registration failed\n"); + goto net_dev_reg_fail; + } + + napi_enable(mhi_netdev->napi); + + MSG_LOG("Exited.\n"); + + return 0; + +net_dev_reg_fail: + netif_napi_del(mhi_netdev->napi); + +napi_alloc_fail: + free_netdev(mhi_netdev->ndev); + mhi_netdev->ndev = NULL; + + return ret; +} + +static void mhi_netdev_xfer_ul_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev); + struct sk_buff *skb = mhi_result->buf_addr; + struct net_device *ndev = mhi_netdev->ndev; + + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += skb->len; + dev_kfree_skb(skb); + + if (netif_queue_stopped(ndev)) + netif_wake_queue(ndev); +} + +static void mhi_netdev_push_skb(struct mhi_netdev *mhi_netdev, + struct mhi_buf *mhi_buf, + struct mhi_result *mhi_result) +{ + struct sk_buff *skb; + + skb = alloc_skb(0, GFP_ATOMIC); + if (!skb) { + __free_pages(mhi_buf->page, mhi_netdev->order); + return; + } + + if (!mhi_netdev->ethernet_interface) { + skb_add_rx_frag(skb, 0, mhi_buf->page, 0, + mhi_result->bytes_xferd, mhi_netdev->mru); + skb->dev = mhi_netdev->ndev; + skb->protocol = mhi_netdev_ip_type_trans(*(u8 *)mhi_buf->buf); + if (skb_linearize(skb)) + return; + } else { + skb_add_rx_frag(skb, 0, mhi_buf->page, ETH_HLEN, + mhi_result->bytes_xferd - ETH_HLEN, + mhi_netdev->mru); + skb->dev = mhi_netdev->ndev; + skb->protocol = mhi_netdev_ip_type_trans(((u8 *)mhi_buf->buf)[ETH_HLEN]); + } + netif_receive_skb(skb); +} + +static void mhi_netdev_xfer_dl_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev); + struct mhi_netbuf *netbuf = mhi_result->buf_addr; + struct mhi_buf *mhi_buf = &netbuf->mhi_buf; + struct sk_buff *skb; + struct net_device *ndev = mhi_netdev->ndev; + struct device *dev = mhi_dev->dev.parent; + struct mhi_net_chain *chain = mhi_netdev->chain; + + netbuf->unmap(dev, mhi_buf->dma_addr, mhi_buf->len, DMA_FROM_DEVICE); + + /* modem is down, drop the buffer */ + if (mhi_result->transaction_status == -ENOTCONN) { + __free_pages(mhi_buf->page, mhi_netdev->order); + return; + } + + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += mhi_result->bytes_xferd; + + if (unlikely(!chain)) { + mhi_netdev_push_skb(mhi_netdev, mhi_buf, mhi_result); + return; + } + + /* we support chaining */ + skb = alloc_skb(0, GFP_ATOMIC); + if (likely(skb)) { + if (!mhi_netdev->ethernet_interface) { + skb_add_rx_frag(skb, 0, mhi_buf->page, 0, + mhi_result->bytes_xferd, mhi_netdev->mru); + } else { + skb_add_rx_frag(skb, 0, mhi_buf->page, ETH_HLEN, + mhi_result->bytes_xferd - ETH_HLEN, + 
mhi_netdev->mru); + } + + /* this is the first on the list */ + if (!chain->head) { + skb->dev = ndev; + if (!mhi_netdev->ethernet_interface) { + skb->protocol = + mhi_netdev_ip_type_trans(*(u8 *)mhi_buf->buf); + } else { + skb->protocol = + mhi_netdev_ip_type_trans(((u8 *)mhi_buf->buf)[ETH_HLEN]); + } + chain->head = skb; + } else { + skb_shinfo(chain->tail)->frag_list = skb; + } + + chain->tail = skb; + } else { + __free_pages(mhi_buf->page, mhi_netdev->order); + } +} + +static void mhi_netdev_status_cb(struct mhi_device *mhi_dev, enum MHI_CB mhi_cb) +{ + struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev); + + if (mhi_cb != MHI_CB_PENDING_DATA) + return; + + napi_schedule(mhi_netdev->napi); +} + +#ifdef CONFIG_DEBUG_FS + +struct dentry *dentry; + +static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev) +{ + char node_name[32]; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + + /* both tx & rx client handles contain the same device info */ + snprintf(node_name, sizeof(node_name), "%s_%04x_%02u.%02u.%02u_%u", + mhi_netdev->interface_name, mhi_dev->dev_id, mhi_dev->domain, + mhi_dev->bus, mhi_dev->slot, mhi_netdev->alias); + + if (IS_ERR_OR_NULL(dentry)) + return; + + mhi_netdev->dentry = debugfs_create_dir(node_name, dentry); + if (IS_ERR_OR_NULL(mhi_netdev->dentry)) + return; +} + +static void mhi_netdev_create_debugfs_dir(void) +{ + dentry = debugfs_create_dir(MHI_NETDEV_DRIVER_NAME, 0); +} + +#else + +static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev) +{ +} + +static void mhi_netdev_create_debugfs_dir(void) +{ +} + +#endif + +static void mhi_netdev_remove(struct mhi_device *mhi_dev) +{ + struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev); + + MSG_LOG("Remove notification received\n"); + + /* the rsc parent takes care of the cleanup */ + if (mhi_netdev->is_rsc_dev) { + mhi_netdev_free_pool(mhi_netdev); + return; + } + + rmnet_data_deinit(mhi_netdev->ndev, 1); + netif_stop_queue(mhi_netdev->ndev); + napi_disable(mhi_netdev->napi); + unregister_netdev(mhi_netdev->ndev); + netif_napi_del(mhi_netdev->napi); + free_netdev(mhi_netdev->ndev); + mhi_netdev_free_pool(mhi_netdev); + + if (!IS_ERR_OR_NULL(mhi_netdev->dentry)) + debugfs_remove_recursive(mhi_netdev->dentry); +} + +static int mhi_netdev_match(struct device *dev, void *data) +{ + /* if phandle dt == device dt, we found a match */ + return (dev->of_node == data); +} + +static void mhi_netdev_clone_dev(struct mhi_netdev *mhi_netdev, + struct mhi_netdev *parent) +{ + mhi_netdev->ndev = parent->ndev; + mhi_netdev->napi = parent->napi; + mhi_netdev->ipc_log = parent->ipc_log; + mhi_netdev->msg_lvl = parent->msg_lvl; + mhi_netdev->ipc_log_lvl = parent->ipc_log_lvl; + mhi_netdev->is_rsc_dev = true; + mhi_netdev->chain = parent->chain; +} + +static int mhi_netdev_probe(struct mhi_device *mhi_dev, + const struct mhi_device_id *id) +{ + int ret; + struct mhi_netdev *mhi_netdev, *p_netdev = NULL; +#ifndef QUECTEL_NO_DTS + struct device_node *of_node = mhi_dev->dev.of_node; +#endif + int nr_tre; + char node_name[32]; + struct device_node *phandle; + bool no_chain; + +#ifndef QUECTEL_NO_DTS + if (!of_node) + return -ENODEV; +#endif + + mhi_netdev = devm_kzalloc(&mhi_dev->dev, sizeof(*mhi_netdev), + GFP_KERNEL); + if (!mhi_netdev) + return -ENOMEM; + + mhi_netdev->mhi_dev = mhi_dev; + mhi_device_set_devdata(mhi_dev, mhi_netdev); + +#ifdef QUECTEL_NO_DTS + mhi_netdev->mru = 16384; + ret = 0; +#else + ret = of_property_read_u32(of_node, "mhi,mru", &mhi_netdev->mru); +#endif + if (ret) + return
-ENODEV; + + /* MRU must be a multiple of the page size */ + mhi_netdev->order = __ilog2_u32(mhi_netdev->mru / PAGE_SIZE); + if ((PAGE_SIZE << mhi_netdev->order) < mhi_netdev->mru) + return -EINVAL; + + /* check if this device is shared by a parent device */ +#ifdef QUECTEL_NO_DTS + phandle = NULL; +#else + phandle = of_parse_phandle(of_node, "mhi,rsc-parent", 0); +#endif + if (phandle) { + struct device *dev; + struct mhi_device *pdev; + /* find the parent device */ + dev = driver_find_device(mhi_dev->dev.driver, NULL, phandle, + mhi_netdev_match); + if (!dev) + return -ENODEV; + + /* this device is shared with the parent device, so we won't be + * creating a new network interface. Clone the parent + * information to the child node + */ + pdev = to_mhi_device(dev); + p_netdev = mhi_device_get_devdata(pdev); + mhi_netdev_clone_dev(mhi_netdev, p_netdev); + put_device(dev); + } else { + mhi_netdev->msg_lvl = MHI_MSG_LVL_ERROR; +#ifdef QUECTEL_NO_DTS + no_chain = 0; +#else + no_chain = of_property_read_bool(of_node, + "mhi,disable-chain-skb"); +#endif + if (!no_chain) { + mhi_netdev->chain = devm_kzalloc(&mhi_dev->dev, + sizeof(*mhi_netdev->chain), + GFP_KERNEL); + if (!mhi_netdev->chain) + return -ENOMEM; + } + + ret = mhi_netdev_enable_iface(mhi_netdev); + if (ret) + return ret; + + /* create ipc log buffer */ + snprintf(node_name, sizeof(node_name), + "%s_%04x_%02u.%02u.%02u_%u", + mhi_netdev->interface_name, mhi_dev->dev_id, + mhi_dev->domain, mhi_dev->bus, mhi_dev->slot, + mhi_netdev->alias); + mhi_netdev->ipc_log = ipc_log_context_create(IPC_LOG_PAGES, + node_name, 0); + mhi_netdev->ipc_log_lvl = IPC_LOG_LVL; + + mhi_netdev_create_debugfs(mhi_netdev); + } + + /* move mhi channels to start state */ + ret = mhi_prepare_for_transfer(mhi_dev); + if (ret) { + MSG_ERR("Failed to start channels ret %d\n", ret); + goto error_start; + } + + rmnet_data_init(mhi_netdev->ndev, 1); + + /* set up the pool size at ~2x the ring length */ + nr_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE); + mhi_netdev->pool_size = 1 << __ilog2_u32(nr_tre); + if (nr_tre > mhi_netdev->pool_size) + mhi_netdev->pool_size <<= 1; + mhi_netdev->pool_size <<= 1; + + /* allocate memory pool */ + ret = mhi_netdev_alloc_pool(mhi_netdev); + if (ret) + goto error_start; + + /* link the child node with the parent node if it's a child dev */ + if (p_netdev) + p_netdev->rsc_dev = mhi_netdev; + + /* now we have a pool of buffers allocated, queue to hardware + * by triggering a napi_poll + */ + napi_schedule(mhi_netdev->napi); + + return 0; + +error_start: + if (phandle) + return ret; + + netif_stop_queue(mhi_netdev->ndev); + napi_disable(mhi_netdev->napi); + unregister_netdev(mhi_netdev->ndev); + netif_napi_del(mhi_netdev->napi); + free_netdev(mhi_netdev->ndev); + + return ret; +} + +static const struct mhi_device_id mhi_netdev_match_table[] = { + { .chan = "IP_HW0" }, + { .chan = "IP_HW_ADPL" }, + { .chan = "IP_HW0_RSC" }, + { .chan = "IP_SW0" }, + {}, +}; + +static struct mhi_driver mhi_netdev_driver = { + .id_table = mhi_netdev_match_table, + .probe = mhi_netdev_probe, + .remove = mhi_netdev_remove, + .ul_xfer_cb = mhi_netdev_xfer_ul_cb, + .dl_xfer_cb = mhi_netdev_xfer_dl_cb, + .status_cb = mhi_netdev_status_cb, + .driver = { + .name = "mhi_netdev", + .owner = THIS_MODULE, + } +}; + +static int __init mhi_netdev_init(void) +{ + BUILD_BUG_ON(sizeof(struct mhi_netbuf) > MAX_NETBUF_SIZE); + mhi_netdev_create_debugfs_dir(); + + return mhi_driver_register(&mhi_netdev_driver); +} +//module_init(mhi_netdev_init); + +int __init mhi_device_netdev_init(struct
dentry *parent) +{ + BUILD_BUG_ON(sizeof(struct mhi_netbuf) > MAX_NETBUF_SIZE); + + return mhi_netdev_init(); +} + +void mhi_device_netdev_exit(void) +{ +#ifdef CONFIG_DEBUG_FS + debugfs_remove_recursive(dentry); +#endif + mhi_driver_unregister(&mhi_netdev_driver); +} + diff --git a/package/wwan/driver/quectel_MHI/src/devices/mhi_netdev_quectel.c b/package/wwan/driver/quectel_MHI/src/devices/mhi_netdev_quectel.c new file mode 100644 index 000000000..dbf3d6d87 --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/devices/mhi_netdev_quectel.c @@ -0,0 +1,2865 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../core/mhi.h" + +//#define MHI_NETDEV_ONE_CARD_MODE + +#ifndef ETH_P_MAP +#define ETH_P_MAP 0xDA1A +#endif + +#if (ETH_P_MAP == 0x00F9) +#undef ETH_P_MAP +#define ETH_P_MAP 0xDA1A +#endif + +#ifndef ARPHRD_RAWIP +#define ARPHRD_RAWIP ARPHRD_NONE +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 4,2,0 )) +static bool netdev_is_rx_handler_busy(struct net_device *dev) +{ + ASSERT_RTNL(); + return dev && rtnl_dereference(dev->rx_handler); +} +#endif + +struct rmnet_nss_cb { + int (*nss_create)(struct net_device *dev); + int (*nss_free)(struct net_device *dev); + int (*nss_tx)(struct sk_buff *skb); +}; +static struct rmnet_nss_cb __read_mostly *nss_cb = NULL; +#if defined(CONFIG_PINCTRL_IPQ807x) || defined(CONFIG_PINCTRL_IPQ5018) +#ifdef CONFIG_RMNET_DATA +#define CONFIG_QCA_NSS_DRV +/* define at qsdk/qca/src/linux-4.4/net/rmnet_data/rmnet_data_main.c */ +/* set at qsdk/qca/src/data-kernel/drivers/rmnet-nss/rmnet_nss.c */ +extern struct rmnet_nss_cb *rmnet_nss_callbacks __rcu __read_mostly; +#endif +#endif + +static const unsigned char node_id[ETH_ALEN] = {0x02, 0x50, 0xf4, 0x00, 0x00, 0x00}; +static const unsigned char default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3, 0x00, 0x00, 0x00}; + +#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) +#define QUECTEL_BRIDGE_MODE +#endif + +#define QUECTEL_RMNET_MODE + +#ifdef QUECTEL_BRIDGE_MODE +static uint __read_mostly bridge_mode = 0/*|BIT(1)*/; +module_param( bridge_mode, uint, S_IRUGO ); +#endif + +struct qmap_hdr { + u8 cd_rsvd_pad; + u8 mux_id; + u16 pkt_len; +} __packed; +#define QUECTEL_QMAP_MUX_ID 0x81 + +enum rmnet_map_v5_header_type { + RMNET_MAP_HEADER_TYPE_UNKNOWN, + RMNET_MAP_HEADER_TYPE_COALESCING = 0x1, + RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD = 0x2, + RMNET_MAP_HEADER_TYPE_ENUM_LENGTH +}; + +enum rmnet_map_commands { + RMNET_MAP_COMMAND_NONE, + RMNET_MAP_COMMAND_FLOW_DISABLE, + RMNET_MAP_COMMAND_FLOW_ENABLE, + RMNET_MAP_COMMAND_FLOW_START = 7, + RMNET_MAP_COMMAND_FLOW_END = 8, + /* These should always be the last 2 elements */ + RMNET_MAP_COMMAND_UNKNOWN, + RMNET_MAP_COMMAND_ENUM_LENGTH +}; + +#define RMNET_MAP_COMMAND_REQUEST 0 +#define RMNET_MAP_COMMAND_ACK 1 +#define RMNET_MAP_COMMAND_UNSUPPORTED 2 +#define 
RMNET_MAP_COMMAND_INVALID 3 + +/* Main QMAP header */ +struct rmnet_map_header { + u8 pad_len:6; + u8 next_hdr:1; + u8 cd_bit:1; + u8 mux_id; + __be16 pkt_len; +} __aligned(1); + +/* QMAP v5 headers */ +struct rmnet_map_v5_csum_header { + u8 next_hdr:1; + u8 header_type:7; + u8 hw_reserved:7; + u8 csum_valid_required:1; + __be16 reserved; +} __aligned(1); + +struct rmnet_map_control_command { + u8 command_name; + u8 cmd_type:2; + u8 reserved:6; + u16 reserved2; + u32 transaction_id; + union { + struct { + u8 reserved2; + u8 ip_family:2; + u8 reserved:6; + __be16 flow_control_seq_num; + __be32 qos_id; + } flow_control; + u8 data[0]; + }; +} __aligned(1); + +struct mhi_mbim_hdr { + struct usb_cdc_ncm_nth16 nth16; + struct usb_cdc_ncm_ndp16 ndp16; + struct usb_cdc_ncm_dpe16 dpe16[2]; +} __attribute__ ((packed)); + +#define QCUSB_MRECEIVE_MAX_BUFFER_SIZE (1024*32) //maybe 31KB is enough +#define QCUSB_MTRANSMIT_MAX_BUFFER_SIZE (1024*16) +#define NTB_OUT_MAX_DATAGRAMS 16 + +static const struct usb_cdc_ncm_ntb_parameters ncmNTBParams = { + .bmNtbFormatsSupported = USB_CDC_NCM_NTB16_SUPPORTED, + .dwNtbInMaxSize = QCUSB_MRECEIVE_MAX_BUFFER_SIZE, + .wNdpInDivisor = 0x04, + .wNdpInPayloadRemainder = 0x0, + .wNdpInAlignment = 0x4, + + .dwNtbOutMaxSize = QCUSB_MTRANSMIT_MAX_BUFFER_SIZE, + .wNdpOutDivisor = 0x04, + .wNdpOutPayloadRemainder = 0x0, + .wNdpOutAlignment = 0x4, + .wNtbOutMaxDatagrams = NTB_OUT_MAX_DATAGRAMS, +}; + +#if 0 +static void qmap_hex_dump(const char *tag, unsigned char *data, unsigned len) { + uint i; + uint *d = (uint *)data; + + printk(KERN_DEBUG "%s data=%p, len=%x\n", tag, data, len); + len = (len+3)/4; + for (i = 0; i < len; i+=4) { + printk(KERN_DEBUG "%08x %08x %08x %08x %08x\n", i*4, d[i+0], d[i+1], d[i+2], d[i+3]); + } +} +#else +static void qmap_hex_dump(const char *tag, unsigned char *data, unsigned len) { +} +#endif + +static uint __read_mostly mhi_mbim_enabled = 0; +module_param(mhi_mbim_enabled, uint, S_IRUGO); +int mhi_netdev_mbin_enabled(void) { return mhi_mbim_enabled; } + +static uint __read_mostly qmap_mode = 1; +module_param(qmap_mode, uint, S_IRUGO); + +static uint __read_mostly poll_weight = NAPI_POLL_WEIGHT; +module_param(poll_weight, uint, S_IRUGO); + +#define MHI_NETDEV_DRIVER_NAME "mhi_netdev" +#define WATCHDOG_TIMEOUT (30 * HZ) + +#define MSG_VERB(fmt, ...) do { \ + if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_VERBOSE) \ + pr_err("[D][%s] " fmt, __func__, ##__VA_ARGS__);\ +} while (0) + +#define MHI_ASSERT(cond, msg) do { \ + if (cond) { \ + MSG_ERR(msg); \ + WARN_ON(cond); \ + } \ +} while (0) + +#define MSG_LOG(fmt, ...) do { \ + if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_INFO) \ + pr_err("[I][%s] " fmt, __func__, ##__VA_ARGS__);\ +} while (0) + +#define MSG_ERR(fmt, ...) 
do { \ + if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_ERROR) \ + pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \ +} while (0) + +struct mhi_stats { + u32 rx_int; + u32 tx_full; + u32 tx_pkts; + u32 rx_budget_overflow; + u32 tx_allocated; + u32 rx_allocated; + u32 alloc_failed; +}; + +/* important: do not exceed sk_buf->cb (48 bytes) */ +struct mhi_skb_priv { + void *buf; + size_t size; + struct mhi_netdev *bind_netdev; +}; + +struct skb_data { /* skb->cb is one of these */ + struct mhi_netdev *bind_netdev; + unsigned int length; + unsigned int packets; +}; + +#define MHI_NETDEV_STATUS64 1 + +typedef struct { + uint size; + uint rx_urb_size; + uint ep_type; + uint iface_id; + uint MuxId; + uint ul_data_aggregation_max_datagrams; //0x17 + uint ul_data_aggregation_max_size ;//0x18 + uint dl_minimum_padding; //0x1A +} QMAP_SETTING; + +typedef struct { + unsigned int size; + unsigned int rx_urb_size; + unsigned int ep_type; + unsigned int iface_id; + unsigned int qmap_mode; + unsigned int qmap_version; + unsigned int dl_minimum_padding; + char ifname[8][16]; + unsigned char mux_id[8]; +} RMNET_INFO; + +typedef struct { + u16 tx_seq; + u16 rx_seq; + u32 rx_max; +} MHI_MBIM_CTX; + +enum mhi_net_type { + MHI_NET_UNKNOW, + MHI_NET_RMNET, + MHI_NET_MBIM, + MHI_NET_ETHER +}; + +//#define TS_DEBUG +struct mhi_netdev { + int alias; + struct mhi_device *mhi_dev; + spinlock_t rx_lock; + bool enabled; + rwlock_t pm_lock; /* state change lock */ + int (*rx_queue)(struct mhi_netdev *mhi_netdev, gfp_t gfp_t); + struct delayed_work alloc_work; + int wake; + + struct sk_buff_head tx_allocated; + struct sk_buff_head rx_allocated; + struct sk_buff_head qmap_chain; + struct sk_buff_head skb_chain; +#ifdef TS_DEBUG + uint clear_ts; + struct timespec diff_ts; + struct timespec qmap_ts; + struct timespec skb_ts; +#endif + + MHI_MBIM_CTX mbim_ctx; + + u32 mru; + const char *interface_name; + struct napi_struct napi; + struct net_device *ndev; + enum mhi_net_type net_type; + struct sk_buff *frag_skb; + bool recycle_buf; + +#if defined(MHI_NETDEV_STATUS64) + struct pcpu_sw_netstats __percpu *stats64; +#endif + struct mhi_stats stats; + + struct dentry *dentry; + enum MHI_DEBUG_LEVEL msg_lvl; + + struct net_device *mpQmapNetDev[8]; + u32 qmap_mode; + u32 qmap_version; // 5 ~ QMAP V1, 9 ~ QMAP V5 + u32 qmap_size; + u32 link_state; + u32 flow_control; + u32 dl_minimum_padding; + +#ifdef QUECTEL_BRIDGE_MODE + uint bridge_mode; + uint bridge_ipv4; + unsigned char bridge_mac[ETH_ALEN]; +#endif + uint use_rmnet_usb; + RMNET_INFO rmnet_info; + +#if defined(CONFIG_PINCTRL_IPQ5018) + u64 first_jiffy; + u64 bytes_received_1; + u64 bytes_received_2; + u32 cntfrq_per_msec; + bool mhi_rate_control; +#endif + + u32 rmnet_map_command_stats[RMNET_MAP_COMMAND_ENUM_LENGTH]; +}; + +struct mhi_netdev_priv { + struct mhi_netdev *mhi_netdev; +}; + +struct qmap_priv { + void *pQmapDev; + struct net_device *real_dev; + struct net_device *self_dev; + u8 offset_id; + u8 mux_id; + u8 qmap_version; // 5~v1, 9~v5 + +#if defined(MHI_NETDEV_STATUS64) + struct pcpu_sw_netstats __percpu *stats64; +#endif + + spinlock_t agg_lock; + struct sk_buff *agg_skb; + unsigned agg_count; + struct timespec64 agg_time; + struct hrtimer agg_hrtimer; + struct work_struct agg_wq; + +#ifdef QUECTEL_BRIDGE_MODE + uint bridge_mode; + uint bridge_ipv4; + unsigned char bridge_mac[ETH_ALEN]; +#endif + uint use_qca_nss; +}; + +static struct mhi_netdev *ndev_to_mhi(struct net_device *ndev) { + struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(ndev); + struct mhi_netdev 
*mhi_netdev = mhi_netdev_priv->mhi_netdev; + return mhi_netdev; +} + +static struct mhi_driver mhi_netdev_driver; +static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev); + +#if 0 +static void mhi_netdev_skb_destructor(struct sk_buff *skb) +{ + struct mhi_skb_priv *skb_priv = (struct mhi_skb_priv *)(skb->cb); + struct mhi_netdev *mhi_netdev = skb_priv->mhi_netdev; + + skb->data = skb->head; + skb_reset_tail_pointer(skb); + skb->len = 0; + MHI_ASSERT(skb->data != skb_priv->buf, "incorrect buf"); + skb_queue_tail(&mhi_netdev->rx_allocated, skb); +} +#endif + +#ifdef QUECTEL_BRIDGE_MODE +static const struct net_device_ops mhi_netdev_ops_ip; +static const struct net_device_ops rmnet_vnd_ops; + +static int is_qmap_netdev(const struct net_device *ndev) { + return ndev->netdev_ops == &rmnet_vnd_ops; +} + +static int bridge_arp_reply(struct net_device *net, struct sk_buff *skb, uint bridge_ipv4) { + struct arphdr *parp; + u8 *arpptr, *sha; + u8 sip[4], tip[4], ipv4[4]; + struct sk_buff *reply = NULL; + + ipv4[0] = (bridge_ipv4 >> 24) & 0xFF; + ipv4[1] = (bridge_ipv4 >> 16) & 0xFF; + ipv4[2] = (bridge_ipv4 >> 8) & 0xFF; + ipv4[3] = (bridge_ipv4 >> 0) & 0xFF; + + parp = arp_hdr(skb); + + if (parp->ar_hrd == htons(ARPHRD_ETHER) && parp->ar_pro == htons(ETH_P_IP) + && parp->ar_op == htons(ARPOP_REQUEST) && parp->ar_hln == 6 && parp->ar_pln == 4) { + arpptr = (u8 *)parp + sizeof(struct arphdr); + sha = arpptr; + arpptr += net->addr_len; /* sha */ + memcpy(sip, arpptr, sizeof(sip)); + arpptr += sizeof(sip); + arpptr += net->addr_len; /* tha */ + memcpy(tip, arpptr, sizeof(tip)); + + pr_info("%s sip = %d.%d.%d.%d, tip=%d.%d.%d.%d, ipv4=%d.%d.%d.%d\n", netdev_name(net), + sip[0], sip[1], sip[2], sip[3], tip[0], tip[1], tip[2], tip[3], ipv4[0], ipv4[1], ipv4[2], ipv4[3]); + //wwan0 sip = 10.151.137.255, tip=10.151.138.0, ipv4=10.151.137.255 + if (tip[0] == ipv4[0] && tip[1] == ipv4[1] && (tip[2]&0xFC) == (ipv4[2]&0xFC) && tip[3] != ipv4[3]) + reply = arp_create(ARPOP_REPLY, ETH_P_ARP, *((__be32 *)sip), net, *((__be32 *)tip), sha, default_modem_addr, sha); + + if (reply) { + skb_reset_mac_header(reply); + __skb_pull(reply, skb_network_offset(reply)); + reply->ip_summed = CHECKSUM_UNNECESSARY; + reply->pkt_type = PACKET_HOST; + + netif_rx_ni(reply); + } + return 1; + } + + return 0; +} + +static struct sk_buff *bridge_mode_tx_fixup(struct net_device *net, struct sk_buff *skb, uint bridge_ipv4, unsigned char *bridge_mac) { + struct ethhdr *ehdr; + const struct iphdr *iph; + + skb_reset_mac_header(skb); + ehdr = eth_hdr(skb); + + if (ehdr->h_proto == htons(ETH_P_ARP)) { + if (bridge_ipv4) + bridge_arp_reply(net, skb, bridge_ipv4); + return NULL; + } + + iph = ip_hdr(skb); + //DBG("iphdr: "); + //PrintHex((void *)iph, sizeof(struct iphdr)); + +// 1 0.000000000 0.0.0.0 255.255.255.255 DHCP 362 DHCP Request - Transaction ID 0xe7643ad7 + if (ehdr->h_proto == htons(ETH_P_IP) && iph->protocol == IPPROTO_UDP && iph->saddr == 0x00000000 && iph->daddr == 0xFFFFFFFF) { + //if (udp_hdr(skb)->dest == htons(67)) //DHCP Request + { + memcpy(bridge_mac, ehdr->h_source, ETH_ALEN); + pr_info("%s PC Mac Address: %02x:%02x:%02x:%02x:%02x:%02x\n", netdev_name(net), + bridge_mac[0], bridge_mac[1], bridge_mac[2], bridge_mac[3], bridge_mac[4], bridge_mac[5]); + } + } + + if (memcmp(ehdr->h_source, bridge_mac, ETH_ALEN)) { + return NULL; + } + + return skb; +} + +static void bridge_mode_rx_fixup(struct mhi_netdev *mhi_netdev, struct net_device *net, struct sk_buff *skb) { + uint bridge_mode = 0; + unsigned char 
*bridge_mac; + + if (mhi_netdev->qmap_mode > 0) { + struct qmap_priv *priv = netdev_priv(net); + bridge_mode = priv->bridge_mode; + bridge_mac = priv->bridge_mac; + } + else { + bridge_mode = mhi_netdev->bridge_mode; + bridge_mac = mhi_netdev->bridge_mac; + } + + if (bridge_mode) + memcpy(eth_hdr(skb)->h_dest, bridge_mac, ETH_ALEN); +} + +static ssize_t bridge_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct net_device *ndev = to_net_dev(dev); + uint bridge_mode = 0; + + if (is_qmap_netdev(ndev)) { + struct qmap_priv *priv = netdev_priv(ndev); + bridge_mode = priv->bridge_mode; + } + else { + struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev); + bridge_mode = mhi_netdev->bridge_mode; + } + + return snprintf(buf, PAGE_SIZE, "%u\n", bridge_mode); +} + +static ssize_t bridge_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + struct net_device *ndev = to_net_dev(dev); + uint bridge_mode = simple_strtoul(buf, NULL, 0); + + if (ndev->type != ARPHRD_ETHER) { + if (bridge_mode) + netdev_err(ndev, "netdevice is not ARPHRD_ETHER\n"); + return count; + } + + if (is_qmap_netdev(ndev)) { + struct qmap_priv *priv = netdev_priv(ndev); + priv->bridge_mode = bridge_mode; + } + else { + struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev); + mhi_netdev->bridge_mode = bridge_mode; + } + + return count; +} + + +static ssize_t bridge_ipv4_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct net_device *ndev = to_net_dev(dev); + unsigned int bridge_ipv4 = 0; + unsigned char ipv4[4]; + + if (is_qmap_netdev(ndev)) { + struct qmap_priv *priv = netdev_priv(ndev); + bridge_ipv4 = priv->bridge_ipv4; + } + else { + struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev); + bridge_ipv4 = mhi_netdev->bridge_ipv4; + } + + ipv4[0] = (bridge_ipv4 >> 24) & 0xFF; + ipv4[1] = (bridge_ipv4 >> 16) & 0xFF; + ipv4[2] = (bridge_ipv4 >> 8) & 0xFF; + ipv4[3] = (bridge_ipv4 >> 0) & 0xFF; + + return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n", ipv4[0], ipv4[1], ipv4[2], ipv4[3]); +} + +static ssize_t bridge_ipv4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + struct net_device *ndev = to_net_dev(dev); + + if (is_qmap_netdev(ndev)) { + struct qmap_priv *priv = netdev_priv(ndev); + priv->bridge_ipv4 = simple_strtoul(buf, NULL, 16); + } + else { + struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev); + mhi_netdev->bridge_ipv4 = simple_strtoul(buf, NULL, 16); + } + + return count; +} + +static DEVICE_ATTR(bridge_mode, S_IWUSR | S_IRUGO, bridge_mode_show, bridge_mode_store); +static DEVICE_ATTR(bridge_ipv4, S_IWUSR | S_IRUGO, bridge_ipv4_show, bridge_ipv4_store); + +#ifndef MHI_NETDEV_ONE_CARD_MODE +static struct attribute *pcie_mhi_qmap_sysfs_attrs[] = { + &dev_attr_bridge_mode.attr, + &dev_attr_bridge_ipv4.attr, + NULL, +}; + +static struct attribute_group pcie_mhi_qmap_sysfs_attr_group = { + .attrs = pcie_mhi_qmap_sysfs_attrs, +}; +#endif +#endif + +static struct sk_buff * add_mbim_hdr(struct sk_buff *skb, u8 mux_id) { + struct mhi_mbim_hdr *mhdr; + __le32 sign; + u8 *c; + u16 tci = mux_id - QUECTEL_QMAP_MUX_ID; + unsigned int skb_len = skb->len; + + if (qmap_mode > 1) + tci += 1; //rmnet_mhi0.X map to session X + + if (skb_headroom(skb) < sizeof(struct mhi_mbim_hdr)) { + printk("skb_headroom small! 
headroom is %u, need %zd\n", skb_headroom(skb), sizeof(struct mhi_mbim_hdr));
+        return NULL;
+    }
+
+    skb_push(skb, sizeof(struct mhi_mbim_hdr));
+
+    mhdr = (struct mhi_mbim_hdr *)skb->data;
+
+    //printk("%s %p\n", __func__, skb->data);
+    mhdr->nth16.dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
+    mhdr->nth16.wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
+/*
+    Sequence number. The transmitter of a block shall set this to zero in the first NTB transferred after every 'function reset' event,
+    and shall increment for every NTB subsequently transferred.
+    The effect of an out-of-sequence block on the receiver is not specified.
+    The specification allows the receiver to decide whether to check the sequence number,
+    and to decide how to respond if it is incorrect. The sequence number is primarily supplied for debugging purposes.
+*/
+    //mhdr->nth16.wSequence = cpu_to_le16(mhi_netdev->tx_seq++);
+/*
+    Size of this NTB in bytes. Represented in little-endian form.
+    NTB size (IN/OUT) shall not exceed dwNtbInMaxSize or dwNtbOutMaxSize respectively
+*/
+    mhdr->nth16.wBlockLength = cpu_to_le16(skb->len);
+/*
+    Offset, in little endian, of the first NDP16 from byte zero of the NTB.
+    This value must be a multiple of 4, and must be >= 0x000C
+*/
+    mhdr->nth16.wNdpIndex = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
+
+    sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
+    c = (u8 *)&sign;
+    c[3] = tci;
+
+    mhdr->ndp16.dwSignature = sign;
+    mhdr->ndp16.wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16) * 2);
+    mhdr->ndp16.wNextNdpIndex = 0;
+
+    mhdr->ndp16.dpe16[0].wDatagramIndex = sizeof(struct mhi_mbim_hdr);
+    mhdr->ndp16.dpe16[0].wDatagramLength = skb_len;
+
+    mhdr->ndp16.dpe16[1].wDatagramIndex = 0;
+    mhdr->ndp16.dpe16[1].wDatagramLength = 0;
+
+    return skb;
+}
+
+static struct sk_buff * add_qhdr(struct sk_buff *skb, u8 mux_id) {
+    struct qmap_hdr *qhdr;
+    int pad = 0;
+
+    pad = skb->len%4;
+    if (pad) {
+        pad = 4 - pad;
+        if (skb_tailroom(skb) < pad) {
+            printk("skb_tailroom small!\n");
+            pad = 0;
+        }
+        if (pad)
+            __skb_put(skb, pad);
+    }
+
+    qhdr = (struct qmap_hdr *)skb_push(skb, sizeof(struct qmap_hdr));
+    qhdr->cd_rsvd_pad = pad;
+    qhdr->mux_id = mux_id;
+    qhdr->pkt_len = cpu_to_be16(skb->len - sizeof(struct qmap_hdr));
+
+    return skb;
+}
+
+static struct sk_buff * add_qhdr_v5(struct sk_buff *skb, u8 mux_id) {
+    struct rmnet_map_header *map_header;
+    struct rmnet_map_v5_csum_header *ul_header;
+    u32 padding, map_datalen;
+
+    map_datalen = skb->len;
+    padding = map_datalen%4;
+    if (padding) {
+        padding = 4 - padding;
+        if (skb_tailroom(skb) < padding) {
+            printk("skb_tailroom small!\n");
+            padding = 0;
+        }
+        if (padding)
+            __skb_put(skb, padding);
+    }
+
+    map_header = (struct rmnet_map_header *)skb_push(skb, (sizeof(struct rmnet_map_header) + sizeof(struct rmnet_map_v5_csum_header)));
+    map_header->cd_bit = 0;
+    map_header->next_hdr = 1;
+    map_header->pad_len = padding;
+    map_header->mux_id = mux_id;
+    map_header->pkt_len = htons(map_datalen + padding);
+
+    ul_header = (struct rmnet_map_v5_csum_header *)(map_header + 1);
+    memset(ul_header, 0, sizeof(*ul_header));
+    ul_header->header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD;
+    if (skb->ip_summed == CHECKSUM_PARTIAL) {
+#if 0 //TODO
+        skb->ip_summed = CHECKSUM_NONE;
+        /* Ask for checksum offloading */
+        ul_header->csum_valid_required = 1;
+#endif
+    }
+
+    return skb;
+}
+
+static void rmnet_map_send_ack(struct mhi_netdev *pQmapDev,
+                   unsigned char type,
+                   struct rmnet_map_header *map_header)
+{ + struct rmnet_map_control_command *cmd; + struct sk_buff *skb; + size_t skb_len = sizeof(struct rmnet_map_header) + sizeof(struct rmnet_map_control_command); + + skb = alloc_skb(skb_len, GFP_ATOMIC); + if (!skb) + return; + + skb_put(skb, skb_len); + memcpy(skb->data, map_header, skb_len); + cmd = (struct rmnet_map_control_command *)(skb->data + sizeof(struct rmnet_map_header)); + cmd->cmd_type = type & 0x03; + skb->protocol = htons(ETH_P_MAP); + skb->dev = pQmapDev->ndev; + dev_queue_xmit(skb); +} + +static int rmnet_data_vnd_do_flow_control(struct net_device *dev, + uint32_t map_flow_id, + uint16_t v4_seq, + uint16_t v6_seq, + int enable) +{ + //TODO + return 0; +} + +static uint8_t rmnet_map_do_flow_control(struct mhi_netdev *pQmapDev, + struct rmnet_map_header *map_header, + int enable) { + struct net_device *ndev = pQmapDev->ndev; + struct rmnet_map_control_command *cmd; + struct net_device *vnd; + uint8_t mux_id; + uint16_t ip_family; + uint16_t fc_seq; + uint32_t qos_id; + int r; + + cmd = (struct rmnet_map_control_command *)(map_header + 1); + + mux_id = map_header->mux_id - QUECTEL_QMAP_MUX_ID; + if (mux_id >= pQmapDev->qmap_mode) { + netdev_info(ndev, "drop qmap unknow mux_id %x\n", map_header->mux_id); + return RMNET_MAP_COMMAND_UNSUPPORTED; + } + + vnd = pQmapDev->mpQmapNetDev[mux_id]; + if (vnd == NULL) { + netdev_info(ndev, "drop qmap unknow mux_id %x\n", map_header->mux_id); + return RMNET_MAP_COMMAND_UNSUPPORTED; + } + + ip_family = cmd->flow_control.ip_family; + fc_seq = ntohs(cmd->flow_control.flow_control_seq_num); + qos_id = ntohl(cmd->flow_control.qos_id); + + if (enable) + pQmapDev->flow_control |= (1 << mux_id); + else + pQmapDev->flow_control &= ~(1 << mux_id); + /* Ignore the ip family and pass the sequence number for both v4 and v6 + * sequence. 
User space does not support creating dedicated flows for
+     * the 2 protocols
+     */
+    r = rmnet_data_vnd_do_flow_control(vnd, qos_id, fc_seq, fc_seq, enable);
+    netdev_dbg(vnd, "qos_id:0x%08X, ip_family:%hd, fc_seq %hd, en:%d",
+           qos_id, ip_family & 3, fc_seq, enable);
+
+    return RMNET_MAP_COMMAND_ACK;
+}
+
+static void rmnet_data_map_command(struct mhi_netdev *pQmapDev,
+                   struct rmnet_map_header *map_header) {
+    struct net_device *ndev = pQmapDev->ndev;
+    struct rmnet_map_control_command *cmd;
+    unsigned char command_name;
+    unsigned char rc = 0;
+
+    cmd = (struct rmnet_map_control_command *)(map_header + 1);
+    command_name = cmd->command_name;
+
+    if (command_name < RMNET_MAP_COMMAND_ENUM_LENGTH)
+        pQmapDev->rmnet_map_command_stats[command_name]++;
+
+    switch (command_name) {
+    case RMNET_MAP_COMMAND_FLOW_ENABLE:
+        rc = rmnet_map_do_flow_control(pQmapDev, map_header, 1);
+        break;
+
+    case RMNET_MAP_COMMAND_FLOW_DISABLE:
+        rc = rmnet_map_do_flow_control(pQmapDev, map_header, 0);
+        break;
+
+    default:
+        pQmapDev->rmnet_map_command_stats[RMNET_MAP_COMMAND_UNKNOWN]++;
+        netdev_info(ndev, "Unsupported MAP command: %d", command_name);
+        rc = RMNET_MAP_COMMAND_UNSUPPORTED;
+        break;
+    }
+
+    if (rc == RMNET_MAP_COMMAND_ACK)
+        rmnet_map_send_ack(pQmapDev, rc, map_header);
+
+    return;
+}
+
+#ifndef MHI_NETDEV_ONE_CARD_MODE
+static void rmnet_vnd_upate_rx_stats(struct net_device *net,
+            unsigned rx_packets, unsigned rx_bytes) {
+#if defined(MHI_NETDEV_STATUS64)
+    struct qmap_priv *dev = netdev_priv(net);
+    struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
+
+    u64_stats_update_begin(&stats64->syncp);
+    stats64->rx_packets += rx_packets;
+    stats64->rx_bytes += rx_bytes;
+    u64_stats_update_end(&stats64->syncp);
+#else
+    net->stats.rx_packets += rx_packets;
+    net->stats.rx_bytes += rx_bytes;
+#endif
+}
+
+static void rmnet_vnd_upate_tx_stats(struct net_device *net,
+            unsigned tx_packets, unsigned tx_bytes) {
+#if defined(MHI_NETDEV_STATUS64)
+    struct qmap_priv *dev = netdev_priv(net);
+    struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
+
+    u64_stats_update_begin(&stats64->syncp);
+    stats64->tx_packets += tx_packets;
+    stats64->tx_bytes += tx_bytes;
+    u64_stats_update_end(&stats64->syncp);
+#else
+    net->stats.tx_packets += tx_packets;
+    net->stats.tx_bytes += tx_bytes;
+#endif
+}
+
+#if defined(MHI_NETDEV_STATUS64)
+static struct rtnl_link_stats64 *_rmnet_vnd_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats)
+{
+    struct qmap_priv *dev = netdev_priv(net);
+    unsigned int start;
+    int cpu;
+
+    netdev_stats_to_stats64(stats, &net->stats);
+
+    if (nss_cb && dev->use_qca_nss) { // rmnet_nss.c:rmnet_nss_tx() will update rx stats
+        stats->rx_packets = 0;
+        stats->rx_bytes = 0;
+    }
+
+    for_each_possible_cpu(cpu) {
+        struct pcpu_sw_netstats *stats64;
+        u64 rx_packets, rx_bytes;
+        u64 tx_packets, tx_bytes;
+
+        stats64 = per_cpu_ptr(dev->stats64, cpu);
+
+        do {
+            start = u64_stats_fetch_begin_irq(&stats64->syncp);
+            rx_packets = stats64->rx_packets;
+            rx_bytes = stats64->rx_bytes;
+            tx_packets = stats64->tx_packets;
+            tx_bytes = stats64->tx_bytes;
+        } while (u64_stats_fetch_retry_irq(&stats64->syncp, start));
+
+        stats->rx_packets += rx_packets;
+        stats->rx_bytes += rx_bytes;
+        stats->tx_packets += tx_packets;
+        stats->tx_bytes += tx_bytes;
+    }
+
+    return stats;
+}
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,10,0 )) //bc1f44709cf27fb2a5766cadafe7e2ad5e9cb221
+static void rmnet_vnd_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats) {
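+    /* bc1f44709cf2 ("net: make ndo_get_stats64 a void function", v4.11)
+     * dropped the return value, hence the two wrapper variants here.
+     */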
+ _rmnet_vnd_get_stats64(net, stats); +} +#else +static struct rtnl_link_stats64 *rmnet_vnd_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats) { + return _rmnet_vnd_get_stats64(net, stats); +} +#endif +#endif + +static void rmnet_vnd_tx_agg_work(struct work_struct *work) +{ + struct qmap_priv *priv = + container_of(work, struct qmap_priv, agg_wq); + struct sk_buff *skb = NULL; + unsigned long flags; + + spin_lock_irqsave(&priv->agg_lock, flags); + if (likely(priv->agg_skb)) { + skb = priv->agg_skb; + priv->agg_skb = NULL; + priv->agg_count = 0; + skb->protocol = htons(ETH_P_MAP); + skb->dev = priv->real_dev; + ktime_get_ts64(&priv->agg_time); + } + spin_unlock_irqrestore(&priv->agg_lock, flags); + + if (skb) + dev_queue_xmit(skb); +} + +static enum hrtimer_restart rmnet_vnd_tx_agg_timer_cb(struct hrtimer *timer) +{ + struct qmap_priv *priv = + container_of(timer, struct qmap_priv, agg_hrtimer); + + schedule_work(&priv->agg_wq); + return HRTIMER_NORESTART; +} + +static int rmnet_vnd_tx_agg(struct sk_buff *skb, struct qmap_priv *priv) { + skb->protocol = htons(ETH_P_MAP); + skb->dev = priv->real_dev; + + return dev_queue_xmit(skb); +} + + +static int rmnet_vnd_open(struct net_device *dev) +{ + struct qmap_priv *priv = netdev_priv(dev); + struct net_device *real_dev = priv->real_dev; + + if (!(priv->real_dev->flags & IFF_UP)) + return -ENETDOWN; + + if (netif_carrier_ok(real_dev)) + netif_carrier_on(dev); + + return 0; +} + +static int rmnet_vnd_stop(struct net_device *pNet) +{ + netif_carrier_off(pNet); + return 0; +} + +static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb, + struct net_device *pNet) +{ + int err; + struct qmap_priv *priv = netdev_priv(pNet); + struct mhi_netdev *mhi_netdev = ndev_to_mhi(priv->real_dev); + int skb_len = skb->len; + + if (netif_queue_stopped(priv->real_dev)) { + netif_stop_queue(pNet); + return NETDEV_TX_BUSY; + } + + //printk("%s 1 skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + if (pNet->type == ARPHRD_ETHER) { + skb_reset_mac_header(skb); + +#ifdef QUECTEL_BRIDGE_MODE + if (priv->bridge_mode && bridge_mode_tx_fixup(pNet, skb, priv->bridge_ipv4, priv->bridge_mac) == NULL) { + dev_kfree_skb_any (skb); + return NETDEV_TX_OK; + } +#endif + + if (skb_pull(skb, ETH_HLEN) == NULL) { + dev_kfree_skb_any (skb); + return NETDEV_TX_OK; + } + } + //printk("%s 2 skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + if (mhi_netdev->net_type == MHI_NET_MBIM) { + if (add_mbim_hdr(skb, priv->mux_id) == NULL) { + dev_kfree_skb_any (skb); + return NETDEV_TX_OK; + } + } + else { + if (priv->qmap_version == 5) { + add_qhdr(skb, priv->mux_id); + } + else if (priv->qmap_version == 9) { + add_qhdr_v5(skb, priv->mux_id); + } + else { + dev_kfree_skb_any (skb); + return NETDEV_TX_OK; + } + } + //printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + + err = rmnet_vnd_tx_agg(skb, priv); + if (err == NET_XMIT_SUCCESS) { + rmnet_vnd_upate_tx_stats(pNet, 1, skb_len); + } else { + pNet->stats.tx_errors++; + } + + return err; +} + +static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu) +{ + if (new_mtu < 0 || new_mtu > 1500) + return -EINVAL; + + rmnet_dev->mtu = new_mtu; + return 0; +} + +/* drivers may override default ethtool_ops in their bind() routine */ +static const struct ethtool_ops rmnet_vnd_ethtool_ops = { + .get_link = ethtool_op_get_link, +}; + +static void 
rmnet_vnd_rawip_setup(struct net_device *rmnet_dev) +{ + rmnet_dev->needed_headroom = 16; + + /* Raw IP mode */ + rmnet_dev->header_ops = NULL; /* No header */ +//for Qualcomm's NSS, must set type as ARPHRD_RAWIP, or NSS performace is very bad. + rmnet_dev->type = ARPHRD_RAWIP; // do not support moify mac, for dev_set_mac_address() need ARPHRD_ETHER + rmnet_dev->hard_header_len = 0; +//for Qualcomm's SFE, do not add IFF_POINTOPOINT to type, or SFE donot work. + rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); +} + +static const struct net_device_ops rmnet_vnd_ops = { + .ndo_open = rmnet_vnd_open, + .ndo_stop = rmnet_vnd_stop, + .ndo_start_xmit = rmnet_vnd_start_xmit, +#if defined(MHI_NETDEV_STATUS64) + .ndo_get_stats64 = rmnet_vnd_get_stats64, +#endif + .ndo_change_mtu = rmnet_vnd_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static rx_handler_result_t qca_nss_rx_handler(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + + if (!skb) + return RX_HANDLER_CONSUMED; + + //printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + + if (skb->pkt_type == PACKET_LOOPBACK) + return RX_HANDLER_PASS; + + /* Check this so that we dont loop around netif_receive_skb */ + if (skb->cb[0] == 1) { + skb->cb[0] = 0; + + return RX_HANDLER_PASS; + } + + if (nss_cb) { + nss_cb->nss_tx(skb); + return RX_HANDLER_CONSUMED; + } + + return RX_HANDLER_PASS; +} +#endif + +static void rmnet_mbim_rx_handler(void *dev, struct sk_buff *skb_in) +{ + struct mhi_netdev *pQmapDev = (struct mhi_netdev *)dev; + struct mhi_netdev *mhi_netdev = (struct mhi_netdev *)dev; + MHI_MBIM_CTX *ctx = &pQmapDev->mbim_ctx; + //struct net_device *ndev = pQmapDev->ndev; + struct usb_cdc_ncm_nth16 *nth16; + int ndpoffset, len; + u16 wSequence; + struct sk_buff_head skb_chain; + struct sk_buff *qmap_skb; + + __skb_queue_head_init(&skb_chain); + + if (skb_in->len < (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16))) { + MSG_ERR("frame too short\n"); + goto error; + } + + nth16 = (struct usb_cdc_ncm_nth16 *)skb_in->data; + + if (nth16->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH16_SIGN)) { + MSG_ERR("invalid NTH16 signature <%#010x>\n", le32_to_cpu(nth16->dwSignature)); + goto error; + } + + len = le16_to_cpu(nth16->wBlockLength); + if (len > ctx->rx_max) { + MSG_ERR("unsupported NTB block length %u/%u\n", len, ctx->rx_max); + goto error; + } + + wSequence = le16_to_cpu(nth16->wSequence); + if (ctx->rx_seq != wSequence) { + MSG_ERR("sequence number glitch prev=%d curr=%d\n", ctx->rx_seq, wSequence); + } + ctx->rx_seq = wSequence + 1; + + ndpoffset = nth16->wNdpIndex; + + while (ndpoffset > 0) { + struct usb_cdc_ncm_ndp16 *ndp16 ; + struct usb_cdc_ncm_dpe16 *dpe16; + int nframes, x; + u8 *c; + u16 tci = 0; + struct net_device *qmap_net; + + if (skb_in->len < (ndpoffset + sizeof(struct usb_cdc_ncm_ndp16))) { + MSG_ERR("invalid NDP offset <%u>\n", ndpoffset); + goto error; + } + + ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset); + + if (le16_to_cpu(ndp16->wLength) < 0x10) { + MSG_ERR("invalid DPT16 length <%u>\n", le16_to_cpu(ndp16->wLength)); + goto error; + } + + nframes = ((le16_to_cpu(ndp16->wLength) - sizeof(struct usb_cdc_ncm_ndp16)) / sizeof(struct usb_cdc_ncm_dpe16)); + + if (skb_in->len < (sizeof(struct usb_cdc_ncm_ndp16) + nframes * (sizeof(struct usb_cdc_ncm_dpe16)))) { + MSG_ERR("Invalid nframes = %d\n", nframes); + goto error; + } + + switch (ndp16->dwSignature & 
cpu_to_le32(0x00ffffff)) { + case cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN): + c = (u8 *)&ndp16->dwSignature; + tci = c[3]; + /* tag IPS<0> packets too if MBIM_IPS0_VID exists */ + //if (!tci && info->flags & FLAG_IPS0_VLAN) + // tci = MBIM_IPS0_VID; + break; + case cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN): + c = (u8 *)&ndp16->dwSignature; + tci = c[3] + 256; + break; + default: + MSG_ERR("unsupported NDP signature <0x%08x>\n", le32_to_cpu(ndp16->dwSignature)); + goto error; + } + + if ((qmap_mode == 1 && tci != 0) || (qmap_mode > 1 && tci > qmap_mode)) { + MSG_ERR("unsupported tci %d by now\n", tci); + goto error; + } + + qmap_net = pQmapDev->mpQmapNetDev[qmap_mode == 1 ? 0 : tci - 1]; + + dpe16 = ndp16->dpe16; + + for (x = 0; x < nframes; x++, dpe16++) { + int offset = le16_to_cpu(dpe16->wDatagramIndex); + int skb_len = le16_to_cpu(dpe16->wDatagramLength); + + if (offset == 0 || skb_len == 0) { + break; + } + + /* sanity checking */ + if (((offset + skb_len) > skb_in->len) || (skb_len > ctx->rx_max)) { + MSG_ERR("invalid frame detected (ignored) x=%d, offset=%d, skb_len=%u\n", x, offset, skb_len); + goto error; + } + + qmap_skb = netdev_alloc_skb(qmap_net, skb_len); + if (!qmap_skb) { + mhi_netdev->stats.alloc_failed++; + //MSG_ERR("skb_clone fail\n"); //do not print in softirq + goto error; + } + + switch (skb_in->data[offset] & 0xf0) { + case 0x40: +#ifdef CONFIG_QCA_NSS_PACKET_FILTER + { + struct iphdr *ip4h = (struct iphdr *)(&skb_in->data[offset]); + if (ip4h->protocol == IPPROTO_ICMP) { + qmap_skb->cb[0] = 1; + } + } +#endif + qmap_skb->protocol = htons(ETH_P_IP); + break; + case 0x60: +#ifdef CONFIG_QCA_NSS_PACKET_FILTER + { + struct ipv6hdr *ip6h = (struct ipv6hdr *)(&skb_in->data[offset]); + if (ip6h->nexthdr == NEXTHDR_ICMP) { + qmap_skb->cb[0] = 1; + } + } +#endif + qmap_skb->protocol = htons(ETH_P_IPV6); + break; + default: + MSG_ERR("unknow skb->protocol %02x\n", skb_in->data[offset]); + goto error; + } + + skb_put(qmap_skb, skb_len); + memcpy(qmap_skb->data, skb_in->data + offset, skb_len); + + skb_reset_transport_header(qmap_skb); + skb_reset_network_header(qmap_skb); + qmap_skb->pkt_type = PACKET_HOST; + skb_set_mac_header(qmap_skb, 0); + + if (qmap_skb->dev->type == ARPHRD_ETHER) { + skb_push(qmap_skb, ETH_HLEN); + skb_reset_mac_header(qmap_skb); + memcpy(eth_hdr(qmap_skb)->h_source, default_modem_addr, ETH_ALEN); + memcpy(eth_hdr(qmap_skb)->h_dest, qmap_net->dev_addr, ETH_ALEN); + eth_hdr(qmap_skb)->h_proto = qmap_skb->protocol; +#ifdef QUECTEL_BRIDGE_MODE + bridge_mode_rx_fixup(pQmapDev, qmap_net, qmap_skb); +#endif + __skb_pull(qmap_skb, ETH_HLEN); + } + +#ifndef MHI_NETDEV_ONE_CARD_MODE + rmnet_vnd_upate_rx_stats(qmap_net, 1, skb_len); +#endif + __skb_queue_tail(&skb_chain, qmap_skb); + } + + /* are there more NDPs to process? 
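+     * (wNextNdpIndex chains any further NDP16 blocks; a zero offset ends the loop)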
*/ + ndpoffset = le16_to_cpu(ndp16->wNextNdpIndex); + } + +error: + while ((qmap_skb = __skb_dequeue (&skb_chain))) { + netif_receive_skb(qmap_skb); + } +} + +static void rmnet_qmi_rx_handler(void *dev, struct sk_buff *skb_in) +{ + struct mhi_netdev *pQmapDev = (struct mhi_netdev *)dev; + struct net_device *ndev = pQmapDev->ndev; + struct sk_buff *qmap_skb; + struct sk_buff_head skb_chain; + uint dl_minimum_padding = 0; + + if (pQmapDev->qmap_version == 9) + dl_minimum_padding = pQmapDev->dl_minimum_padding; + + __skb_queue_head_init(&skb_chain); + + while (skb_in->len > sizeof(struct qmap_hdr)) { + struct rmnet_map_header *map_header = (struct rmnet_map_header *)skb_in->data; + struct rmnet_map_v5_csum_header *ul_header = NULL; + size_t hdr_size = sizeof(struct rmnet_map_header); + struct net_device *qmap_net; + int pkt_len = ntohs(map_header->pkt_len); + int skb_len; + __be16 protocol; + int mux_id; + int skip_nss = 0; + + if (map_header->next_hdr) { + ul_header = (struct rmnet_map_v5_csum_header *)(map_header + 1); + hdr_size += sizeof(struct rmnet_map_v5_csum_header); + } + + skb_len = pkt_len - (map_header->pad_len&0x3F); +#if 0 //just for debug dl_minimum_padding BUG + if ((skb_in->data[hdr_size] & 0xf0) == 0x45) { + struct iphdr *ip4h = (struct iphdr *)(&skb_in->data[hdr_size]); + if (ntohs(ip4h->tot_len) != skb_len) { + netdev_info(ndev, "tot_len=%d skb_len=%d\n", ntohs(ip4h->tot_len), skb_len); + } + } +#endif + skb_len -= dl_minimum_padding; + if (skb_len > 1500) { + netdev_info(ndev, "drop skb_len=%x larger than 1500\n", skb_len); + goto error_pkt; + } + + if (skb_in->len < (pkt_len + hdr_size)) { + netdev_info(ndev, "drop qmap unknow pkt, len=%d, pkt_len=%d\n", skb_in->len, pkt_len); + goto error_pkt; + } + + if (map_header->cd_bit) { + rmnet_data_map_command(pQmapDev, map_header); + goto skip_pkt; + } + + switch (skb_in->data[hdr_size] & 0xf0) { + case 0x40: +#ifdef CONFIG_QCA_NSS_PACKET_FILTER + { + struct iphdr *ip4h = (struct iphdr *)(&skb_in->data[hdr_size]); + if (ip4h->protocol == IPPROTO_ICMP) { + skip_nss = 1; + } + } +#endif + protocol = htons(ETH_P_IP); + break; + case 0x60: +#ifdef CONFIG_QCA_NSS_PACKET_FILTER + { + struct ipv6hdr *ip6h = (struct ipv6hdr *)(&skb_in->data[hdr_size]); + if (ip6h->nexthdr == NEXTHDR_ICMP) { + skip_nss = 1; + } + } +#endif + protocol = htons(ETH_P_IPV6); + break; + default: + netdev_info(ndev, "unknow skb->protocol %02x\n", skb_in->data[hdr_size]); + goto error_pkt; + } + + mux_id = map_header->mux_id - QUECTEL_QMAP_MUX_ID; + if (mux_id >= pQmapDev->qmap_mode) { + netdev_info(ndev, "drop qmap unknow mux_id %x\n", map_header->mux_id); + goto error_pkt; + } + + qmap_net = pQmapDev->mpQmapNetDev[mux_id]; + + if (qmap_net == NULL) { + netdev_info(ndev, "drop qmap unknow mux_id %x\n", map_header->mux_id); + goto skip_pkt; + } + +//for Qualcomm's SFE, do not use skb_clone(), or SFE 's performace is very bad. +//for Qualcomm's NSS, do not use skb_clone(), or NSS 's performace is very bad. 
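+//So each demuxed datagram below is copied into a freshly allocated skb, which
+//also lets the aggregated MAP skb itself be recycled by the caller.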
+ qmap_skb = netdev_alloc_skb(qmap_net, skb_len); + if (qmap_skb) { + skb_put(qmap_skb, skb_len); + memcpy(qmap_skb->data, skb_in->data + hdr_size, skb_len); + } + + if (qmap_skb == NULL) { + pQmapDev->stats.alloc_failed++; + //netdev_info(ndev, "fail to alloc skb, pkt_len = %d\n", skb_len); //do not print in softirq + goto error_pkt; + } + + skb_reset_transport_header(qmap_skb); + skb_reset_network_header(qmap_skb); + qmap_skb->pkt_type = PACKET_HOST; + skb_set_mac_header(qmap_skb, 0); + qmap_skb->protocol = protocol; + + if(skip_nss) + qmap_skb->cb[0] = 1; + + if (ul_header && ul_header->header_type == RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD + && ul_header->csum_valid_required) { +#if 0 //TODO + qmap_skb->ip_summed = CHECKSUM_UNNECESSARY; +#endif + } + + if (qmap_skb->dev->type == ARPHRD_ETHER) { + skb_push(qmap_skb, ETH_HLEN); + skb_reset_mac_header(qmap_skb); + memcpy(eth_hdr(qmap_skb)->h_source, default_modem_addr, ETH_ALEN); + memcpy(eth_hdr(qmap_skb)->h_dest, qmap_net->dev_addr, ETH_ALEN); + eth_hdr(qmap_skb)->h_proto = protocol; +#ifdef QUECTEL_BRIDGE_MODE + bridge_mode_rx_fixup(pQmapDev, qmap_net, qmap_skb); +#endif + __skb_pull(qmap_skb, ETH_HLEN); + } + +#ifndef MHI_NETDEV_ONE_CARD_MODE + rmnet_vnd_upate_rx_stats(qmap_net, 1, skb_len); +#endif + __skb_queue_tail(&skb_chain, qmap_skb); + +skip_pkt: + skb_pull(skb_in, pkt_len + hdr_size); + } + +error_pkt: + while ((qmap_skb = __skb_dequeue (&skb_chain))) { + netif_receive_skb(qmap_skb); + } +} + +#ifndef MHI_NETDEV_ONE_CARD_MODE +static rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + struct mhi_netdev *mhi_netdev; + + if (!skb) + goto done; + + //printk("%s skb=%p, protocol=%x, len=%d\n", __func__, skb, skb->protocol, skb->len); + + if (skb->pkt_type == PACKET_LOOPBACK) + return RX_HANDLER_PASS; + + if (skb->protocol != htons(ETH_P_MAP)) { + WARN_ON(1); + return RX_HANDLER_PASS; + } + /* when open hyfi function, run cm will make system crash */ + //dev = rcu_dereference(skb->dev->rx_handler_data); + mhi_netdev = (struct mhi_netdev *)ndev_to_mhi(skb->dev); + + if (mhi_netdev == NULL) { + WARN_ON(1); + return RX_HANDLER_PASS; + } + + if (mhi_netdev->net_type == MHI_NET_MBIM) + rmnet_mbim_rx_handler(mhi_netdev, skb); + else + rmnet_qmi_rx_handler(mhi_netdev, skb); + + if (!skb_cloned(skb)) { + if (skb_queue_len(&mhi_netdev->rx_allocated) < 128) { + skb->data = skb->head; + skb_reset_tail_pointer(skb); + skb->len = 0; + skb_queue_tail(&mhi_netdev->rx_allocated, skb); + return RX_HANDLER_CONSUMED; + } + } + + consume_skb(skb); + +done: + return RX_HANDLER_CONSUMED; +} + +static struct net_device * rmnet_vnd_register_device(struct mhi_netdev *pQmapDev, u8 offset_id, u8 mux_id) +{ + struct net_device *real_dev = pQmapDev->ndev; + struct net_device *qmap_net; + struct qmap_priv *priv; + int err; + int use_qca_nss = !!nss_cb; + + qmap_net = alloc_etherdev(sizeof(*priv)); + if (!qmap_net) + return NULL; + + SET_NETDEV_DEV(qmap_net, &real_dev->dev); + priv = netdev_priv(qmap_net); + priv->offset_id = offset_id; + priv->real_dev = pQmapDev->ndev; + priv->self_dev = qmap_net; + priv->pQmapDev = pQmapDev; + priv->qmap_version = pQmapDev->qmap_version; + priv->mux_id = mux_id; + sprintf(qmap_net->name, "%s.%d", real_dev->name, offset_id + 1); + memcpy (qmap_net->dev_addr, real_dev->dev_addr, ETH_ALEN); + qmap_net->dev_addr[5] = offset_id + 1; + //eth_random_addr(qmap_net->dev_addr); +#if defined(MHI_NETDEV_STATUS64) + priv->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); + if 
(!priv->stats64) + goto out_free_newdev; +#endif + +#ifdef QUECTEL_BRIDGE_MODE + priv->bridge_mode = !!(pQmapDev->bridge_mode & BIT(offset_id)); + qmap_net->sysfs_groups[0] = &pcie_mhi_qmap_sysfs_attr_group; + if (priv->bridge_mode) + use_qca_nss = 0; +#endif + + priv->agg_skb = NULL; + priv->agg_count = 0; + hrtimer_init(&priv->agg_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + priv->agg_hrtimer.function = rmnet_vnd_tx_agg_timer_cb; + INIT_WORK(&priv->agg_wq, rmnet_vnd_tx_agg_work); + ktime_get_ts64(&priv->agg_time); + spin_lock_init(&priv->agg_lock); + priv->use_qca_nss = 0; + + qmap_net->ethtool_ops = &rmnet_vnd_ethtool_ops; + qmap_net->netdev_ops = &rmnet_vnd_ops; + qmap_net->flags |= IFF_NOARP; + qmap_net->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); + + if (nss_cb && use_qca_nss) { + rmnet_vnd_rawip_setup(qmap_net); + } + + if (pQmapDev->net_type == MHI_NET_MBIM) { + qmap_net->needed_headroom = sizeof(struct mhi_mbim_hdr); + } + + err = register_netdev(qmap_net); + pr_info("%s(%s)=%d\n", __func__, qmap_net->name, err); + if (err == -EEXIST) { + //'ifdown wan' for openwrt, why? + } + if (err < 0) + goto out_free_newdev; + + netif_device_attach (qmap_net); + netif_carrier_off(qmap_net); + + if (nss_cb && use_qca_nss) { + int rc = nss_cb->nss_create(qmap_net); + WARN_ON(rc); + if (rc) { + /* Log, but don't fail the device creation */ + netdev_err(qmap_net, "Device will not use NSS path: %d\n", rc); + } else { + priv->use_qca_nss = 1; + netdev_info(qmap_net, "NSS context created\n"); + rtnl_lock(); + netdev_rx_handler_register(qmap_net, qca_nss_rx_handler, NULL); + rtnl_unlock(); + } + } + + return qmap_net; + +out_free_newdev: + free_netdev(qmap_net); + return qmap_net; +} + +static void rmnet_vnd_unregister_device(struct net_device *qmap_net) { + struct qmap_priv *priv; + unsigned long flags; + + pr_info("%s(%s)\n", __func__, qmap_net->name); + netif_carrier_off(qmap_net); + + priv = netdev_priv(qmap_net); + hrtimer_cancel(&priv->agg_hrtimer); + cancel_work_sync(&priv->agg_wq); + + spin_lock_irqsave(&priv->agg_lock, flags); + if (priv->agg_skb) { + kfree_skb(priv->agg_skb); + priv->agg_skb = NULL; + priv->agg_count = 0; + } + spin_unlock_irqrestore(&priv->agg_lock, flags); + + if (nss_cb && priv->use_qca_nss) { + rtnl_lock(); + netdev_rx_handler_unregister(qmap_net); + rtnl_unlock(); + nss_cb->nss_free(qmap_net); + } +#if defined(MHI_NETDEV_STATUS64) + free_percpu(priv->stats64); +#endif + unregister_netdev (qmap_net); + free_netdev(qmap_net); +} +#endif + +static void rmnet_info_set(struct mhi_netdev *pQmapDev, RMNET_INFO *rmnet_info) +{ + rmnet_info->size = sizeof(RMNET_INFO); + rmnet_info->rx_urb_size = pQmapDev->qmap_size; + rmnet_info->ep_type = 3; //DATA_EP_TYPE_PCIE + rmnet_info->iface_id = 4; + rmnet_info->qmap_mode = pQmapDev->qmap_mode; + rmnet_info->qmap_version = pQmapDev->qmap_version; + rmnet_info->dl_minimum_padding = 0; +} + +static ssize_t qmap_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct net_device *ndev = to_net_dev(dev); + struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev); + + return snprintf(buf, PAGE_SIZE, "%u\n", mhi_netdev->qmap_mode); +} + +static DEVICE_ATTR(qmap_mode, S_IRUGO, qmap_mode_show, NULL); + +static ssize_t qmap_size_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct net_device *ndev = to_net_dev(dev); + struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev); + + return snprintf(buf, PAGE_SIZE, "%u\n", mhi_netdev->qmap_size); +} + +static DEVICE_ATTR(qmap_size, S_IRUGO, qmap_size_show, NULL); 
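+
+/* link_state is a bitmask of per-session carrier state: with qmap_mode > 1,
+ * writing N (1..qmap_mode) marks session N up and writing 0x80+N marks it
+ * down; with a single session, any non-zero write means "up".
+ */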
+static ssize_t link_state_show(struct device *dev, struct device_attribute *attr, char *buf) {
+    struct net_device *ndev = to_net_dev(dev);
+    struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev);
+
+    return snprintf(buf, PAGE_SIZE, "0x%x\n", mhi_netdev->link_state);
+}
+
+static ssize_t link_state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) {
+    struct net_device *ndev = to_net_dev(dev);
+    struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev);
+    //struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
+    unsigned link_state = 0;
+    unsigned old_link = mhi_netdev->link_state;
+    uint offset_id = 0;
+
+    link_state = simple_strtoul(buf, NULL, 0);
+    if (mhi_netdev->qmap_mode > 1) {
+        offset_id = ((link_state&0xF) - 1);
+
+        if (0 < link_state && link_state <= mhi_netdev->qmap_mode)
+            mhi_netdev->link_state |= (1 << offset_id);
+        else if (0x80 < link_state && link_state <= (0x80 + mhi_netdev->qmap_mode))
+            mhi_netdev->link_state &= ~(1 << offset_id);
+    }
+    else {
+        mhi_netdev->link_state = !!link_state;
+    }
+
+    if (old_link != mhi_netdev->link_state) {
+        struct net_device *qmap_net = mhi_netdev->mpQmapNetDev[offset_id];
+
+        if (mhi_netdev->link_state)
+            netif_carrier_on(mhi_netdev->ndev);
+        else {
+            netif_carrier_off(mhi_netdev->ndev);
+        }
+
+        if (qmap_net) {
+            if (mhi_netdev->link_state & (1 << offset_id))
+                netif_carrier_on(qmap_net);
+            else
+                netif_carrier_off(qmap_net);
+        }
+
+        dev_info(dev, "link_state 0x%x -> 0x%x\n", old_link, mhi_netdev->link_state);
+    }
+
+    return count;
+}
+
+static DEVICE_ATTR(link_state, S_IWUSR | S_IRUGO, link_state_show, link_state_store);
+
+static struct attribute *pcie_mhi_sysfs_attrs[] = {
+    &dev_attr_qmap_mode.attr,
+    &dev_attr_qmap_size.attr,
+    &dev_attr_link_state.attr,
+#ifdef QUECTEL_BRIDGE_MODE
+    &dev_attr_bridge_mode.attr,
+    &dev_attr_bridge_ipv4.attr,
+#endif
+    NULL,
+};
+
+static struct attribute_group pcie_mhi_sysfs_attr_group = {
+    .attrs = pcie_mhi_sysfs_attrs,
+};
+
+static void mhi_netdev_upate_rx_stats(struct mhi_netdev *mhi_netdev,
+                      unsigned rx_packets, unsigned rx_bytes) {
+#if defined(MHI_NETDEV_STATUS64)
+    struct pcpu_sw_netstats *stats64 = this_cpu_ptr(mhi_netdev->stats64);
+
+    u64_stats_update_begin(&stats64->syncp);
+    stats64->rx_packets += rx_packets;
+    stats64->rx_bytes += rx_bytes;
+    u64_stats_update_end(&stats64->syncp);
+#else
+    mhi_netdev->ndev->stats.rx_packets += rx_packets;
+    mhi_netdev->ndev->stats.rx_bytes += rx_bytes;
+#endif
+}
+
+static void mhi_netdev_upate_tx_stats(struct mhi_netdev *mhi_netdev,
+                      unsigned tx_packets, unsigned tx_bytes) {
+#if defined(MHI_NETDEV_STATUS64)
+    struct pcpu_sw_netstats *stats64 = this_cpu_ptr(mhi_netdev->stats64);
+
+    u64_stats_update_begin(&stats64->syncp);
+    stats64->tx_packets += tx_packets;
+    stats64->tx_bytes += tx_bytes;
+    u64_stats_update_end(&stats64->syncp);
+#else
+    mhi_netdev->ndev->stats.tx_packets += tx_packets;
+    mhi_netdev->ndev->stats.tx_bytes += tx_bytes;
+#endif
+}
+
+static __be16 mhi_netdev_ip_type_trans(u8 data)
+{
+    __be16 protocol = 0;
+
+    /* determine L3 protocol */
+    switch (data & 0xf0) {
+    case 0x40:
+        protocol = htons(ETH_P_IP);
+        break;
+    case 0x60:
+        protocol = htons(ETH_P_IPV6);
+        break;
+    default:
+        protocol = htons(ETH_P_MAP);
+        break;
+    }
+
+    return protocol;
+}
+
+static int mhi_netdev_alloc_skb(struct mhi_netdev *mhi_netdev, gfp_t gfp_t)
+{
+    u32 cur_mru = mhi_netdev->mru;
+    struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
+    struct mhi_skb_priv *skb_priv;
+    int ret;
+    struct sk_buff *skb;
+    int no_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
+    int i;
+
+    for (i = 0; i < no_tre; i++) {
+        skb = skb_dequeue(&mhi_netdev->rx_allocated);
+        if (!skb) {
+            skb = alloc_skb(/*32+*/cur_mru, gfp_t);
+            if (skb)
+                mhi_netdev->stats.rx_allocated++;
+        }
+        if (!skb)
+            return -ENOMEM;
+
+        read_lock_bh(&mhi_netdev->pm_lock);
+        if (unlikely(!mhi_netdev->enabled)) {
+            MSG_ERR("Interface not enabled\n");
+            ret = -EIO;
+            goto error_queue;
+        }
+
+        skb_priv = (struct mhi_skb_priv *)skb->cb;
+        skb_priv->buf = skb->data;
+        skb_priv->size = cur_mru;
+        skb_priv->bind_netdev = mhi_netdev;
+        skb->dev = mhi_netdev->ndev;
+        //skb_reserve(skb, 32); //for ethernet header
+
+        spin_lock_bh(&mhi_netdev->rx_lock);
+        ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, skb,
+                     skb_priv->size, MHI_EOT);
+        spin_unlock_bh(&mhi_netdev->rx_lock);
+
+        if (ret) {
+            skb_priv->bind_netdev = NULL;
+            MSG_ERR("Failed to queue skb, ret:%d\n", ret);
+            ret = -EIO;
+            goto error_queue;
+        }
+
+        read_unlock_bh(&mhi_netdev->pm_lock);
+    }
+
+    return 0;
+
+error_queue:
+    skb->destructor = NULL;
+    read_unlock_bh(&mhi_netdev->pm_lock);
+    dev_kfree_skb_any(skb);
+
+    return ret;
+}
+
+static void mhi_netdev_alloc_work(struct work_struct *work)
+{
+    struct mhi_netdev *mhi_netdev = container_of(work, struct mhi_netdev,
+                         alloc_work.work);
+    int ret;
+
+    MSG_LOG("Entered\n");
+
+    ret = mhi_netdev_alloc_skb(mhi_netdev, GFP_KERNEL);
+    /* still out of memory: reschedule ourselves and retry once the
+     * system has had time to reclaim freed memory.
+     */
+    if (ret == -ENOMEM) {
+        schedule_delayed_work(&mhi_netdev->alloc_work, msecs_to_jiffies(20));
+        return;
+    }
+
+    MSG_LOG("Exit with status:%d\n", ret);
+}
+
+static void mhi_netdev_dealloc(struct mhi_netdev *mhi_netdev)
+{
+    struct sk_buff *skb;
+
+    skb = skb_dequeue(&mhi_netdev->rx_allocated);
+    while (skb) {
+        skb->destructor = NULL;
+        kfree_skb(skb);
+        skb = skb_dequeue(&mhi_netdev->rx_allocated);
+    }
+}
+
+static int mhi_netdev_poll(struct napi_struct *napi, int budget)
+{
+    struct net_device *dev = napi->dev;
+    struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev);
+    struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev;
+    struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
+    struct sk_buff *skb;
+    int rx_work = 0;
+    int ret;
+
+    MSG_VERB("Entered\n");
+
+    rx_work = mhi_poll(mhi_dev, budget);
+
+    if (rx_work < 0) {
+        MSG_ERR("Error polling ret:%d\n", rx_work);
+        napi_complete(napi);
+        return 0;
+    }
+
+    if (mhi_netdev->net_type == MHI_NET_MBIM || mhi_netdev->net_type == MHI_NET_RMNET) {
+        while ((skb = skb_dequeue (&mhi_netdev->qmap_chain))) {
+#ifdef MHI_NETDEV_ONE_CARD_MODE
+            int recycled = 0;
+
+            mhi_netdev_upate_rx_stats(mhi_netdev, 1, skb->len);
+            if (mhi_netdev->net_type == MHI_NET_MBIM)
+                rmnet_mbim_rx_handler(mhi_netdev, skb);
+            else
+                rmnet_qmi_rx_handler(mhi_netdev, skb);
+
+            if (!skb_cloned(skb)) {
+                if (skb_queue_len(&mhi_netdev->rx_allocated) < 128) {
+                    skb->data = skb->head;
+                    skb_reset_tail_pointer(skb);
+                    skb->len = 0;
+                    skb_queue_tail(&mhi_netdev->rx_allocated, skb);
+                    recycled = 1;
+                }
+            }
+            if (recycled == 0)
+                dev_kfree_skb(skb);
+#else
+            mhi_netdev_upate_rx_stats(mhi_netdev, 1, skb->len);
+            skb->dev = mhi_netdev->ndev;
+            skb->protocol = htons(ETH_P_MAP);
+            netif_receive_skb(skb);
+#endif
+        }
+    }
+    else if (mhi_netdev->net_type == MHI_NET_ETHER) {
+        while ((skb = skb_dequeue (&mhi_netdev->qmap_chain))) {
+
mhi_netdev_upate_rx_stats(mhi_netdev, 1, skb->len); + skb->dev = mhi_netdev->ndev; + skb->protocol = mhi_netdev_ip_type_trans(skb->data[0]); + netif_receive_skb(skb); + } + } + + /* queue new buffers */ + if (!delayed_work_pending(&mhi_netdev->alloc_work)) { + ret = mhi_netdev->rx_queue(mhi_netdev, GFP_ATOMIC); + if (ret == -ENOMEM) { + //MSG_LOG("out of tre, queuing bg worker\n"); //do not print in softirq + mhi_netdev->stats.alloc_failed++; + schedule_delayed_work(&mhi_netdev->alloc_work, msecs_to_jiffies(20)); + } + } + + /* complete work if # of packet processed less than allocated budget */ + if (rx_work < budget) + napi_complete(napi); + + MSG_VERB("polled %d pkts\n", rx_work); + + return rx_work; +} + +static int mhi_netdev_open(struct net_device *ndev) +{ + struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev); + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + + MSG_LOG("Opened net dev interface\n"); + + /* tx queue may not necessarily be stopped already + * so stop the queue if tx path is not enabled + */ + if (!mhi_dev->ul_chan) + netif_stop_queue(ndev); + else + netif_start_queue(ndev); + + return 0; + +} + +static int mhi_netdev_change_mtu(struct net_device *ndev, int new_mtu) +{ + struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev); + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + + if (new_mtu < 0 || mhi_dev->mtu < new_mtu) + return -EINVAL; + + ndev->mtu = new_mtu; + return 0; +} + +static netdev_tx_t mhi_netdev_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev); + struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + int res = 0; + struct skb_data *entry = (struct skb_data *)(skb->cb); + + entry->packets = 1; + entry->length = skb->len; + entry->bind_netdev = mhi_netdev; + + MSG_VERB("Entered\n"); + + //printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + //qmap_hex_dump(__func__, skb->data, 32); + +#ifdef MHI_NETDEV_ONE_CARD_MODE + if (dev->type == ARPHRD_ETHER) { + skb_reset_mac_header(skb); + +#ifdef QUECTEL_BRIDGE_MODE + if (mhi_netdev->bridge_mode && bridge_mode_tx_fixup(dev, skb, mhi_netdev->bridge_ipv4, mhi_netdev->bridge_mac) == NULL) { + dev_kfree_skb_any (skb); + return NETDEV_TX_OK; + } +#endif + + if ((mhi_netdev->net_type == MHI_NET_RMNET || mhi_netdev->net_type == MHI_NET_MBIM) + && (skb_pull(skb, ETH_HLEN) == NULL)) { + dev_kfree_skb_any (skb); + return NETDEV_TX_OK; + } + } + + if (mhi_netdev->net_type == MHI_NET_MBIM) { + if (add_mbim_hdr(skb, QUECTEL_QMAP_MUX_ID) == NULL) { + dev_kfree_skb_any (skb); + return NETDEV_TX_OK; + } + } + else if (mhi_netdev->net_type == MHI_NET_RMNET) { + if (mhi_netdev->qmap_version == 5) { + add_qhdr(skb, QUECTEL_QMAP_MUX_ID); + } + else if (mhi_netdev->qmap_version == 9) { + add_qhdr_v5(skb, QUECTEL_QMAP_MUX_ID); + } + else { + dev_kfree_skb_any (skb); + return NETDEV_TX_OK; + } + } +#else + if ((mhi_netdev->net_type == MHI_NET_RMNET || mhi_netdev->net_type == MHI_NET_MBIM) + && skb->protocol != htons(ETH_P_MAP)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } +#endif + + if (mhi_netdev->net_type == MHI_NET_MBIM) { + struct mhi_mbim_hdr *mhdr = (struct mhi_mbim_hdr *)skb->data; + mhdr->nth16.wSequence = cpu_to_le16(mhi_netdev->mbim_ctx.tx_seq++); + } + + if (unlikely(mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE) < 16)) { + u32 i = 0; + for (i = 0; i < mhi_netdev->qmap_mode; i++) { + struct net_device *qmap_net = 
mhi_netdev->mpQmapNetDev[i]; + if (qmap_net) { + netif_stop_queue(qmap_net); + } + } + + netif_stop_queue(dev); + } + + res = mhi_queue_transfer(mhi_dev, DMA_TO_DEVICE, skb, skb->len, + MHI_EOT); + if (unlikely(res)) { + dev_kfree_skb_any(skb); + dev->stats.tx_errors++; + } + + MSG_VERB("Exited\n"); + + return NETDEV_TX_OK; +} + +#if defined(MHI_NETDEV_STATUS64) +static struct rtnl_link_stats64 * _mhi_netdev_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats) +{ + struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev); + + unsigned int start; + int cpu; + + netdev_stats_to_stats64(stats, &ndev->stats); + + for_each_possible_cpu(cpu) { + struct pcpu_sw_netstats *stats64; + u64 rx_packets, rx_bytes; + u64 tx_packets, tx_bytes; + + stats64 = per_cpu_ptr(mhi_netdev->stats64, cpu); + + do { + start = u64_stats_fetch_begin_irq(&stats64->syncp); + rx_packets = stats64->rx_packets; + rx_bytes = stats64->rx_bytes; + tx_packets = stats64->tx_packets; + tx_bytes = stats64->tx_bytes; + } while (u64_stats_fetch_retry_irq(&stats64->syncp, start)); + + stats->rx_packets += rx_packets; + stats->rx_bytes += rx_bytes; + stats->tx_packets += tx_packets; + stats->tx_bytes += tx_bytes; + } + + return stats; +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,10,0 )) //bc1f44709cf27fb2a5766cadafe7e2ad5e9cb221 +static void mhi_netdev_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats) { + _mhi_netdev_get_stats64(ndev, stats); +} +#else +static struct rtnl_link_stats64 * mhi_netdev_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats) { + _mhi_netdev_get_stats64(ndev, stats); + return stats; +} +#endif +#endif + +static int qmap_setting_store(struct mhi_netdev *mhi_netdev, QMAP_SETTING *qmap_settings, size_t size) { + if (qmap_settings->size != size) { + netdev_err(mhi_netdev->ndev, "ERROR: qmap_settings.size donot match!\n"); + return -EOPNOTSUPP; + } + + mhi_netdev->dl_minimum_padding = qmap_settings->dl_minimum_padding; + + return 0; +} + +static int qmap_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { + struct mhi_netdev *mhi_netdev = ndev_to_mhi(dev); + int rc = -EOPNOTSUPP; + uint link_state = 0; + QMAP_SETTING qmap_settings = {0}; + + switch (cmd) { + case 0x89F1: //SIOCDEVPRIVATE + rc = copy_from_user(&link_state, ifr->ifr_ifru.ifru_data, sizeof(link_state)); + if (!rc) { + char buf[32]; + snprintf(buf, sizeof(buf), "%u", link_state); + link_state_store(&dev->dev, NULL, buf, strlen(buf)); + } + break; + + case 0x89F2: //SIOCDEVPRIVATE + rc = copy_from_user(&qmap_settings, ifr->ifr_ifru.ifru_data, sizeof(qmap_settings)); + if (!rc) { + rc = qmap_setting_store(mhi_netdev, &qmap_settings, sizeof(qmap_settings)); + } + break; + + case 0x89F3: //SIOCDEVPRIVATE + if (mhi_netdev->use_rmnet_usb) { + rc = copy_to_user(ifr->ifr_ifru.ifru_data, &mhi_netdev->rmnet_info, sizeof(RMNET_INFO)); + } + break; + + default: + break; + } + + return rc; +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 5,14,0 )) //b9067f5dc4a07c8e24e01a1b277c6722d91be39e +#define use_ndo_siocdevprivate +#endif +#ifdef use_ndo_siocdevprivate +static int qmap_ndo_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd) { + return qmap_ndo_do_ioctl(dev, ifr, cmd); +} +#endif + +static const struct net_device_ops mhi_netdev_ops_ip = { + .ndo_open = mhi_netdev_open, + .ndo_start_xmit = mhi_netdev_xmit, + //.ndo_do_ioctl = mhi_netdev_ioctl, + .ndo_change_mtu = mhi_netdev_change_mtu, +#if defined(MHI_NETDEV_STATUS64) + .ndo_get_stats64 = mhi_netdev_get_stats64, 
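+    /* totals are folded from the per-cpu counters in _mhi_netdev_get_stats64() */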
+#endif + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = qmap_ndo_do_ioctl, +#ifdef use_ndo_siocdevprivate + .ndo_siocdevprivate = qmap_ndo_siocdevprivate, +#endif +}; + +static void mhi_netdev_get_drvinfo (struct net_device *ndev, struct ethtool_drvinfo *info) +{ + //struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev); + + strlcpy (info->driver, "pcie_mhi", sizeof info->driver); + strlcpy (info->version, PCIE_MHI_DRIVER_VERSION, sizeof info->version); +} + +static const struct ethtool_ops mhi_netdev_ethtool_ops = { + .get_drvinfo = mhi_netdev_get_drvinfo, + .get_link = ethtool_op_get_link, +}; + +static void mhi_netdev_setup(struct net_device *dev) +{ + dev->netdev_ops = &mhi_netdev_ops_ip; + ether_setup(dev); + + dev->ethtool_ops = &mhi_netdev_ethtool_ops; + memcpy (dev->dev_addr, node_id, sizeof node_id); + /* set this after calling ether_setup */ + dev->header_ops = 0; /* No header */ + dev->hard_header_len = 0; + dev->type = ARPHRD_NONE; + dev->addr_len = 0; + dev->flags |= IFF_NOARP; + dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); //the POINTOPOINT flag would make SFE work incorrectly + dev->watchdog_timeo = WATCHDOG_TIMEOUT; + //on OpenWrt, if rmnet_mhi0.1 is set as WAN, '/sbin/netifd' will automatically create a VLAN for rmnet_mhi0 + dev->features |= (NETIF_F_VLAN_CHALLENGED); + +#ifdef MHI_NETDEV_ONE_CARD_MODE + if (mhi_mbim_enabled) { + dev->needed_headroom = sizeof(struct mhi_mbim_hdr); + } +#endif +} + +/* enable mhi_netdev netdev, call only after grabbing mhi_netdev.mutex */ +static int mhi_netdev_enable_iface(struct mhi_netdev *mhi_netdev) +{ + int ret = 0; +#if 0 + char ifalias[IFALIASZ]; +#endif + char ifname[IFNAMSIZ]; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + int no_tre; + + MSG_LOG("Prepare the channels for transfer\n"); + + ret = mhi_prepare_for_transfer(mhi_dev); + if (ret) { + MSG_ERR("Failed to start TX chan ret %d\n", ret); + goto mhi_failed_to_start; + } + + /* first time enabling the node */ + if (!mhi_netdev->ndev) { + struct mhi_netdev_priv *mhi_netdev_priv; + +#if 0 + snprintf(ifalias, sizeof(ifalias), "%s_%04x_%02u.%02u.%02u_%u", + mhi_netdev->interface_name, mhi_dev->dev_id, + mhi_dev->domain, mhi_dev->bus, mhi_dev->slot, + mhi_netdev->alias); +#endif + + snprintf(ifname, sizeof(ifname), "%s%d", + mhi_netdev->interface_name, mhi_netdev->mhi_dev->mhi_cntrl->cntrl_idx); + + rtnl_lock(); +#ifdef NET_NAME_PREDICTABLE + mhi_netdev->ndev = alloc_netdev(sizeof(*mhi_netdev_priv), + ifname, NET_NAME_PREDICTABLE, + mhi_netdev_setup); +#else + mhi_netdev->ndev = alloc_netdev(sizeof(*mhi_netdev_priv), + ifname, + mhi_netdev_setup); +#endif + + if (!mhi_netdev->ndev) { + ret = -ENOMEM; + rtnl_unlock(); + goto net_dev_alloc_fail; + } + + //mhi_netdev->ndev->mtu = mhi_dev->mtu; + SET_NETDEV_DEV(mhi_netdev->ndev, &mhi_dev->dev); +#if 0 + dev_set_alias(mhi_netdev->ndev, ifalias, strlen(ifalias)); +#endif + mhi_netdev_priv = netdev_priv(mhi_netdev->ndev); + mhi_netdev_priv->mhi_netdev = mhi_netdev; + + if (mhi_netdev->net_type == MHI_NET_RMNET || mhi_netdev->net_type == MHI_NET_MBIM) { +#ifdef QUECTEL_BRIDGE_MODE + mhi_netdev->bridge_mode = bridge_mode; +#endif + mhi_netdev->ndev->sysfs_groups[0] = &pcie_mhi_sysfs_attr_group; + } + else if (mhi_netdev->net_type == MHI_NET_ETHER) { + mhi_netdev->ndev->mtu = mhi_netdev->mru; + } + rtnl_unlock(); + + netif_napi_add(mhi_netdev->ndev, &mhi_netdev->napi, mhi_netdev_poll, poll_weight); + ret = register_netdev(mhi_netdev->ndev); + if (ret) { + MSG_ERR("Network device registration failed\n"); + goto
net_dev_reg_fail; + } + + netif_carrier_off(mhi_netdev->ndev); + } + + write_lock_irq(&mhi_netdev->pm_lock); + mhi_netdev->enabled = true; + write_unlock_irq(&mhi_netdev->pm_lock); + + /* queue buffer for rx path */ + no_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE); + ret = mhi_netdev_alloc_skb(mhi_netdev, GFP_KERNEL); + if (ret) + schedule_delayed_work(&mhi_netdev->alloc_work, msecs_to_jiffies(20)); + + napi_enable(&mhi_netdev->napi); + + MSG_LOG("Exited.\n"); + + return 0; + +net_dev_reg_fail: + netif_napi_del(&mhi_netdev->napi); + free_netdev(mhi_netdev->ndev); + mhi_netdev->ndev = NULL; + +net_dev_alloc_fail: + mhi_unprepare_from_transfer(mhi_dev); + +mhi_failed_to_start: + MSG_ERR("Exited ret %d.\n", ret); + + return ret; +} + +static void mhi_netdev_xfer_ul_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev); + struct sk_buff *skb = mhi_result->buf_addr; + struct net_device *ndev = mhi_netdev->ndev; + struct skb_data *entry = (struct skb_data *)(skb->cb); + + if (entry->bind_netdev != mhi_netdev) { + MSG_ERR("%s error!\n", __func__); + return; + } + + if (likely(mhi_result->transaction_status == 0)) { + mhi_netdev_upate_tx_stats(mhi_netdev, entry->packets, entry->length); + + if (netif_queue_stopped(ndev) && mhi_netdev->enabled + && mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE) > 32) { + int i = 0; + + netif_wake_queue(ndev); + for (i = 0; i < mhi_netdev->qmap_mode; i++) { + struct net_device *qmap_net = mhi_netdev->mpQmapNetDev[i]; + if (qmap_net) { + if (netif_queue_stopped(qmap_net)) + netif_wake_queue(qmap_net); + } + } + } + } + + entry->bind_netdev = NULL; + entry->packets = 1; + entry->length = 0; + dev_kfree_skb(skb); +} + +static void mhi_netdev_xfer_dl_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev); + struct sk_buff *skb = mhi_result->buf_addr; + struct mhi_skb_priv *skb_priv = (struct mhi_skb_priv *)(skb->cb); + + if (unlikely(skb_priv->bind_netdev != mhi_netdev)) { + MSG_ERR("%s error!\n", __func__); + return; + } + + if (unlikely(mhi_result->transaction_status)) { + if (mhi_result->transaction_status != -ENOTCONN) + MSG_ERR("%s transaction_status = %d!\n", __func__, mhi_result->transaction_status); + skb_priv->bind_netdev = NULL; + dev_kfree_skb(skb); + return; + } + +#if defined(CONFIG_PINCTRL_IPQ5018) + if (likely(mhi_netdev->mhi_rate_control)) { + u32 time_interval = 0; + u32 time_difference = 0; + u32 cntfrq; + u64 second_jiffy; + u64 bytes_received_2; + struct net_device *ndev = mhi_netdev->ndev; + + if (mhi_netdev->first_jiffy) { + second_jiffy = arch_counter_get_cntvct(); + bytes_received_2 = mhi_netdev->bytes_received_2; + if ((second_jiffy > mhi_netdev->first_jiffy) && + (bytes_received_2 > mhi_netdev->bytes_received_1)) { + + time_difference = (second_jiffy - mhi_netdev->first_jiffy); + time_interval = (time_difference / mhi_netdev->cntfrq_per_msec); + + /* 1.8 Gbps is 225,000,000 bytes per second */ + /* We will sample at 100 ms intervals */ + /* For 1ms 225000 bytes */ + /* For 100ms 22,500,000 bytes */ + /* For 10ms 2,250,000 bytes */ + + /* 1.7 Gbps is 212,500,000 bytes per second */ + /* We will sample at 100 ms intervals */ + /* For 1ms 212500 bytes */ + /* For 100ms 21,250,000 bytes */ + /* For 10ms 2,125,000 bytes */ + + /* 1.6 Gbps is 200,000,000 bytes per second */ + /* We will sample at 100 ms intervals */ + /* For 1ms 200,000 bytes */ + /* For 100ms 20,000,000 bytes */ + /*
For 10ms 2,000,000 bytes */ + + if (time_interval < 100) { + if ((bytes_received_2 - mhi_netdev->bytes_received_1) > 22500000) { + ndev->stats.rx_dropped ++; + dev_kfree_skb(skb); + return; + } + } else { + mhi_netdev->first_jiffy = second_jiffy; + mhi_netdev->bytes_received_1 = bytes_received_2; + } + } else { + mhi_netdev->first_jiffy = second_jiffy; + mhi_netdev->bytes_received_1 = bytes_received_2; + } + } else { + mhi_netdev->first_jiffy = arch_counter_get_cntvct(); + cntfrq = arch_timer_get_cntfrq(); + mhi_netdev->cntfrq_per_msec = cntfrq / 1000; + } + mhi_netdev->bytes_received_2 += mhi_result->bytes_xferd; + } +#endif + +#if 0 + { + static size_t bytes_xferd = 0; + if (mhi_result->bytes_xferd > bytes_xferd) { + bytes_xferd = mhi_result->bytes_xferd; + printk(KERN_DEBUG "bytes_xferd=%zd\n", bytes_xferd); + } + } +#endif + + skb_put(skb, mhi_result->bytes_xferd); + + qmap_hex_dump(__func__, skb->data, skb->len); + + skb_priv->bind_netdev = NULL; + skb_queue_tail(&mhi_netdev->qmap_chain, skb); +} + +static void mhi_netdev_status_cb(struct mhi_device *mhi_dev, enum MHI_CB mhi_cb) +{ + struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev); + + if (mhi_cb != MHI_CB_PENDING_DATA) + return; + + if (napi_schedule_prep(&mhi_netdev->napi)) { + __napi_schedule(&mhi_netdev->napi); + mhi_netdev->stats.rx_int++; + return; + } +} + +#ifdef CONFIG_DEBUG_FS + +struct dentry *mhi_netdev_debugfs_dentry; + +static int mhi_netdev_init_debugfs_states_show(struct seq_file *m, void *d) +{ + struct mhi_netdev *mhi_netdev = m->private; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + +#ifdef TS_DEBUG + struct timespec now_ts, diff_ts; + getnstimeofday(&now_ts); + diff_ts = timespec_sub(now_ts, mhi_netdev->diff_ts); + mhi_netdev->diff_ts = now_ts; +#endif + + seq_printf(m, + "tx_tre:%d rx_tre:%d qmap_chain:%u skb_chain:%u tx_allocated:%u rx_allocated:%u\n", + mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE), + mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE), + mhi_netdev->qmap_chain.qlen, + mhi_netdev->skb_chain.qlen, + mhi_netdev->tx_allocated.qlen, + mhi_netdev->rx_allocated.qlen); + + seq_printf(m, + "netif_queue_stopped:%d, link_state:0x%x, flow_control:0x%x\n", + netif_queue_stopped(mhi_netdev->ndev), mhi_netdev->link_state, mhi_netdev->flow_control); + + seq_printf(m, + "rmnet_map_command_stats: %u, %u, %u, %u, %u, %u, %u, %u, %u, %u\n", + mhi_netdev->rmnet_map_command_stats[RMNET_MAP_COMMAND_NONE], + mhi_netdev->rmnet_map_command_stats[RMNET_MAP_COMMAND_FLOW_DISABLE], + mhi_netdev->rmnet_map_command_stats[RMNET_MAP_COMMAND_FLOW_ENABLE], + mhi_netdev->rmnet_map_command_stats[3], + mhi_netdev->rmnet_map_command_stats[4], + mhi_netdev->rmnet_map_command_stats[5], + mhi_netdev->rmnet_map_command_stats[6], + mhi_netdev->rmnet_map_command_stats[RMNET_MAP_COMMAND_FLOW_START], + mhi_netdev->rmnet_map_command_stats[RMNET_MAP_COMMAND_FLOW_END], + mhi_netdev->rmnet_map_command_stats[RMNET_MAP_COMMAND_UNKNOWN]); + +#ifdef TS_DEBUG + seq_printf(m, + "qmap_ts:%ld.%ld, skb_ts:%ld.%ld, diff_ts:%ld.%ld\n", + mhi_netdev->qmap_ts.tv_sec, mhi_netdev->qmap_ts.tv_nsec, + mhi_netdev->skb_ts.tv_sec, mhi_netdev->skb_ts.tv_nsec, + diff_ts.tv_sec, diff_ts.tv_nsec); + mhi_netdev->clear_ts = 1; +#endif + + return 0; +} + +static int mhi_netdev_init_debugfs_states_open(struct inode *inode, + struct file *fp) +{ + return single_open(fp, mhi_netdev_init_debugfs_states_show, inode->i_private); +} + +static const struct file_operations mhi_netdev_debugfs_state_ops = { + .open = 
mhi_netdev_init_debugfs_states_open, + .release = single_release, + .read = seq_read, +}; + +static int mhi_netdev_debugfs_trigger_reset(void *data, u64 val) +{ + struct mhi_netdev *mhi_netdev = data; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + int ret; + + MSG_LOG("Triggering channel reset\n"); + + /* disable the interface so no data processing */ + write_lock_irq(&mhi_netdev->pm_lock); + mhi_netdev->enabled = false; + write_unlock_irq(&mhi_netdev->pm_lock); + napi_disable(&mhi_netdev->napi); + + /* disable all hardware channels */ + mhi_unprepare_from_transfer(mhi_dev); + + /* clean up all allocated buffers */ + mhi_netdev_dealloc(mhi_netdev); + + MSG_LOG("Restarting iface\n"); + + ret = mhi_netdev_enable_iface(mhi_netdev); + if (ret) + return ret; + + return 0; +} +DEFINE_SIMPLE_ATTRIBUTE(mhi_netdev_debugfs_trigger_reset_fops, NULL, + mhi_netdev_debugfs_trigger_reset, "%llu\n"); + +static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev) +{ + char node_name[32]; + int i; + const umode_t mode = 0600; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + struct dentry *dentry = mhi_netdev_debugfs_dentry; + + const struct { + char *name; + u32 *ptr; + } debugfs_table[] = { + { + "rx_int", + &mhi_netdev->stats.rx_int + }, + { + "tx_full", + &mhi_netdev->stats.tx_full + }, + { + "tx_pkts", + &mhi_netdev->stats.tx_pkts + }, + { + "rx_budget_overflow", + &mhi_netdev->stats.rx_budget_overflow + }, + { + "rx_allocated", + &mhi_netdev->stats.rx_allocated + }, + { + "tx_allocated", + &mhi_netdev->stats.tx_allocated + }, + { + "alloc_failed", + &mhi_netdev->stats.alloc_failed + }, + { + NULL, NULL + }, + }; + + /* Both TX & RX client handles contain the same device info */ + snprintf(node_name, sizeof(node_name), "%s_%04x_%02u.%02u.%02u_%u", + mhi_netdev->interface_name, mhi_dev->dev_id, mhi_dev->domain, + mhi_dev->bus, mhi_dev->slot, mhi_netdev->alias); + + if (IS_ERR_OR_NULL(dentry)) + return; + + mhi_netdev->dentry = debugfs_create_dir(node_name, dentry); + if (IS_ERR_OR_NULL(mhi_netdev->dentry)) + return; + + debugfs_create_u32("msg_lvl", mode, mhi_netdev->dentry, + (u32 *)&mhi_netdev->msg_lvl); + + /* Add debug stats table */ + for (i = 0; debugfs_table[i].name; i++) { + debugfs_create_u32(debugfs_table[i].name, mode, + mhi_netdev->dentry, + debugfs_table[i].ptr); + } + + debugfs_create_file("reset", mode, mhi_netdev->dentry, mhi_netdev, + &mhi_netdev_debugfs_trigger_reset_fops); + debugfs_create_file("states", 0444, mhi_netdev->dentry, mhi_netdev, + &mhi_netdev_debugfs_state_ops); +} + +static void mhi_netdev_create_debugfs_dir(struct dentry *parent) +{ + mhi_netdev_debugfs_dentry = debugfs_create_dir(MHI_NETDEV_DRIVER_NAME, parent); +} + +#else + +static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev) +{ +} + +static void mhi_netdev_create_debugfs_dir(struct dentry *parent) +{ +} + +#endif + +static void mhi_netdev_remove(struct mhi_device *mhi_dev) +{ + struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev); + struct sk_buff *skb; + + MSG_LOG("Remove notification received\n"); + + write_lock_irq(&mhi_netdev->pm_lock); + mhi_netdev->enabled = false; + write_unlock_irq(&mhi_netdev->pm_lock); + + if (mhi_netdev->use_rmnet_usb) { +#ifndef MHI_NETDEV_ONE_CARD_MODE + unsigned i; + + for (i = 0; i < mhi_netdev->qmap_mode; i++) { + if (mhi_netdev->mpQmapNetDev[i]) { + rmnet_vnd_unregister_device(mhi_netdev->mpQmapNetDev[i]); + mhi_netdev->mpQmapNetDev[i] = NULL; + } + } + + rtnl_lock(); + if (netdev_is_rx_handler_busy(mhi_netdev->ndev)) +
netdev_rx_handler_unregister(mhi_netdev->ndev); + rtnl_unlock(); +#endif + } + + while ((skb = skb_dequeue (&mhi_netdev->skb_chain))) + dev_kfree_skb_any(skb); + while ((skb = skb_dequeue (&mhi_netdev->qmap_chain))) + dev_kfree_skb_any(skb); + while ((skb = skb_dequeue (&mhi_netdev->rx_allocated))) + dev_kfree_skb_any(skb); + while ((skb = skb_dequeue (&mhi_netdev->tx_allocated))) + dev_kfree_skb_any(skb); + + napi_disable(&mhi_netdev->napi); + netif_napi_del(&mhi_netdev->napi); + mhi_netdev_dealloc(mhi_netdev); + unregister_netdev(mhi_netdev->ndev); +#if defined(MHI_NETDEV_STATUS64) + free_percpu(mhi_netdev->stats64); +#endif + free_netdev(mhi_netdev->ndev); + flush_delayed_work(&mhi_netdev->alloc_work); + + if (!IS_ERR_OR_NULL(mhi_netdev->dentry)) + debugfs_remove_recursive(mhi_netdev->dentry); +} + +static int mhi_netdev_probe(struct mhi_device *mhi_dev, + const struct mhi_device_id *id) +{ + int ret; + struct mhi_netdev *mhi_netdev; + + mhi_netdev = devm_kzalloc(&mhi_dev->dev, sizeof(*mhi_netdev), + GFP_KERNEL); + if (!mhi_netdev) + return -ENOMEM; + + if (!strcmp(id->chan, "IP_HW0")) { + if (mhi_mbim_enabled) + mhi_netdev->net_type = MHI_NET_MBIM; + else + mhi_netdev->net_type = MHI_NET_RMNET; + } + else if (!strcmp(id->chan, "IP_SW0")) { + mhi_netdev->net_type = MHI_NET_ETHER; + } + else { + return -EINVAL; + } + + mhi_netdev->alias = 0; + + mhi_netdev->mhi_dev = mhi_dev; + mhi_device_set_devdata(mhi_dev, mhi_netdev); + + mhi_netdev->mru = 15360; ///etc/data/qnicorn_config.xml dataformat_agg_dl_size 15*1024 + if (mhi_netdev->net_type == MHI_NET_MBIM) { + mhi_netdev->mru = ncmNTBParams.dwNtbInMaxSize; + mhi_netdev->mbim_ctx.rx_max = mhi_netdev->mru; + } + else if (mhi_netdev->net_type == MHI_NET_ETHER) { + mhi_netdev->mru = 8*1024; + } + mhi_netdev->qmap_size = mhi_netdev->mru; + +#if defined(MHI_NETDEV_STATUS64) + mhi_netdev->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); + if (!mhi_netdev->stats64) + return -ENOMEM; +#endif + + if (!strcmp(id->chan, "IP_HW0")) + mhi_netdev->interface_name = "rmnet_mhi"; + else if (!strcmp(id->chan, "IP_SW0")) + mhi_netdev->interface_name = "mhi_swip"; + else + mhi_netdev->interface_name = id->chan; + + mhi_netdev->qmap_mode = qmap_mode; + mhi_netdev->qmap_version = 5; + mhi_netdev->use_rmnet_usb = 1; + if ((mhi_dev->vendor == 0x17cb && mhi_dev->dev_id == 0x0306) + || (mhi_dev->vendor == 0x17cb && mhi_dev->dev_id == 0x0308) + || (mhi_dev->vendor == 0x1eac && mhi_dev->dev_id == 0x1004) + ) { + mhi_netdev->qmap_version = 9; + } + if (mhi_netdev->net_type == MHI_NET_ETHER) { + mhi_netdev->qmap_mode = 1; + mhi_netdev->qmap_version = 0; + mhi_netdev->use_rmnet_usb = 0; + } + rmnet_info_set(mhi_netdev, &mhi_netdev->rmnet_info); + + mhi_netdev->rx_queue = mhi_netdev_alloc_skb; + + spin_lock_init(&mhi_netdev->rx_lock); + rwlock_init(&mhi_netdev->pm_lock); + INIT_DELAYED_WORK(&mhi_netdev->alloc_work, mhi_netdev_alloc_work); + skb_queue_head_init(&mhi_netdev->qmap_chain); + skb_queue_head_init(&mhi_netdev->skb_chain); + skb_queue_head_init(&mhi_netdev->tx_allocated); + skb_queue_head_init(&mhi_netdev->rx_allocated); + + mhi_netdev->msg_lvl = MHI_MSG_LVL_INFO; + + /* setup network interface */ + ret = mhi_netdev_enable_iface(mhi_netdev); + if (ret) { + pr_err("Error mhi_netdev_enable_iface ret:%d\n", ret); + return ret; + } + + mhi_netdev_create_debugfs(mhi_netdev); + + if (mhi_netdev->net_type == MHI_NET_ETHER) { + mhi_netdev->mpQmapNetDev[0] = mhi_netdev->ndev; + netif_carrier_on(mhi_netdev->ndev); + } + else if (mhi_netdev->use_rmnet_usb) { 
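+ /* QMAP/rmnet mode: with MHI_NETDEV_ONE_CARD_MODE the main netdev itself serves as the single QMAP port; otherwise one rmnet VND is registered per mux ID below and downlink frames are steered to them through the rx handler */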
+#ifdef MHI_NETDEV_ONE_CARD_MODE + mhi_netdev->mpQmapNetDev[0] = mhi_netdev->ndev; + strcpy(mhi_netdev->rmnet_info.ifname[0], mhi_netdev->mpQmapNetDev[0]->name); + mhi_netdev->rmnet_info.mux_id[0] = QUECTEL_QMAP_MUX_ID; +#else + unsigned i; + + for (i = 0; i < mhi_netdev->qmap_mode; i++) { + u8 mux_id = QUECTEL_QMAP_MUX_ID+i; + mhi_netdev->mpQmapNetDev[i] = rmnet_vnd_register_device(mhi_netdev, i, mux_id); + if (mhi_netdev->mpQmapNetDev[i]) { + strcpy(mhi_netdev->rmnet_info.ifname[i], mhi_netdev->mpQmapNetDev[i]->name); + mhi_netdev->rmnet_info.mux_id[i] = mux_id; + } + } + + rtnl_lock(); + /* when the hyfi function is enabled, running the connection manager would make the system crash */ + //netdev_rx_handler_register(mhi_netdev->ndev, rmnet_rx_handler, mhi_netdev); + netdev_rx_handler_register(mhi_netdev->ndev, rmnet_rx_handler, NULL); + rtnl_unlock(); +#endif + } + +#if defined(CONFIG_PINCTRL_IPQ5018) + mhi_netdev->mhi_rate_control = 1; +#endif + + return 0; +} + +static const struct mhi_device_id mhi_netdev_match_table[] = { + { .chan = "IP_HW0" }, + { .chan = "IP_SW0" }, + { .chan = "IP_HW_ADPL" }, + { }, +}; + +static struct mhi_driver mhi_netdev_driver = { + .id_table = mhi_netdev_match_table, + .probe = mhi_netdev_probe, + .remove = mhi_netdev_remove, + .ul_xfer_cb = mhi_netdev_xfer_ul_cb, + .dl_xfer_cb = mhi_netdev_xfer_dl_cb, + .status_cb = mhi_netdev_status_cb, + .driver = { + .name = "mhi_netdev", + .owner = THIS_MODULE, + } +}; + +int __init mhi_device_netdev_init(struct dentry *parent) +{ +#ifdef CONFIG_QCA_NSS_DRV + nss_cb = rcu_dereference(rmnet_nss_callbacks); + if (!nss_cb) { + printk(KERN_ERR "mhi_device_netdev_init: this driver must be loaded after '/etc/modules.d/42-rmnet-nss'\n"); + } +#endif + + mhi_netdev_create_debugfs_dir(parent); + + return mhi_driver_register(&mhi_netdev_driver); +} + +void mhi_device_netdev_exit(void) +{ +#ifdef CONFIG_DEBUG_FS + debugfs_remove_recursive(mhi_netdev_debugfs_dentry); +#endif + mhi_driver_unregister(&mhi_netdev_driver); +} diff --git a/package/wwan/driver/quectel_MHI/src/devices/mhi_satellite.c b/package/wwan/driver/quectel_MHI/src/devices/mhi_satellite.c new file mode 100644 index 000000000..d1071ecbe --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/devices/mhi_satellite.c @@ -0,0 +1,1153 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2019, The Linux Foundation. All rights reserved.*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MHI_SAT_DRIVER_NAME "mhi_satellite" + +static bool mhi_sat_defer_init = true; /* set by default */ + +/* logging macros */ +#define IPC_LOG_PAGES (10) +#define IPC_LOG_LVL (MHI_MSG_LVL_INFO) +#define KLOG_LVL (MHI_MSG_LVL_ERROR) + +#define MHI_SUBSYS_LOG(fmt, ...) do { \ + if (!subsys) \ + break; \ + if (mhi_sat_driver.klog_lvl <= MHI_MSG_LVL_INFO) \ + pr_info("[I][%s][%s] " fmt, __func__, subsys->name, \ + ##__VA_ARGS__);\ + if (subsys->ipc_log && mhi_sat_driver.ipc_log_lvl <= \ + MHI_MSG_LVL_INFO) \ + ipc_log_string(subsys->ipc_log, "[I][%s] " fmt, __func__, \ + ##__VA_ARGS__); \ +} while (0) + +#define MHI_SAT_LOG(fmt, ...)
do { \ + if (!subsys || !sat_cntrl) \ + break; \ + if (mhi_sat_driver.klog_lvl <= MHI_MSG_LVL_INFO) \ + pr_info("[I][%s][%s][%x] " fmt, __func__, subsys->name, \ + sat_cntrl->dev_id, ##__VA_ARGS__);\ + if (subsys->ipc_log && mhi_sat_driver.ipc_log_lvl <= \ + MHI_MSG_LVL_INFO) \ + ipc_log_string(subsys->ipc_log, "[I][%s][%x] " fmt, __func__, \ + sat_cntrl->dev_id, ##__VA_ARGS__); \ +} while (0) + +#define MHI_SAT_ERR(fmt, ...) do { \ + if (!subsys || !sat_cntrl) \ + break; \ + if (mhi_sat_driver.klog_lvl <= MHI_MSG_LVL_ERROR) \ + pr_err("[E][%s][%s][%x] " fmt, __func__, subsys->name, \ + sat_cntrl->dev_id, ##__VA_ARGS__); \ + if (subsys->ipc_log && mhi_sat_driver.ipc_log_lvl <= \ + MHI_MSG_LVL_ERROR) \ + ipc_log_string(subsys->ipc_log, "[E][%s][%x] " fmt, __func__, \ + sat_cntrl->dev_id, ##__VA_ARGS__); \ +} while (0) + +#define MHI_SAT_ASSERT(cond, msg) do { \ + if (cond) \ + panic(msg); \ +} while (0) + +/* mhi sys error command */ +#define MHI_TRE_CMD_SYS_ERR_PTR (0) +#define MHI_TRE_CMD_SYS_ERR_D0 (0) +#define MHI_TRE_CMD_SYS_ERR_D1 (MHI_PKT_TYPE_SYS_ERR_CMD << 16) + +/* mhi state change event */ +#define MHI_TRE_EVT_MHI_STATE_PTR (0) +#define MHI_TRE_EVT_MHI_STATE_D0(state) (state << 24) +#define MHI_TRE_EVT_MHI_STATE_D1 (MHI_PKT_TYPE_STATE_CHANGE_EVENT << 16) + +/* mhi exec env change event */ +#define MHI_TRE_EVT_EE_PTR (0) +#define MHI_TRE_EVT_EE_D0(ee) (ee << 24) +#define MHI_TRE_EVT_EE_D1 (MHI_PKT_TYPE_EE_EVENT << 16) + +/* mhi config event */ +#define MHI_TRE_EVT_CFG_PTR(base_addr) (base_addr) +#define MHI_TRE_EVT_CFG_D0(er_base, num) ((er_base << 16) | (num & 0xFFFF)) +#define MHI_TRE_EVT_CFG_D1 (MHI_PKT_TYPE_CFG_EVENT << 16) + +/* command completion event */ +#define MHI_TRE_EVT_CMD_COMPLETION_PTR(ptr) (ptr) +#define MHI_TRE_EVT_CMD_COMPLETION_D0(code) (code << 24) +#define MHI_TRE_EVT_CMD_COMPLETION_D1 (MHI_PKT_TYPE_CMD_COMPLETION_EVENT << 16) + +/* packet parser macros */ +#define MHI_TRE_GET_PTR(tre) ((tre)->ptr) +#define MHI_TRE_GET_SIZE(tre) ((tre)->dword[0]) +#define MHI_TRE_GET_CCS(tre) (((tre)->dword[0] >> 24) & 0xFF) +#define MHI_TRE_GET_ID(tre) (((tre)->dword[1] >> 24) & 0xFF) +#define MHI_TRE_GET_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) +#define MHI_TRE_IS_ER_CTXT_TYPE(tre) (((tre)->dword[1]) & 0x1) + +/* creates unique device ID based on connection topology */ +#define MHI_SAT_CREATE_DEVICE_ID(dev, domain, bus, slot) \ + ((dev & 0xFFFF) << 16 | (domain & 0xF) << 12 | (bus & 0xFF) << 4 | \ + (slot & 0xF)) + +/* mhi core definitions */ +#define MHI_CTXT_TYPE_GENERIC (0xA) + +struct __packed mhi_generic_ctxt { + u32 reserved0; + u32 type; + u32 reserved1; + u64 ctxt_base; + u64 ctxt_size; + u64 reserved[2]; +}; + +enum mhi_pkt_type { + MHI_PKT_TYPE_INVALID = 0x0, + MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10, + MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11, + MHI_PKT_TYPE_START_CHAN_CMD = 0x12, + MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20, + MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21, + MHI_PKT_TYPE_EE_EVENT = 0x40, + MHI_PKT_TYPE_CTXT_UPDATE_CMD = 0x64, + MHI_PKT_TYPE_IOMMU_MAP_CMD = 0x65, + MHI_PKT_TYPE_CFG_EVENT = 0x6E, + MHI_PKT_TYPE_SYS_ERR_CMD = 0xFF, +}; + +enum mhi_cmd_type { + MHI_CMD_TYPE_RESET = 0x10, + MHI_CMD_TYPE_STOP = 0x11, + MHI_CMD_TYPE_START = 0x12, +}; + +/* mhi event completion codes */ +enum mhi_ev_ccs { + MHI_EV_CC_INVALID = 0x0, + MHI_EV_CC_SUCCESS = 0x1, + MHI_EV_CC_BAD_TRE = 0x11, +}; + +/* satellite subsystem definitions */ +enum subsys_id { + SUBSYS_ADSP, + SUBSYS_CDSP, + SUBSYS_SLPI, + SUBSYS_MODEM, + SUBSYS_MAX, +}; + +static const char * const 
subsys_names[SUBSYS_MAX] = { + [SUBSYS_ADSP] = "adsp", + [SUBSYS_CDSP] = "cdsp", + [SUBSYS_SLPI] = "slpi", + [SUBSYS_MODEM] = "modem", +}; + +struct mhi_sat_subsys { + const char *name; + + struct rpmsg_device *rpdev; /* rpmsg device */ + + /* + * acquire either mutex or spinlock to walk controller list + * acquire both when modifying list + */ + struct list_head cntrl_list; /* controllers list */ + struct mutex cntrl_mutex; /* mutex to walk/modify controllers list */ + spinlock_t cntrl_lock; /* lock to walk/modify controllers list */ + + void *ipc_log; +}; + +/* satellite IPC definitions */ +#define SAT_MAJOR_VERSION (1) +#define SAT_MINOR_VERSION (0) +#define SAT_RESERVED_SEQ_NUM (0xFFFF) +#define SAT_MSG_SIZE(n) (sizeof(struct sat_header) + \ + (n * sizeof(struct sat_tre))) +#define SAT_TRE_SIZE(msg_size) (msg_size - sizeof(struct sat_header)) +#define SAT_TRE_OFFSET(msg) (msg + sizeof(struct sat_header)) +#define SAT_TRE_NUM_PKTS(payload_size) ((payload_size) / sizeof(struct sat_tre)) + +/* satellite IPC msg type */ +enum sat_msg_id { + SAT_MSG_ID_ACK = 0xA, + SAT_MSG_ID_CMD = 0xC, + SAT_MSG_ID_EVT = 0xE, +}; + +/* satellite IPC context type */ +enum sat_ctxt_type { + SAT_CTXT_TYPE_CHAN = 0x0, + SAT_CTXT_TYPE_EVENT = 0x1, + SAT_CTXT_TYPE_MAX, +}; + +/* satellite IPC context string */ +#define TO_SAT_CTXT_TYPE_STR(type) (type >= SAT_CTXT_TYPE_MAX ? "INVALID" : \ + sat_ctxt_str[type]) + +const char * const sat_ctxt_str[SAT_CTXT_TYPE_MAX] = { + [SAT_CTXT_TYPE_CHAN] = "CCA", + [SAT_CTXT_TYPE_EVENT] = "ECA", +}; + +/* satellite IPC transfer ring element */ +struct __packed sat_tre { + u64 ptr; + u32 dword[2]; +}; + +/* satellite IPC header */ +struct __packed sat_header { + u16 major_ver; + u16 minor_ver; + u16 msg_id; + u16 seq; + u16 reply_seq; + u16 payload_size; + u32 dev_id; + u8 reserved[8]; +}; + +/* satellite driver definitions */ +struct mhi_sat_packet { + struct list_head node; + + struct mhi_sat_cntrl *cntrl; /* satellite controller reference */ + void *msg; /* incoming message */ +}; + +struct mhi_sat_cntrl { + struct list_head node; + + struct mhi_controller *mhi_cntrl; /* device MHI controller reference */ + struct mhi_sat_subsys *subsys; + + struct list_head dev_list; + struct list_head addr_map_list; /* IOMMU mapped addresses list */ + struct mutex list_mutex; /* mutex for devices and address map lists */ + + struct list_head packet_list; + spinlock_t pkt_lock; /* lock to walk/modify received packets list */ + + struct work_struct connect_work; /* subsystem connection worker */ + struct work_struct process_work; /* incoming packets processor */ + + /* mhi core/controller configurations */ + u32 dev_id; /* unique device ID with BDF as per connection topology */ + int er_base; /* event rings base index */ + int er_max; /* event rings max index */ + int num_er; /* total number of event rings */ + + /* satellite controller function counts */ + int num_devices; /* mhi devices current count */ + int max_devices; /* count of maximum devices for subsys/controller */ + u16 seq; /* internal sequence number for all outgoing packets */ + bool active; /* flag set if hello packet/MHI_CFG event was sent */ + + /* command completion variables */ + u16 last_cmd_seq; /* sequence number of last sent command packet */ + enum mhi_ev_ccs last_cmd_ccs; /* last command completion event code */ + struct completion completion; /* command completion event wait */ + struct mutex cmd_wait_mutex; /* command completion wait mutex */ +}; + +struct mhi_sat_device { + struct list_head node; + + struct 
mhi_device *mhi_dev; /* mhi device pointer */ + struct mhi_sat_cntrl *cntrl; /* parent controller */ + + bool chan_started; +}; + +struct mhi_sat_driver { + enum MHI_DEBUG_LEVEL ipc_log_lvl; /* IPC log level */ + enum MHI_DEBUG_LEVEL klog_lvl; /* klog/dmesg levels */ + + struct mhi_sat_subsys *subsys; /* pointer to subsystem array */ + unsigned int num_subsys; + + struct dentry *dentry; /* debugfs directory */ + bool deferred_init_done; /* flag for deferred init protection */ +}; + +static struct mhi_sat_driver mhi_sat_driver; + +static struct mhi_sat_subsys *find_subsys_by_name(const char *name) +{ + int i; + struct mhi_sat_subsys *subsys = mhi_sat_driver.subsys; + + for (i = 0; i < mhi_sat_driver.num_subsys; i++, subsys++) { + if (!strcmp(name, subsys->name)) + return subsys; + } + + return NULL; +} + +static struct mhi_sat_cntrl *find_sat_cntrl_by_id(struct mhi_sat_subsys *subsys, + u32 dev_id) +{ + struct mhi_sat_cntrl *sat_cntrl; + unsigned long flags; + + spin_lock_irqsave(&subsys->cntrl_lock, flags); + list_for_each_entry(sat_cntrl, &subsys->cntrl_list, node) { + if (sat_cntrl->dev_id == dev_id) { + spin_unlock_irqrestore(&subsys->cntrl_lock, flags); + return sat_cntrl; + } + } + spin_unlock_irqrestore(&subsys->cntrl_lock, flags); + + return NULL; +} + +static struct mhi_sat_device *find_sat_dev_by_id( + struct mhi_sat_cntrl *sat_cntrl, int id, + enum sat_ctxt_type evt) +{ + struct mhi_sat_device *sat_dev; + int compare_id; + + mutex_lock(&sat_cntrl->list_mutex); + list_for_each_entry(sat_dev, &sat_cntrl->dev_list, node) { + compare_id = (evt == SAT_CTXT_TYPE_EVENT) ? + sat_dev->mhi_dev->dl_event_id : + sat_dev->mhi_dev->dl_chan_id; + + if (compare_id == id) { + mutex_unlock(&sat_cntrl->list_mutex); + return sat_dev; + } + } + mutex_unlock(&sat_cntrl->list_mutex); + + return NULL; +} + +static bool mhi_sat_isvalid_header(struct sat_header *hdr, int len) +{ + /* validate payload size */ + if (len >= sizeof(*hdr) && (len != hdr->payload_size + sizeof(*hdr))) + return false; + + /* validate SAT IPC version */ + if (hdr->major_ver != SAT_MAJOR_VERSION && + hdr->minor_ver != SAT_MINOR_VERSION) + return false; + + /* validate msg ID */ + if (hdr->msg_id != SAT_MSG_ID_CMD && hdr->msg_id != SAT_MSG_ID_EVT) + return false; + + return true; +} + +static int mhi_sat_wait_cmd_completion(struct mhi_sat_cntrl *sat_cntrl) +{ + struct mhi_sat_subsys *subsys = sat_cntrl->subsys; + int ret; + + reinit_completion(&sat_cntrl->completion); + + MHI_SAT_LOG("Wait for command completion\n"); + ret = wait_for_completion_timeout(&sat_cntrl->completion, + msecs_to_jiffies(sat_cntrl->mhi_cntrl->timeout_ms)); + if (!ret || sat_cntrl->last_cmd_ccs != MHI_EV_CC_SUCCESS) { + MHI_SAT_ERR("Command completion failure:seq:%u:ret:%d:ccs:%d\n", + sat_cntrl->last_cmd_seq, ret, sat_cntrl->last_cmd_ccs); + return -EIO; + } + + MHI_SAT_LOG("Command completion successful for seq:%u\n", + sat_cntrl->last_cmd_seq); + + return 0; +} + +static int mhi_sat_send_msg(struct mhi_sat_cntrl *sat_cntrl, + enum sat_msg_id type, u16 reply_seq, + void *msg, u16 msg_size) +{ + struct mhi_sat_subsys *subsys = sat_cntrl->subsys; + struct sat_header *hdr = msg; + + /* create sequence number for controller */ + sat_cntrl->seq++; + if (sat_cntrl->seq == SAT_RESERVED_SEQ_NUM) + sat_cntrl->seq = 0; + + /* populate header */ + hdr->major_ver = SAT_MAJOR_VERSION; + hdr->minor_ver = SAT_MINOR_VERSION; + hdr->msg_id = type; + hdr->seq = sat_cntrl->seq; + hdr->reply_seq = reply_seq; + hdr->payload_size = SAT_TRE_SIZE(msg_size); + hdr->dev_id = 
sat_cntrl->dev_id; + + /* save last sent command sequence number for completion event */ + if (type == SAT_MSG_ID_CMD) + sat_cntrl->last_cmd_seq = sat_cntrl->seq; + + return rpmsg_send(subsys->rpdev->ept, msg, msg_size); +} + +static void mhi_sat_process_cmds(struct mhi_sat_cntrl *sat_cntrl, + struct sat_header *hdr, struct sat_tre *pkt) +{ + struct mhi_sat_subsys *subsys = sat_cntrl->subsys; + int num_pkts = SAT_TRE_NUM_PKTS(hdr->payload_size), i; + + for (i = 0; i < num_pkts; i++, pkt++) { + enum mhi_ev_ccs code = MHI_EV_CC_INVALID; + + switch (MHI_TRE_GET_TYPE(pkt)) { + case MHI_PKT_TYPE_IOMMU_MAP_CMD: + { + struct mhi_buf *buf; + struct mhi_controller *mhi_cntrl = sat_cntrl->mhi_cntrl; + dma_addr_t iova = DMA_ERROR_CODE; + + buf = kmalloc(sizeof(*buf), GFP_ATOMIC); + if (!buf) + goto iommu_map_cmd_completion; + + buf->phys_addr = MHI_TRE_GET_PTR(pkt); + buf->len = MHI_TRE_GET_SIZE(pkt); + + iova = dma_map_resource(mhi_cntrl->dev, buf->phys_addr, + buf->len, DMA_BIDIRECTIONAL, 0); + if (dma_mapping_error(mhi_cntrl->dev, iova)) { + kfree(buf); + goto iommu_map_cmd_completion; + } + + buf->dma_addr = iova; + + mutex_lock(&sat_cntrl->list_mutex); + list_add_tail(&buf->node, + &sat_cntrl->addr_map_list); + mutex_unlock(&sat_cntrl->list_mutex); + + code = MHI_EV_CC_SUCCESS; + +iommu_map_cmd_completion: + MHI_SAT_LOG("IOMMU MAP 0x%llx CMD processing %s\n", + MHI_TRE_GET_PTR(pkt), + (code == MHI_EV_CC_SUCCESS) ? "successful" : + "failed"); + + pkt->ptr = MHI_TRE_EVT_CMD_COMPLETION_PTR(iova); + pkt->dword[0] = MHI_TRE_EVT_CMD_COMPLETION_D0(code); + pkt->dword[1] = MHI_TRE_EVT_CMD_COMPLETION_D1; + break; + } + case MHI_PKT_TYPE_CTXT_UPDATE_CMD: + { + u64 ctxt_ptr = MHI_TRE_GET_PTR(pkt); + u64 ctxt_size = MHI_TRE_GET_SIZE(pkt); + int id = MHI_TRE_GET_ID(pkt); + enum sat_ctxt_type evt = MHI_TRE_IS_ER_CTXT_TYPE(pkt); + struct mhi_generic_ctxt gen_ctxt; + struct mhi_buf buf; + struct mhi_sat_device *sat_dev = find_sat_dev_by_id( + sat_cntrl, id, evt); + int ret; + + MHI_SAT_ASSERT(!sat_dev, + "No device with given chan/evt ID"); + + memset(&gen_ctxt, 0, sizeof(gen_ctxt)); + memset(&buf, 0, sizeof(buf)); + + gen_ctxt.type = MHI_CTXT_TYPE_GENERIC; + gen_ctxt.ctxt_base = ctxt_ptr; + gen_ctxt.ctxt_size = ctxt_size; + + buf.buf = &gen_ctxt; + buf.len = sizeof(gen_ctxt); + buf.name = TO_SAT_CTXT_TYPE_STR(evt); + + ret = mhi_device_configure(sat_dev->mhi_dev, + DMA_BIDIRECTIONAL, &buf, 1); + if (!ret) + code = MHI_EV_CC_SUCCESS; + + MHI_SAT_LOG("CTXT UPDATE CMD %s:%d processing %s\n", + buf.name, id, (code == MHI_EV_CC_SUCCESS) ? + "successful" : "failed"); + + pkt->ptr = MHI_TRE_EVT_CMD_COMPLETION_PTR(0); + pkt->dword[0] = MHI_TRE_EVT_CMD_COMPLETION_D0(code); + pkt->dword[1] = MHI_TRE_EVT_CMD_COMPLETION_D1; + break; + } + case MHI_PKT_TYPE_START_CHAN_CMD: + { + int id = MHI_TRE_GET_ID(pkt); + struct mhi_sat_device *sat_dev = find_sat_dev_by_id( + sat_cntrl, id, + SAT_CTXT_TYPE_CHAN); + int ret; + + MHI_SAT_ASSERT(!sat_dev, + "No device with given channel ID\n"); + + MHI_SAT_ASSERT(sat_dev->chan_started, + "Channel already started!"); + + ret = mhi_prepare_for_transfer(sat_dev->mhi_dev); + if (!ret) { + sat_dev->chan_started = true; + code = MHI_EV_CC_SUCCESS; + } + + MHI_SAT_LOG("START CHANNEL %d CMD processing %s\n", + id, (code == MHI_EV_CC_SUCCESS) ? 
"successful" : + "failure"); + + pkt->ptr = MHI_TRE_EVT_CMD_COMPLETION_PTR(0); + pkt->dword[0] = MHI_TRE_EVT_CMD_COMPLETION_D0(code); + pkt->dword[1] = MHI_TRE_EVT_CMD_COMPLETION_D1; + break; + } + case MHI_PKT_TYPE_RESET_CHAN_CMD: + { + int id = MHI_TRE_GET_ID(pkt); + struct mhi_sat_device *sat_dev = + find_sat_dev_by_id(sat_cntrl, id, + SAT_CTXT_TYPE_CHAN); + + MHI_SAT_ASSERT(!sat_dev, + "No device with given channel ID\n"); + + MHI_SAT_ASSERT(!sat_dev->chan_started, + "Resetting unstarted channel!"); + + mhi_unprepare_from_transfer(sat_dev->mhi_dev); + sat_dev->chan_started = false; + + MHI_SAT_LOG( + "RESET CHANNEL %d CMD processing successful\n", + id); + + pkt->ptr = MHI_TRE_EVT_CMD_COMPLETION_PTR(0); + pkt->dword[0] = MHI_TRE_EVT_CMD_COMPLETION_D0( + MHI_EV_CC_SUCCESS); + pkt->dword[1] = MHI_TRE_EVT_CMD_COMPLETION_D1; + break; + } + default: + MHI_SAT_ASSERT(1, "Unhandled command!"); + break; + } + } +} + +static void mhi_sat_process_worker(struct work_struct *work) +{ + struct mhi_sat_cntrl *sat_cntrl = container_of(work, + struct mhi_sat_cntrl, process_work); + struct mhi_sat_subsys *subsys = sat_cntrl->subsys; + struct mhi_sat_packet *packet, *tmp; + struct sat_header *hdr; + struct sat_tre *pkt; + LIST_HEAD(head); + + MHI_SAT_LOG("Entered\n"); + + spin_lock_irq(&sat_cntrl->pkt_lock); + list_splice_tail_init(&sat_cntrl->packet_list, &head); + spin_unlock_irq(&sat_cntrl->pkt_lock); + + list_for_each_entry_safe(packet, tmp, &head, node) { + hdr = packet->msg; + pkt = SAT_TRE_OFFSET(packet->msg); + + list_del(&packet->node); + + mhi_sat_process_cmds(sat_cntrl, hdr, pkt); + + /* send response event(s) */ + mhi_sat_send_msg(sat_cntrl, SAT_MSG_ID_EVT, hdr->seq, + packet->msg, + SAT_MSG_SIZE(SAT_TRE_NUM_PKTS( + hdr->payload_size))); + + kfree(packet); + } + + MHI_SAT_LOG("Exited\n"); +} + +static void mhi_sat_connect_worker(struct work_struct *work) +{ + struct mhi_sat_cntrl *sat_cntrl = container_of(work, + struct mhi_sat_cntrl, connect_work); + struct mhi_sat_subsys *subsys = sat_cntrl->subsys; + struct sat_tre *pkt; + void *msg; + int ret; + + if (!subsys->rpdev || sat_cntrl->max_devices != sat_cntrl->num_devices + || sat_cntrl->active) + return; + + MHI_SAT_LOG("Entered\n"); + + msg = kmalloc(SAT_MSG_SIZE(3), GFP_ATOMIC); + if (!msg) + return; + + sat_cntrl->active = true; + + pkt = SAT_TRE_OFFSET(msg); + + /* prepare #1 MHI_CFG HELLO event */ + pkt->ptr = MHI_TRE_EVT_CFG_PTR(sat_cntrl->mhi_cntrl->base_addr); + pkt->dword[0] = MHI_TRE_EVT_CFG_D0(sat_cntrl->er_base, + sat_cntrl->num_er); + pkt->dword[1] = MHI_TRE_EVT_CFG_D1; + pkt++; + + /* prepare M0 event */ + pkt->ptr = MHI_TRE_EVT_MHI_STATE_PTR; + pkt->dword[0] = MHI_TRE_EVT_MHI_STATE_D0(MHI_STATE_M0); + pkt->dword[1] = MHI_TRE_EVT_MHI_STATE_D1; + pkt++; + + /* prepare AMSS event */ + pkt->ptr = MHI_TRE_EVT_EE_PTR; + pkt->dword[0] = MHI_TRE_EVT_EE_D0(MHI_EE_AMSS); + pkt->dword[1] = MHI_TRE_EVT_EE_D1; + + ret = mhi_sat_send_msg(sat_cntrl, SAT_MSG_ID_EVT, SAT_RESERVED_SEQ_NUM, + msg, SAT_MSG_SIZE(3)); + kfree(msg); + if (ret) { + MHI_SAT_ERR("Failed to send hello packet:%d\n", ret); + sat_cntrl->active = false; + return; + } + + MHI_SAT_LOG("Device 0x%x sent hello packet\n", sat_cntrl->dev_id); +} + +static void mhi_sat_process_events(struct mhi_sat_cntrl *sat_cntrl, + struct sat_header *hdr, struct sat_tre *pkt) +{ + int num_pkts = SAT_TRE_NUM_PKTS(hdr->payload_size); + int i; + + for (i = 0; i < num_pkts; i++, pkt++) { + if (MHI_TRE_GET_TYPE(pkt) == + MHI_PKT_TYPE_CMD_COMPLETION_EVENT) { + if (hdr->reply_seq != 
sat_cntrl->last_cmd_seq) + continue; + + sat_cntrl->last_cmd_ccs = MHI_TRE_GET_CCS(pkt); + complete(&sat_cntrl->completion); + } + } +} + +static int mhi_sat_rpmsg_cb(struct rpmsg_device *rpdev, void *data, int len, + void *priv, u32 src) +{ + struct mhi_sat_subsys *subsys = dev_get_drvdata(&rpdev->dev); + struct sat_header *hdr = data; + struct sat_tre *pkt = SAT_TRE_OFFSET(data); + struct mhi_sat_cntrl *sat_cntrl; + struct mhi_sat_packet *packet; + + MHI_SAT_ASSERT(!mhi_sat_isvalid_header(hdr, len), "Invalid header!\n"); + + /* find controller packet was sent for */ + sat_cntrl = find_sat_cntrl_by_id(subsys, hdr->dev_id); + + MHI_SAT_ASSERT(!sat_cntrl, "Packet for unknown device!\n"); + + /* handle events directly regardless of controller active state */ + if (hdr->msg_id == SAT_MSG_ID_EVT) { + mhi_sat_process_events(sat_cntrl, hdr, pkt); + return 0; + } + + /* Inactive controller cannot process incoming commands */ + if (unlikely(!sat_cntrl->active)) { + MHI_SAT_ERR("Message for inactive controller!\n"); + return 0; + } + + /* offload commands to process worker */ + packet = kmalloc(sizeof(*packet) + len, GFP_ATOMIC); + if (!packet) + return 0; + + packet->cntrl = sat_cntrl; + packet->msg = packet + 1; + memcpy(packet->msg, data, len); + + spin_lock_irq(&sat_cntrl->pkt_lock); + list_add_tail(&packet->node, &sat_cntrl->packet_list); + spin_unlock_irq(&sat_cntrl->pkt_lock); + + schedule_work(&sat_cntrl->process_work); + + return 0; +} + +static void mhi_sat_rpmsg_remove(struct rpmsg_device *rpdev) +{ + struct mhi_sat_subsys *subsys = dev_get_drvdata(&rpdev->dev); + struct mhi_sat_cntrl *sat_cntrl; + struct mhi_sat_device *sat_dev; + struct mhi_buf *buf, *tmp; + + MHI_SUBSYS_LOG("Enter\n"); + + /* unprepare each controller/device from transfer */ + mutex_lock(&subsys->cntrl_mutex); + list_for_each_entry(sat_cntrl, &subsys->cntrl_list, node) { + sat_cntrl->active = false; + + flush_work(&sat_cntrl->connect_work); + flush_work(&sat_cntrl->process_work); + + mutex_lock(&sat_cntrl->list_mutex); + list_for_each_entry(sat_dev, &sat_cntrl->dev_list, node) { + if (sat_dev->chan_started) { + mhi_unprepare_from_transfer(sat_dev->mhi_dev); + sat_dev->chan_started = false; + } + } + + list_for_each_entry_safe(buf, tmp, &sat_cntrl->addr_map_list, + node) { + dma_unmap_resource(sat_cntrl->mhi_cntrl->dev, + buf->dma_addr, buf->len, + DMA_BIDIRECTIONAL, 0); + list_del(&buf->node); + kfree(buf); + } + mutex_unlock(&sat_cntrl->list_mutex); + + MHI_SAT_LOG("Removed RPMSG link\n"); + } + mutex_unlock(&subsys->cntrl_mutex); + + subsys->rpdev = NULL; +} + +static int mhi_sat_rpmsg_probe(struct rpmsg_device *rpdev) +{ + struct mhi_sat_subsys *subsys; + struct mhi_sat_cntrl *sat_cntrl; + const char *subsys_name; + int ret; + + ret = of_property_read_string(rpdev->dev.parent->of_node, "label", + &subsys_name); + if (ret) + return ret; + + /* find which subsystem has probed */ + subsys = find_subsys_by_name(subsys_name); + if (!subsys) + return -EINVAL; + + MHI_SUBSYS_LOG("Received RPMSG probe\n"); + + dev_set_drvdata(&rpdev->dev, subsys); + + subsys->rpdev = rpdev; + + /* schedule work for each controller as GLINK has connected */ + spin_lock_irq(&subsys->cntrl_lock); + list_for_each_entry(sat_cntrl, &subsys->cntrl_list, node) + schedule_work(&sat_cntrl->connect_work); + spin_unlock_irq(&subsys->cntrl_lock); + + return 0; +} + +static struct rpmsg_device_id mhi_sat_rpmsg_match_table[] = { + { .name = "mhi_sat" }, + { }, +}; + +static struct rpmsg_driver mhi_sat_rpmsg_driver = { + .id_table = 
mhi_sat_rpmsg_match_table, + .probe = mhi_sat_rpmsg_probe, + .remove = mhi_sat_rpmsg_remove, + .callback = mhi_sat_rpmsg_cb, + .drv = { + .name = "mhi,sat_rpmsg", + }, +}; + +static void mhi_sat_dev_status_cb(struct mhi_device *mhi_dev, + enum MHI_CB mhi_cb) +{ +} + +static void mhi_sat_dev_remove(struct mhi_device *mhi_dev) +{ + struct mhi_sat_device *sat_dev = mhi_device_get_devdata(mhi_dev); + struct mhi_sat_cntrl *sat_cntrl = sat_dev->cntrl; + struct mhi_sat_subsys *subsys = sat_cntrl->subsys; + struct mhi_buf *buf, *tmp; + struct sat_tre *pkt; + void *msg; + int ret; + + /* remove device node from probed list */ + mutex_lock(&sat_cntrl->list_mutex); + list_del(&sat_dev->node); + mutex_unlock(&sat_cntrl->list_mutex); + + sat_cntrl->num_devices--; + + /* prepare SYS_ERR command if first device is being removed */ + if (sat_cntrl->active) { + sat_cntrl->active = false; + + /* flush all pending work */ + flush_work(&sat_cntrl->connect_work); + flush_work(&sat_cntrl->process_work); + + msg = kmalloc(SAT_MSG_SIZE(1), GFP_KERNEL); + + MHI_SAT_ASSERT(!msg, "Unable to malloc for SYS_ERR message!\n"); + + pkt = SAT_TRE_OFFSET(msg); + pkt->ptr = MHI_TRE_CMD_SYS_ERR_PTR; + pkt->dword[0] = MHI_TRE_CMD_SYS_ERR_D0; + pkt->dword[1] = MHI_TRE_CMD_SYS_ERR_D1; + + /* acquire cmd_wait_mutex before sending command */ + mutex_lock(&sat_cntrl->cmd_wait_mutex); + + ret = mhi_sat_send_msg(sat_cntrl, SAT_MSG_ID_CMD, + SAT_RESERVED_SEQ_NUM, msg, + SAT_MSG_SIZE(1)); + kfree(msg); + if (ret) { + MHI_SAT_ERR("Failed to notify SYS_ERR\n"); + mutex_unlock(&sat_cntrl->cmd_wait_mutex); + goto exit_sys_err_send; + } + + MHI_SAT_LOG("SYS_ERR command sent\n"); + + /* blocking call to wait for command completion event */ + mhi_sat_wait_cmd_completion(sat_cntrl); + + mutex_unlock(&sat_cntrl->cmd_wait_mutex); + } + +exit_sys_err_send: + /* exit if some devices are still present */ + if (sat_cntrl->num_devices) + return; + + /* remove address mappings */ + mutex_lock(&sat_cntrl->list_mutex); + list_for_each_entry_safe(buf, tmp, &sat_cntrl->addr_map_list, node) { + dma_unmap_resource(sat_cntrl->mhi_cntrl->dev, buf->dma_addr, + buf->len, DMA_BIDIRECTIONAL, 0); + list_del(&buf->node); + kfree(buf); + } + mutex_unlock(&sat_cntrl->list_mutex); + + /* remove controller */ + mutex_lock(&subsys->cntrl_mutex); + spin_lock_irq(&subsys->cntrl_lock); + list_del(&sat_cntrl->node); + spin_unlock_irq(&subsys->cntrl_lock); + mutex_unlock(&subsys->cntrl_mutex); + + mutex_destroy(&sat_cntrl->cmd_wait_mutex); + mutex_destroy(&sat_cntrl->list_mutex); + MHI_SAT_LOG("Satellite controller node removed\n"); + kfree(sat_cntrl); +} + +static int mhi_sat_dev_probe(struct mhi_device *mhi_dev, + const struct mhi_device_id *id) +{ + struct mhi_sat_device *sat_dev; + struct mhi_sat_cntrl *sat_cntrl; + struct device_node *of_node = mhi_dev->dev.of_node; + struct mhi_sat_subsys *subsys = &mhi_sat_driver.subsys[id->driver_data]; + u32 dev_id = MHI_SAT_CREATE_DEVICE_ID(mhi_dev->dev_id, mhi_dev->domain, + mhi_dev->bus, mhi_dev->slot); + int ret; + + /* find controller with unique device ID based on topology */ + sat_cntrl = find_sat_cntrl_by_id(subsys, dev_id); + if (!sat_cntrl) { + sat_cntrl = kzalloc(sizeof(*sat_cntrl), GFP_KERNEL); + if (!sat_cntrl) + return -ENOMEM; + + /* + * max_devices will be read from device tree node. Set it to + * -1 before it is populated to avoid false positive when + * RPMSG probe schedules connect worker but no device has + * probed in which case num_devices and max_devices are both + * zero. 
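+ * (The connect worker bails out until num_devices matches the max-devices value read from the device tree below.)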
+ */ + sat_cntrl->max_devices = -1; + sat_cntrl->dev_id = dev_id; + sat_cntrl->er_base = mhi_dev->dl_event_id; + sat_cntrl->mhi_cntrl = mhi_dev->mhi_cntrl; + sat_cntrl->last_cmd_seq = SAT_RESERVED_SEQ_NUM; + sat_cntrl->subsys = subsys; + init_completion(&sat_cntrl->completion); + mutex_init(&sat_cntrl->list_mutex); + mutex_init(&sat_cntrl->cmd_wait_mutex); + spin_lock_init(&sat_cntrl->pkt_lock); + INIT_WORK(&sat_cntrl->connect_work, mhi_sat_connect_worker); + INIT_WORK(&sat_cntrl->process_work, mhi_sat_process_worker); + INIT_LIST_HEAD(&sat_cntrl->dev_list); + INIT_LIST_HEAD(&sat_cntrl->addr_map_list); + INIT_LIST_HEAD(&sat_cntrl->packet_list); + + mutex_lock(&subsys->cntrl_mutex); + spin_lock_irq(&subsys->cntrl_lock); + list_add(&sat_cntrl->node, &subsys->cntrl_list); + spin_unlock_irq(&subsys->cntrl_lock); + mutex_unlock(&subsys->cntrl_mutex); + + MHI_SAT_LOG("Controller allocated for 0x%x\n", dev_id); + } + + /* set maximum devices for subsystem from device tree */ + if (of_node) { + ret = of_property_read_u32(of_node, "mhi,max-devices", + &sat_cntrl->max_devices); + if (ret) { + MHI_SAT_ERR("Could not find max-devices in DT node\n"); + return -EINVAL; + } + } + + /* get event ring base and max indexes */ + sat_cntrl->er_base = min(sat_cntrl->er_base, mhi_dev->dl_event_id); + sat_cntrl->er_max = max(sat_cntrl->er_base, mhi_dev->dl_event_id); + + sat_dev = devm_kzalloc(&mhi_dev->dev, sizeof(*sat_dev), GFP_KERNEL); + if (!sat_dev) + return -ENOMEM; + + sat_dev->mhi_dev = mhi_dev; + sat_dev->cntrl = sat_cntrl; + + mutex_lock(&sat_cntrl->list_mutex); + list_add(&sat_dev->node, &sat_cntrl->dev_list); + mutex_unlock(&sat_cntrl->list_mutex); + + mhi_device_set_devdata(mhi_dev, sat_dev); + + sat_cntrl->num_devices++; + + /* schedule connect worker if all devices for controller have probed */ + if (sat_cntrl->num_devices == sat_cntrl->max_devices) { + /* number of event rings is 1 more than difference in IDs */ + sat_cntrl->num_er = (sat_cntrl->er_max - sat_cntrl->er_base) + + 1; + MHI_SAT_LOG("All satellite channels probed!\n"); + schedule_work(&sat_cntrl->connect_work); + } + + return 0; +} + +/* .driver_data stores subsys id */ +static const struct mhi_device_id mhi_sat_dev_match_table[] = { + /* ADSP */ + { .chan = "ADSP_0", .driver_data = SUBSYS_ADSP }, + { .chan = "ADSP_1", .driver_data = SUBSYS_ADSP }, + { .chan = "ADSP_2", .driver_data = SUBSYS_ADSP }, + { .chan = "ADSP_3", .driver_data = SUBSYS_ADSP }, + { .chan = "ADSP_4", .driver_data = SUBSYS_ADSP }, + { .chan = "ADSP_5", .driver_data = SUBSYS_ADSP }, + { .chan = "ADSP_6", .driver_data = SUBSYS_ADSP }, + { .chan = "ADSP_7", .driver_data = SUBSYS_ADSP }, + { .chan = "ADSP_8", .driver_data = SUBSYS_ADSP }, + { .chan = "ADSP_9", .driver_data = SUBSYS_ADSP }, + /* CDSP */ + { .chan = "CDSP_0", .driver_data = SUBSYS_CDSP }, + { .chan = "CDSP_1", .driver_data = SUBSYS_CDSP }, + { .chan = "CDSP_2", .driver_data = SUBSYS_CDSP }, + { .chan = "CDSP_3", .driver_data = SUBSYS_CDSP }, + { .chan = "CDSP_4", .driver_data = SUBSYS_CDSP }, + { .chan = "CDSP_5", .driver_data = SUBSYS_CDSP }, + { .chan = "CDSP_6", .driver_data = SUBSYS_CDSP }, + { .chan = "CDSP_7", .driver_data = SUBSYS_CDSP }, + { .chan = "CDSP_8", .driver_data = SUBSYS_CDSP }, + { .chan = "CDSP_9", .driver_data = SUBSYS_CDSP }, + /* SLPI */ + { .chan = "SLPI_0", .driver_data = SUBSYS_SLPI }, + { .chan = "SLPI_1", .driver_data = SUBSYS_SLPI }, + { .chan = "SLPI_2", .driver_data = SUBSYS_SLPI }, + { .chan = "SLPI_3", .driver_data = SUBSYS_SLPI }, + { .chan = "SLPI_4", .driver_data = 
SUBSYS_SLPI }, + { .chan = "SLPI_5", .driver_data = SUBSYS_SLPI }, + { .chan = "SLPI_6", .driver_data = SUBSYS_SLPI }, + { .chan = "SLPI_7", .driver_data = SUBSYS_SLPI }, + { .chan = "SLPI_8", .driver_data = SUBSYS_SLPI }, + { .chan = "SLPI_9", .driver_data = SUBSYS_SLPI }, + /* MODEM */ + { .chan = "MODEM_0", .driver_data = SUBSYS_MODEM }, + { .chan = "MODEM_1", .driver_data = SUBSYS_MODEM }, + { .chan = "MODEM_2", .driver_data = SUBSYS_MODEM }, + { .chan = "MODEM_3", .driver_data = SUBSYS_MODEM }, + { .chan = "MODEM_4", .driver_data = SUBSYS_MODEM }, + { .chan = "MODEM_5", .driver_data = SUBSYS_MODEM }, + { .chan = "MODEM_6", .driver_data = SUBSYS_MODEM }, + { .chan = "MODEM_7", .driver_data = SUBSYS_MODEM }, + { .chan = "MODEM_8", .driver_data = SUBSYS_MODEM }, + { .chan = "MODEM_9", .driver_data = SUBSYS_MODEM }, + {}, +}; + +static struct mhi_driver mhi_sat_dev_driver = { + .id_table = mhi_sat_dev_match_table, + .probe = mhi_sat_dev_probe, + .remove = mhi_sat_dev_remove, + .status_cb = mhi_sat_dev_status_cb, + .driver = { + .name = MHI_SAT_DRIVER_NAME, + .owner = THIS_MODULE, + }, +}; + +int mhi_sat_trigger_init(void *data, u64 val) +{ + struct mhi_sat_subsys *subsys; + int i, ret; + + if (mhi_sat_driver.deferred_init_done) + return -EIO; + + ret = register_rpmsg_driver(&mhi_sat_rpmsg_driver); + if (ret) + goto error_sat_trigger_init; + + ret = mhi_driver_register(&mhi_sat_dev_driver); + if (ret) + goto error_sat_trigger_register; + + mhi_sat_driver.deferred_init_done = true; + + return 0; + +error_sat_trigger_register: + unregister_rpmsg_driver(&mhi_sat_rpmsg_driver); + +error_sat_trigger_init: + subsys = mhi_sat_driver.subsys; + for (i = 0; i < mhi_sat_driver.num_subsys; i++, subsys++) { + ipc_log_context_destroy(subsys->ipc_log); + mutex_destroy(&subsys->cntrl_mutex); + } + kfree(mhi_sat_driver.subsys); + mhi_sat_driver.subsys = NULL; + + return ret; +} + +DEFINE_SIMPLE_ATTRIBUTE(mhi_sat_debugfs_fops, NULL, + mhi_sat_trigger_init, "%llu\n"); + +static int mhi_sat_init(void) +{ + struct mhi_sat_subsys *subsys; + int i, ret; + + subsys = kcalloc(SUBSYS_MAX, sizeof(*subsys), GFP_KERNEL); + if (!subsys) + return -ENOMEM; + + mhi_sat_driver.subsys = subsys; + mhi_sat_driver.num_subsys = SUBSYS_MAX; + mhi_sat_driver.klog_lvl = KLOG_LVL; + mhi_sat_driver.ipc_log_lvl = IPC_LOG_LVL; + + for (i = 0; i < mhi_sat_driver.num_subsys; i++, subsys++) { + char log[32]; + + subsys->name = subsys_names[i]; + mutex_init(&subsys->cntrl_mutex); + spin_lock_init(&subsys->cntrl_lock); + INIT_LIST_HEAD(&subsys->cntrl_list); + scnprintf(log, sizeof(log), "mhi_sat_%s", subsys->name); + subsys->ipc_log = ipc_log_context_create(IPC_LOG_PAGES, log, 0); + } + + /* create debugfs entry if defer_init is enabled */ + if (mhi_sat_defer_init) { + mhi_sat_driver.dentry = debugfs_create_dir("mhi_sat", NULL); + if (IS_ERR_OR_NULL(mhi_sat_driver.dentry)) { + ret = -ENODEV; + goto error_sat_init; + } + + debugfs_create_file("debug", 0444, mhi_sat_driver.dentry, NULL, + &mhi_sat_debugfs_fops); + + return 0; + } + + ret = register_rpmsg_driver(&mhi_sat_rpmsg_driver); + if (ret) + goto error_sat_init; + + ret = mhi_driver_register(&mhi_sat_dev_driver); + if (ret) + goto error_sat_register; + + return 0; + +error_sat_register: + unregister_rpmsg_driver(&mhi_sat_rpmsg_driver); + +error_sat_init: + subsys = mhi_sat_driver.subsys; + for (i = 0; i < mhi_sat_driver.num_subsys; i++, subsys++) { + ipc_log_context_destroy(subsys->ipc_log); + mutex_destroy(&subsys->cntrl_mutex); + } + kfree(mhi_sat_driver.subsys); + 
mhi_sat_driver.subsys = NULL; + + return ret; +} + +module_init(mhi_sat_init); + diff --git a/package/wwan/driver/quectel_MHI/src/devices/mhi_uci.c b/package/wwan/driver/quectel_MHI/src/devices/mhi_uci.c new file mode 100644 index 000000000..01a846bb9 --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/devices/mhi_uci.c @@ -0,0 +1,922 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.*/ + +#include +#include +#include +#include +#include +#include +#include +#if 1 +static inline void *ipc_log_context_create(int max_num_pages, + const char *modname, uint16_t user_version) +{ return NULL; } +static inline int ipc_log_string(void *ilctxt, const char *fmt, ...) +{ return -EINVAL; } +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../core/mhi.h" + +#define DEVICE_NAME "mhi" +#define MHI_UCI_DRIVER_NAME "mhi_uci_q" + +struct uci_chan { + wait_queue_head_t wq; + spinlock_t lock; + struct list_head pending; /* user space waiting to read */ + struct uci_buf *cur_buf; /* current buffer user space reading */ + size_t rx_size; +}; + +struct uci_buf { + struct page *page; + void *data; + size_t len; + unsigned nr_trb; + struct list_head node; +}; + +struct uci_dev { + struct list_head node; + dev_t devt; + struct device *dev; + struct mhi_device *mhi_dev; + const char *chan; + struct mutex mutex; /* sync open and close */ + struct mutex r_mutex; + struct mutex w_mutex; + struct uci_chan ul_chan; + struct uci_chan dl_chan; + size_t mtu; + int ref_count; + bool enabled; + unsigned rx_error; + unsigned nr_trb; + unsigned nr_trbs; + struct uci_buf *uci_buf; + struct ktermios termios; + size_t bytes_xferd; +}; + +struct mhi_uci_drv { + struct list_head head; + struct mutex lock; + struct class *class; + int major; + dev_t dev_t; +}; + +static int uci_msg_lvl = MHI_MSG_LVL_ERROR; +module_param( uci_msg_lvl, uint, S_IRUGO | S_IWUSR); + +#define MSG_VERB(fmt, ...) do { \ + if (uci_msg_lvl <= MHI_MSG_LVL_VERBOSE) \ + pr_err("[D][%s] " fmt, __func__, ##__VA_ARGS__); \ + } while (0) + +#define MSG_LOG(fmt, ...) do { \ + if (uci_msg_lvl <= MHI_MSG_LVL_INFO) \ + pr_err("[I][%s] " fmt, __func__, ##__VA_ARGS__); \ + } while (0) + +#define MSG_ERR(fmt, ...) 
do { \ + if (uci_msg_lvl <= MHI_MSG_LVL_ERROR) \ + pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \ + } while (0) + +#define MAX_UCI_DEVICES (64) +#define QUEC_MHI_UCI_ALWAYS_OPEN //for now, the SDX20 cannot handle a "start-reset-start" operation, so the simple solution is to keep the started state + +static DECLARE_BITMAP(uci_minors, MAX_UCI_DEVICES); +static struct mhi_uci_drv mhi_uci_drv; + +static int mhi_queue_inbound(struct uci_dev *uci_dev) +{ + struct mhi_device *mhi_dev = uci_dev->mhi_dev; + int nr_trbs = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE); + size_t mtu = uci_dev->mtu; + void *buf; + struct uci_buf *uci_buf; + int ret = -EIO, i; + + if (uci_dev->uci_buf == NULL) { + uci_dev->nr_trb = 0; + uci_dev->nr_trbs = (nr_trbs + 1); + uci_dev->uci_buf = kmalloc_array(uci_dev->nr_trbs, sizeof(*uci_buf), GFP_KERNEL); + if (!uci_dev->uci_buf) + return -ENOMEM; + + uci_buf = uci_dev->uci_buf; + for (i = 0; i < uci_dev->nr_trbs; i++, uci_buf++) { + uci_buf->page = alloc_pages(GFP_KERNEL, get_order(mtu)); + if (!uci_buf->page) + return -ENOMEM; + uci_buf->data = page_address(uci_buf->page); + uci_buf->len = 0; + uci_buf->nr_trb = i; + if (mhi_dev->dl_chan_id == MHI_CLIENT_DUN_IN) { + //MSG_ERR("[%d] = %p\n", i, uci_buf->data); + } + } + } + + for (i = 0; i < nr_trbs; i++) { + #if 0 + buf = kmalloc(mtu + sizeof(*uci_buf), GFP_KERNEL); + if (!buf) + return -ENOMEM; + + uci_buf = buf + mtu; + uci_buf->data = buf; + #else + uci_buf = &uci_dev->uci_buf[i]; + buf = uci_buf->data; + #endif + + MSG_VERB("Allocated buf %d of %d size %zu\n", i, nr_trbs, mtu); + + ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, buf, mtu, + MHI_EOT); + if (ret) { + #if 0 + kfree(buf); + #endif + MSG_ERR("Failed to queue buffer %d\n", i); + return ret; + } + } + + return ret; +} + +static long mhi_uci_ioctl(struct file *file, + unsigned int cmd, + unsigned long arg) +{ + struct uci_dev *uci_dev = file->private_data; + struct mhi_device *mhi_dev = uci_dev->mhi_dev; + long ret = -ERESTARTSYS; + + mutex_lock(&uci_dev->mutex); + if (uci_dev->enabled) + ret = mhi_ioctl(mhi_dev, cmd, arg); + + if (uci_dev->enabled) { + switch (cmd) { + case TCGETS: +#ifndef TCGETS2 + ret = kernel_termios_to_user_termios((struct termios __user *)arg, &uci_dev->termios); +#else + ret = kernel_termios_to_user_termios_1((struct termios __user *)arg, &uci_dev->termios); +#endif + break; + + case TCSETSF: + case TCSETS: +#ifndef TCGETS2 + ret = user_termios_to_kernel_termios(&uci_dev->termios, (struct termios __user *)arg); +#else + ret = user_termios_to_kernel_termios_1(&uci_dev->termios, (struct termios __user *)arg); +#endif + break; + + case TCFLSH: + ret = 0; + break; + + default: + break; + } + } + mutex_unlock(&uci_dev->mutex); + + return ret; +} + +static int mhi_uci_release(struct inode *inode, struct file *file) +{ + struct uci_dev *uci_dev = file->private_data; + + mutex_lock(&uci_dev->mutex); + uci_dev->ref_count--; + if (!uci_dev->ref_count) { + struct uci_chan *uci_chan; + + MSG_LOG("Last client left, closing node\n"); + + if (uci_dev->enabled) + mhi_unprepare_from_transfer(uci_dev->mhi_dev); + + /* clean inbound channel */ + uci_chan = &uci_dev->dl_chan; + if (uci_dev->uci_buf) { + unsigned nr_trb = 0; + + for (nr_trb = 0; nr_trb < uci_dev->nr_trbs; nr_trb++) { + if (uci_dev->uci_buf[nr_trb].page) + __free_pages(uci_dev->uci_buf[nr_trb].page, get_order(uci_dev->mtu)); + } + kfree(uci_dev->uci_buf); + } + + uci_chan->cur_buf = NULL; + + if (!uci_dev->enabled) { + MSG_LOG("Node is deleted, freeing dev node\n"); +
mutex_unlock(&uci_dev->mutex); + mutex_destroy(&uci_dev->mutex); + clear_bit(MINOR(uci_dev->devt), uci_minors); + kfree(uci_dev); + return 0; + } + } + + MSG_LOG("exit: ref_count:%d\n", uci_dev->ref_count); + + mutex_unlock(&uci_dev->mutex); + + return 0; +} + +static unsigned int mhi_uci_poll(struct file *file, poll_table *wait) +{ + struct uci_dev *uci_dev = file->private_data; + struct mhi_device *mhi_dev = uci_dev->mhi_dev; + struct uci_chan *uci_chan; + unsigned int mask = 0; + + poll_wait(file, &uci_dev->dl_chan.wq, wait); + poll_wait(file, &uci_dev->ul_chan.wq, wait); + + uci_chan = &uci_dev->dl_chan; + spin_lock_bh(&uci_chan->lock); + if (!uci_dev->enabled) { + mask = POLLERR; + } else if (!list_empty(&uci_chan->pending) || uci_chan->cur_buf) { + MSG_VERB("Client can read from node\n"); + mask |= POLLIN | POLLRDNORM; + } + spin_unlock_bh(&uci_chan->lock); + + uci_chan = &uci_dev->ul_chan; + spin_lock_bh(&uci_chan->lock); + if (!uci_dev->enabled) { + mask |= POLLERR; + } else if (mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE) > 0) { + MSG_VERB("Client can write to node\n"); + mask |= POLLOUT | POLLWRNORM; + } + + if (!uci_dev->enabled) + mask |= POLLHUP; + if (uci_dev->rx_error) + mask |= POLLERR; + + spin_unlock_bh(&uci_chan->lock); + + MSG_LOG("Client attempted to poll, returning mask 0x%x\n", mask); + + return mask; +} + +static ssize_t mhi_uci_write(struct file *file, + const char __user *buf, + size_t count, + loff_t *offp) +{ + struct uci_dev *uci_dev = file->private_data; + struct mhi_device *mhi_dev = uci_dev->mhi_dev; + struct uci_chan *uci_chan = &uci_dev->ul_chan; + size_t bytes_xfered = 0; + int ret, nr_avail; + + if (!buf || !count || uci_dev->rx_error) + return -EINVAL; + + /* confirm channel is active */ + spin_lock_bh(&uci_chan->lock); + if (!uci_dev->enabled) { + spin_unlock_bh(&uci_chan->lock); + return -ERESTARTSYS; + } + + MSG_VERB("Enter: to xfer:%zu bytes\n", count); + + while (count) { + size_t xfer_size; + void *kbuf; + enum MHI_FLAGS flags; + + spin_unlock_bh(&uci_chan->lock); + + nr_avail = mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE); + if ((nr_avail == 0) && (file->f_flags & O_NONBLOCK)) + return -EAGAIN; + + /* wait for free descriptors */ + ret = wait_event_interruptible(uci_chan->wq, + (!uci_dev->enabled) || + (nr_avail = mhi_get_no_free_descriptors(mhi_dev, + DMA_TO_DEVICE)) > 0); + + if (ret == -ERESTARTSYS || !uci_dev->enabled) { + MSG_LOG("Exit signal caught for node or not enabled\n"); + return -ERESTARTSYS; + } + + xfer_size = min_t(size_t, count, uci_dev->mtu); + kbuf = kmalloc(xfer_size, GFP_KERNEL); + if (!kbuf) { + MSG_ERR("Failed to allocate memory %zu\n", xfer_size); + return -ENOMEM; + } + + ret = copy_from_user(kbuf, buf, xfer_size); + if (unlikely(ret)) { + kfree(kbuf); + return ret; + } + + spin_lock_bh(&uci_chan->lock); + + /* if ring is full after this force EOT */ + if (nr_avail > 1 && (count - xfer_size)) + flags = MHI_CHAIN; + else + flags = MHI_EOT; + + if (uci_dev->enabled) + ret = mhi_queue_transfer(mhi_dev, DMA_TO_DEVICE, kbuf, + xfer_size, flags); + else + ret = -ERESTARTSYS; + + if (ret) { + kfree(kbuf); + goto sys_interrupt; + } + + bytes_xfered += xfer_size; + count -= xfer_size; + buf += xfer_size; + } + + spin_unlock_bh(&uci_chan->lock); + MSG_VERB("Exit: Number of bytes xferred:%zu\n", bytes_xfered); + + return bytes_xfered; + +sys_interrupt: + spin_unlock_bh(&uci_chan->lock); + + return ret; +} + +static ssize_t mhi_uci_read(struct file *file, + char __user *buf, + size_t count, + loff_t *ppos) +{ + struct 
uci_dev *uci_dev = file->private_data; + struct mhi_device *mhi_dev = uci_dev->mhi_dev; + struct uci_chan *uci_chan = &uci_dev->dl_chan; + struct uci_buf *uci_buf; + char *ptr; + size_t to_copy; + int ret = 0; + + if (!buf || uci_dev->rx_error) + return -EINVAL; + + MSG_VERB("Client provided buf len:%zu\n", count); + + /* confirm channel is active */ + spin_lock_bh(&uci_chan->lock); + if (!uci_dev->enabled) { + spin_unlock_bh(&uci_chan->lock); + return -ERESTARTSYS; + } + + /* No data available to read, wait */ + if (!uci_chan->cur_buf && list_empty(&uci_chan->pending)) { + MSG_VERB("No data available to read waiting\n"); + + spin_unlock_bh(&uci_chan->lock); + + if (file->f_flags & O_NONBLOCK) + return -EAGAIN; + + ret = wait_event_interruptible(uci_chan->wq, + (!uci_dev->enabled || + !list_empty(&uci_chan->pending))); + if (ret == -ERESTARTSYS) { + MSG_LOG("Exit signal caught for node\n"); + return -ERESTARTSYS; + } + + spin_lock_bh(&uci_chan->lock); + if (!uci_dev->enabled) { + MSG_LOG("node is disabled\n"); + ret = -ERESTARTSYS; + goto read_error; + } + } + + /* new read, get the next descriptor from the list */ + if (!uci_chan->cur_buf) { + uci_buf = list_first_entry_or_null(&uci_chan->pending, + struct uci_buf, node); + if (unlikely(!uci_buf)) { + ret = -EIO; + goto read_error; + } + + if (uci_buf->node.next == LIST_POISON1 || uci_buf->node.prev == LIST_POISON1) { + dump_stack(); + ret = -EIO; + MSG_ERR("chan[%d] data=%p, len=%zd, nr_trb=%d\n", + mhi_dev->dl_chan_id, uci_buf->data, uci_buf->len, uci_buf->nr_trb); + goto read_error; + } + + list_del(&uci_buf->node); + uci_chan->cur_buf = uci_buf; + uci_chan->rx_size = uci_buf->len; + MSG_VERB("Got pkt of size:%zu\n", uci_chan->rx_size); + } + + uci_buf = uci_chan->cur_buf; + spin_unlock_bh(&uci_chan->lock); + + /* Copy the buffer to user space */ + to_copy = min_t(size_t, count, uci_chan->rx_size); + ptr = uci_buf->data + (uci_buf->len - uci_chan->rx_size); + ret = copy_to_user(buf, ptr, to_copy); + if (ret) + return ret; + + MSG_VERB("Copied %zu of %zu bytes\n", to_copy, uci_chan->rx_size); + uci_chan->rx_size -= to_copy; + + /* we finished with this buffer, queue it back to hardware */ + if (!uci_chan->rx_size) { + spin_lock_bh(&uci_chan->lock); + uci_chan->cur_buf = NULL; + + if (uci_dev->enabled) +#if 1 //this can make the address in ring do not change + { + if (uci_buf->page) { + unsigned nr_trb = uci_buf->nr_trb ? 
(uci_buf->nr_trb - 1) : (uci_dev->nr_trbs - 1); + + uci_buf = &uci_dev->uci_buf[nr_trb]; + ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, + uci_buf->data, uci_dev->mtu, + MHI_EOT); + } else { + kfree(uci_buf); + ret = 0; + } + } +#endif + else + ret = -ERESTARTSYS; + + if (ret) { + MSG_ERR("Failed to recycle element for chan:%d , ret=%d\n", mhi_dev->ul_chan_id, ret); +#if 0 + kfree(uci_buf->data); +#endif + goto read_error; + } + + spin_unlock_bh(&uci_chan->lock); + } + + MSG_VERB("Returning %zu bytes\n", to_copy); + + return to_copy; + +read_error: + spin_unlock_bh(&uci_chan->lock); + + return ret; +} + +static ssize_t mhi_uci_write_mutex(struct file *file, + const char __user *buf, + size_t count, + loff_t *offp) +{ + struct uci_dev *uci_dev = file->private_data; + int ret; + + ret = mutex_lock_interruptible(&uci_dev->w_mutex); /*concurrent writes */ + if (ret < 0) + return -ERESTARTSYS; + + ret = mhi_uci_write(file, buf, count, offp); + mutex_unlock(&uci_dev->w_mutex); + + return ret; +} + +static ssize_t mhi_uci_read_mutex(struct file *file, + char __user *buf, + size_t count, + loff_t *ppos) +{ + struct uci_dev *uci_dev = file->private_data; + int ret; + + ret = mutex_lock_interruptible(&uci_dev->r_mutex); /*concurrent reads */ + if (ret < 0) + return -ERESTARTSYS; + + ret = mhi_uci_read(file, buf, count, ppos); + mutex_unlock(&uci_dev->r_mutex); + + return ret; +} + +static int mhi_uci_open(struct inode *inode, struct file *filp) +{ + struct uci_dev *uci_dev = NULL, *tmp_dev; + int ret = -EIO; + struct uci_chan *dl_chan; + + mutex_lock(&mhi_uci_drv.lock); + list_for_each_entry(tmp_dev, &mhi_uci_drv.head, node) { + if (tmp_dev->devt == inode->i_rdev) { + uci_dev = tmp_dev; + break; + } + } + + /* could not find a minor node */ + if (!uci_dev) + goto error_exit; + + mutex_lock(&uci_dev->mutex); + if (!uci_dev->enabled) { + MSG_ERR("Node exist, but not in active state!\n"); + goto error_open_chan; + } + + uci_dev->ref_count++; + + MSG_LOG("Node open, ref counts %u\n", uci_dev->ref_count); + + if (uci_dev->ref_count == 1) { + MSG_LOG("Starting channel\n"); + ret = mhi_prepare_for_transfer(uci_dev->mhi_dev); + if (ret) { + MSG_ERR("Error starting transfer channels\n"); + uci_dev->ref_count--; + goto error_open_chan; + } + + ret = mhi_queue_inbound(uci_dev); + if (ret) + goto error_rx_queue; + +#ifdef QUEC_MHI_UCI_ALWAYS_OPEN + uci_dev->ref_count++; +#endif + } + + filp->private_data = uci_dev; + mutex_unlock(&uci_dev->mutex); + mutex_unlock(&mhi_uci_drv.lock); + + return 0; + + error_rx_queue: + dl_chan = &uci_dev->dl_chan; + mhi_unprepare_from_transfer(uci_dev->mhi_dev); + if (uci_dev->uci_buf) { + unsigned nr_trb = 0; + + for (nr_trb = 0; nr_trb < uci_dev->nr_trbs; nr_trb++) { + if (uci_dev->uci_buf[nr_trb].page) + __free_pages(uci_dev->uci_buf[nr_trb].page, get_order(uci_dev->mtu)); + } + kfree(uci_dev->uci_buf); + } + + error_open_chan: + mutex_unlock(&uci_dev->mutex); + +error_exit: + mutex_unlock(&mhi_uci_drv.lock); + + return ret; +} + +static const struct file_operations mhidev_fops = { + .open = mhi_uci_open, + .release = mhi_uci_release, + .read = mhi_uci_read_mutex, + .write = mhi_uci_write_mutex, + .poll = mhi_uci_poll, + .unlocked_ioctl = mhi_uci_ioctl, +}; + +static void mhi_uci_remove(struct mhi_device *mhi_dev) +{ + struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev); + + MSG_LOG("Enter\n"); + + + mutex_lock(&mhi_uci_drv.lock); + mutex_lock(&uci_dev->mutex); + + /* disable the node */ + spin_lock_irq(&uci_dev->dl_chan.lock); + 
spin_lock_irq(&uci_dev->ul_chan.lock); + uci_dev->enabled = false; + spin_unlock_irq(&uci_dev->ul_chan.lock); + spin_unlock_irq(&uci_dev->dl_chan.lock); + wake_up(&uci_dev->dl_chan.wq); + wake_up(&uci_dev->ul_chan.wq); + + /* delete the node to prevent new opens */ + device_destroy(mhi_uci_drv.class, uci_dev->devt); + uci_dev->dev = NULL; + list_del(&uci_dev->node); + +#ifdef QUEC_MHI_UCI_ALWAYS_OPEN + if (uci_dev->ref_count > 0) + uci_dev->ref_count--; +#endif + + /* safe to free memory only if all file nodes are closed */ + if (!uci_dev->ref_count) { + mutex_unlock(&uci_dev->mutex); + mutex_destroy(&uci_dev->mutex); + clear_bit(MINOR(uci_dev->devt), uci_minors); + kfree(uci_dev); + mutex_unlock(&mhi_uci_drv.lock); + return; + } + + MSG_LOG("Exit\n"); + mutex_unlock(&uci_dev->mutex); + mutex_unlock(&mhi_uci_drv.lock); + +} + +static int mhi_uci_probe(struct mhi_device *mhi_dev, + const struct mhi_device_id *id) +{ + struct uci_dev *uci_dev; + int minor; + char node_name[32]; + int dir; + + uci_dev = kzalloc(sizeof(*uci_dev), GFP_KERNEL); + if (!uci_dev) + return -ENOMEM; + + mutex_init(&uci_dev->mutex); + mutex_init(&uci_dev->r_mutex); + mutex_init(&uci_dev->w_mutex); + uci_dev->mhi_dev = mhi_dev; + + minor = find_first_zero_bit(uci_minors, MAX_UCI_DEVICES); + if (minor >= MAX_UCI_DEVICES) { + kfree(uci_dev); + return -ENOSPC; + } + + mutex_lock(&uci_dev->mutex); + mutex_lock(&mhi_uci_drv.lock); + + uci_dev->devt = MKDEV(mhi_uci_drv.major, minor); +#if 1 + if (mhi_dev->mhi_cntrl->cntrl_idx) + uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev, + uci_dev->devt, uci_dev, + DEVICE_NAME "_%s%d", + mhi_dev->chan_name, mhi_dev->mhi_cntrl->cntrl_idx); + else + uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev, + uci_dev->devt, uci_dev, + DEVICE_NAME "_%s", + mhi_dev->chan_name); +#else + uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev, + uci_dev->devt, uci_dev, + DEVICE_NAME "_%04x_%02u.%02u.%02u%s%d", + mhi_dev->dev_id, mhi_dev->domain, + mhi_dev->bus, mhi_dev->slot, "_pipe_", + mhi_dev->ul_chan_id); +#endif + + set_bit(minor, uci_minors); + + /* create debugging buffer */ + snprintf(node_name, sizeof(node_name), "mhi_uci_%04x_%02u.%02u.%02u_%d", + mhi_dev->dev_id, mhi_dev->domain, mhi_dev->bus, mhi_dev->slot, + mhi_dev->ul_chan_id); + + for (dir = 0; dir < 2; dir++) { + struct uci_chan *uci_chan = (dir) ? 
+ &uci_dev->ul_chan : &uci_dev->dl_chan; + spin_lock_init(&uci_chan->lock); + init_waitqueue_head(&uci_chan->wq); + INIT_LIST_HEAD(&uci_chan->pending); + } + + uci_dev->termios = tty_std_termios; + + uci_dev->mtu = min_t(size_t, id->driver_data, mhi_dev->mtu); + mhi_device_set_devdata(mhi_dev, uci_dev); + uci_dev->enabled = true; + + list_add(&uci_dev->node, &mhi_uci_drv.head); + mutex_unlock(&mhi_uci_drv.lock); + mutex_unlock(&uci_dev->mutex); + + MSG_LOG("channel:%s successfully probed\n", mhi_dev->chan_name); + + return 0; +} + +static void mhi_ul_xfer_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev); + struct uci_chan *uci_chan = &uci_dev->ul_chan; + + MSG_VERB("status:%d xfer_len:%zu\n", mhi_result->transaction_status, + mhi_result->bytes_xferd); + + kfree(mhi_result->buf_addr); + if (!mhi_result->transaction_status) + wake_up(&uci_chan->wq); +} + +static void mhi_dl_xfer_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev); + struct uci_chan *uci_chan = &uci_dev->dl_chan; + unsigned long flags; + struct uci_buf *buf; + unsigned nr_trb = uci_dev->nr_trb; + + buf = &uci_dev->uci_buf[nr_trb]; + if (buf->nr_trb != nr_trb || buf->data != mhi_result->buf_addr) + { + uci_dev->rx_error++; + MSG_ERR("chan[%d]: uci_buf[%u] = %p , mhi_result[%u] = %p\n", + mhi_dev->dl_chan_id, buf->nr_trb, buf->data, nr_trb, mhi_result->buf_addr); + return; + } + + uci_dev->nr_trb++; + if (uci_dev->nr_trb == uci_dev->nr_trbs) + uci_dev->nr_trb = 0; + + if (mhi_result->transaction_status == -ENOTCONN) { + return; + } + + if (mhi_result->bytes_xferd > uci_dev->mtu || mhi_result->bytes_xferd <= 0) + { + MSG_ERR("chan[%d]: bytes_xferd = %zd , mtu = %zd\n", + mhi_dev->dl_chan_id, mhi_result->bytes_xferd, uci_dev->mtu); + return; + } + if (mhi_result->bytes_xferd > uci_dev->bytes_xferd) + { + uci_dev->bytes_xferd = mhi_result->bytes_xferd; + //MSG_ERR("chan[%d]: bytes_xferd = %zd , mtu = %zd\n", + // mhi_dev->dl_chan_id, mhi_result->bytes_xferd, uci_dev->mtu); + } + + MSG_VERB("status:%d receive_len:%zu\n", mhi_result->transaction_status, + mhi_result->bytes_xferd); + + spin_lock_irqsave(&uci_chan->lock, flags); +#if 0 + buf = mhi_result->buf_addr + uci_dev->mtu; + buf->data = mhi_result->buf_addr; +#endif + buf->len = mhi_result->bytes_xferd; + if (mhi_dev->dl_chan_id == MHI_CLIENT_DUN_IN + || mhi_dev->dl_chan_id == MHI_CLIENT_QMI_IN + || mhi_dev->dl_chan_id == MHI_CLIENT_MBIM_IN) + { + struct uci_buf *tmp_buf = NULL; + int skip_buf = 0; + +#ifdef QUEC_MHI_UCI_ALWAYS_OPEN + if (uci_dev->ref_count == 1) + skip_buf++; +#endif + if (!skip_buf) + tmp_buf = (struct uci_buf *)kmalloc(buf->len + sizeof(struct uci_buf), GFP_ATOMIC); + + if (tmp_buf) { + tmp_buf->page = NULL; + tmp_buf->data = ((void *)tmp_buf) + sizeof(struct uci_buf); + tmp_buf->len = buf->len; + memcpy(tmp_buf->data, buf->data, buf->len); + } + + if (buf) { + struct uci_buf *uci_buf = buf; + unsigned nr_trb = uci_buf->nr_trb ?
(uci_buf->nr_trb - 1) : (uci_dev->nr_trbs - 1); + + uci_buf = &uci_dev->uci_buf[nr_trb]; + mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, uci_buf->data, uci_dev->mtu, MHI_EOT); + } + + buf = tmp_buf; + } + + if (buf) + list_add_tail(&buf->node, &uci_chan->pending); + spin_unlock_irqrestore(&uci_chan->lock, flags); + +#ifdef CONFIG_PM_SLEEP + if (mhi_dev->dev.power.wakeup) + __pm_wakeup_event(mhi_dev->dev.power.wakeup, 0); +#endif + + wake_up(&uci_chan->wq); +} + +#define DIAG_MAX_PCIE_PKT_SZ 2048 //define by module + +/* .driver_data stores max mtu */ +static const struct mhi_device_id mhi_uci_match_table[] = { + { .chan = "LOOPBACK", .driver_data = 0x1000 }, + { .chan = "SAHARA", .driver_data = 0x4000 }, + { .chan = "EDL", .driver_data = 0x4000 }, + { .chan = "DIAG", .driver_data = DIAG_MAX_PCIE_PKT_SZ }, + { .chan = "MBIM", .driver_data = 0x1000 }, + { .chan = "QMI0", .driver_data = 0x1000 }, + { .chan = "QMI1", .driver_data = 0x1000 }, + { .chan = "DUN", .driver_data = 0x1000 }, + {}, +}; + +static struct mhi_driver mhi_uci_driver = { + .id_table = mhi_uci_match_table, + .remove = mhi_uci_remove, + .probe = mhi_uci_probe, + .ul_xfer_cb = mhi_ul_xfer_cb, + .dl_xfer_cb = mhi_dl_xfer_cb, + .driver = { + .name = MHI_UCI_DRIVER_NAME, + .owner = THIS_MODULE, + }, +}; + +int mhi_device_uci_init(void) +{ + int ret; + + ret = register_chrdev(0, MHI_UCI_DRIVER_NAME, &mhidev_fops); + if (ret < 0) + return ret; + + mhi_uci_drv.major = ret; + mhi_uci_drv.class = class_create(THIS_MODULE, MHI_UCI_DRIVER_NAME); + if (IS_ERR(mhi_uci_drv.class)) { + unregister_chrdev(mhi_uci_drv.major, MHI_UCI_DRIVER_NAME); + return -ENODEV; + } + + mutex_init(&mhi_uci_drv.lock); + INIT_LIST_HEAD(&mhi_uci_drv.head); + + ret = mhi_driver_register(&mhi_uci_driver); + if (ret) { + class_destroy(mhi_uci_drv.class); + unregister_chrdev(mhi_uci_drv.major, MHI_UCI_DRIVER_NAME); + } + + return ret; +} + +void mhi_device_uci_exit(void) +{ + mhi_driver_unregister(&mhi_uci_driver); + class_destroy(mhi_uci_drv.class); + unregister_chrdev(mhi_uci_drv.major, MHI_UCI_DRIVER_NAME); +} diff --git a/package/wwan/driver/quectel_MHI/src/devices/rmnet/Kconfig b/package/wwan/driver/quectel_MHI/src/devices/rmnet/Kconfig new file mode 100644 index 000000000..9bb06d284 --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/devices/rmnet/Kconfig @@ -0,0 +1,13 @@ +# +# RMNET MAP driver +# + +menuconfig RMNET + tristate "RmNet MAP driver" + default n + select GRO_CELLS + ---help--- + If you select this, you will enable the RMNET module which is used + for handling data in the multiplexing and aggregation protocol (MAP) + format in the embedded data path. RMNET devices can be attached to + any IP mode physical device. 
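Before moving on to the rmnet sources, here is a minimal user-space sketch of how the character nodes created by mhi_uci_q above are typically exercised. The path /dev/mhi_DUN is an assumption for illustration (mhi_uci_probe() builds the name as DEVICE_NAME "_<chan_name>", with the controller index appended when it is non-zero), and the AT command is likewise just an example:

    /* Hypothetical client for an mhi_uci_q node; node name is an assumption. */
    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        char buf[256];
        struct pollfd pfd;
        ssize_t n;
        int fd = open("/dev/mhi_DUN", O_RDWR);

        if (fd < 0) {
            perror("open");
            return 1;
        }

        /* mhi_uci_write() splits writes into MTU-sized transfer ring elements */
        if (write(fd, "AT\r", 3) != 3)
            perror("write");

        /* mhi_uci_poll() reports POLLIN once a downlink buffer is pending */
        pfd.fd = fd;
        pfd.events = POLLIN;
        if (poll(&pfd, 1, 3000) == 1 && (pfd.revents & POLLIN)) {
            n = read(fd, buf, sizeof(buf) - 1);
            if (n > 0) {
                buf[n] = '\0';
                printf("%s", buf);
            }
        }

        close(fd);
        return 0;
    }

Note the effect of QUEC_MHI_UCI_ALWAYS_OPEN above: the driver takes one extra reference on the first open, so the transfer channels stay started even after a program like this exits.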
diff --git a/package/wwan/driver/quectel_MHI/src/devices/rmnet/Makefile b/package/wwan/driver/quectel_MHI/src/devices/rmnet/Makefile new file mode 100644 index 000000000..b175fbb7f --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/devices/rmnet/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for the RMNET module +# + +rmnet-y := rmnet_config.o +rmnet-y += rmnet_vnd.o +rmnet-y += rmnet_handlers.o +rmnet-y += rmnet_map_data.o +rmnet-y += rmnet_map_command.o +rmnet-y += rmnet_descriptor.o +obj-$(CONFIG_RMNET) += rmnet.o diff --git a/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_config.c b/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_config.c new file mode 100644 index 000000000..c5ec0c892 --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_config.c @@ -0,0 +1,141 @@ +/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * RMNET configuration engine + * + */ + +#include +#include +#include +#include +#include +#include "rmnet_config.h" +#include "rmnet_handlers.h" +#include "rmnet_vnd.h" +#include "rmnet_private.h" +#include "rmnet_map.h" +#include "rmnet_descriptor.h" + +/* Locking scheme - + * The shared resource which needs to be protected is realdev->rx_handler_data. + * For the writer path, this is protected by rtnl_lock(). The writer paths are + * rmnet_newlink(), rmnet_dellink() and rmnet_force_unassociate_device(). These + * paths are already called with rtnl_lock() acquired. There is also an + * ASSERT_RTNL() to ensure that we are calling with rtnl acquired. For + * dereference here, we will need to use rtnl_dereference(). Dev list writing + * needs to happen with rtnl_lock() acquired for netdev_master_upper_dev_link(). + * For the reader path, the real_dev->rx_handler_data is accessed in the TX / RX + * path. We only need rcu_read_lock() for these scenarios. In these cases, + * the rcu_read_lock() is held in __dev_queue_xmit() and + * netif_receive_skb_internal(), so readers need to use rcu_dereference_rtnl() + * to get the relevant information. For dev list reading, we again acquire + * rcu_read_lock() in rmnet_dellink() for netdev_master_upper_dev_get_rcu(). + * We also use unregister_netdevice_many() to free all rmnet devices in + * rmnet_force_unassociate_device() so we don't lose the rtnl_lock() and can + * free them in the same context.
+ */ + +/* Local Definitions and Declarations */ + +static int rmnet_is_real_dev_registered(const struct net_device *real_dev) +{ + return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler; +} + +/* Needs rtnl lock */ +static struct rmnet_port* +rmnet_get_port_rtnl(const struct net_device *real_dev) +{ + return rtnl_dereference(real_dev->rx_handler_data); +} + +static int rmnet_unregister_real_device(struct net_device *real_dev, + struct rmnet_port *port) +{ + if (port->nr_rmnet_devs) + return -EINVAL; + + rmnet_map_cmd_exit(port); + rmnet_map_tx_aggregate_exit(port); + + rmnet_descriptor_deinit(port); + + kfree(port); + + netdev_rx_handler_unregister(real_dev); + + /* release reference on real_dev */ + dev_put(real_dev); + + netdev_dbg(real_dev, "Removed from rmnet\n"); + return 0; +} + +static int rmnet_register_real_device(struct net_device *real_dev) +{ + struct rmnet_port *port; + int rc, entry; + + ASSERT_RTNL(); + + if (rmnet_is_real_dev_registered(real_dev)) + return 0; + + port = kzalloc(sizeof(*port), GFP_ATOMIC); + if (!port) + return -ENOMEM; + + port->dev = real_dev; + rc = netdev_rx_handler_register(real_dev, rmnet_rx_handler, port); + if (rc) { + kfree(port); + return -EBUSY; + } + /* hold on to real dev for MAP data */ + dev_hold(real_dev); + + for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++) + INIT_HLIST_HEAD(&port->muxed_ep[entry]); + + rc = rmnet_descriptor_init(port); + if (rc) { + rmnet_descriptor_deinit(port); + return rc; + } + + rmnet_map_tx_aggregate_init(port); + rmnet_map_cmd_init(port); + + netdev_dbg(real_dev, "registered with rmnet\n"); + return 0; +} + +/* Needs either rcu_read_lock() or rtnl lock */ +static struct rmnet_port *rmnet_get_port(struct net_device *real_dev) +{ + if (rmnet_is_real_dev_registered(real_dev)) + return rcu_dereference_rtnl(real_dev->rx_handler_data); + else + return NULL; +} + +static struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id) +{ + struct rmnet_endpoint *ep; + + hlist_for_each_entry_rcu(ep, &port->muxed_ep[mux_id], hlnode) { + if (ep->mux_id == mux_id) + return ep; + } + + return NULL; +} diff --git a/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_config.h b/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_config.h new file mode 100644 index 000000000..c74fcdf21 --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_config.h @@ -0,0 +1,174 @@ +/* Copyright (c) 2013-2017, 2019 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * RMNET Data configuration engine + * + */ + +#include +#include + +#ifndef _RMNET_CONFIG_H_ +#define _RMNET_CONFIG_H_ + +#define RMNET_MAX_LOGICAL_EP 255 +#define RMNET_MAX_VEID 4 + +struct rmnet_endpoint { + u8 mux_id; + struct net_device *egress_dev; + struct hlist_node hlnode; +}; + +struct rmnet_port_priv_stats { + u64 dl_hdr_last_qmap_vers; + u64 dl_hdr_last_ep_id; + u64 dl_hdr_last_trans_id; + u64 dl_hdr_last_seq; + u64 dl_hdr_last_bytes; + u64 dl_hdr_last_pkts; + u64 dl_hdr_last_flows; + u64 dl_hdr_count; + u64 dl_hdr_total_bytes; + u64 dl_hdr_total_pkts; + u64 dl_trl_last_seq; + u64 dl_trl_count; +}; + +struct rmnet_egress_agg_params { + u16 agg_size; + u16 agg_count; + u32 agg_time; +}; + +/* One instance of this structure is instantiated for each real_dev associated + * with rmnet. + */ +struct rmnet_port { + struct net_device *dev; + u32 data_format; + u8 nr_rmnet_devs; + u8 rmnet_mode; + struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP]; + struct net_device *bridge_ep; + void *rmnet_perf; + + struct rmnet_egress_agg_params egress_agg_params; + + /* Protect aggregation related elements */ + spinlock_t agg_lock; + + struct sk_buff *agg_skb; + int agg_state; + u8 agg_count; + struct timespec agg_time; + struct timespec agg_last; + struct hrtimer hrtimer; + struct work_struct agg_wq; + + /* dl marker elements */ + struct list_head dl_list; + struct rmnet_port_priv_stats stats; + int dl_marker_flush; + + /* Descriptor pool */ + spinlock_t desc_pool_lock; + struct rmnet_frag_descriptor_pool *frag_desc_pool; + struct sk_buff *chain_head; + struct sk_buff *chain_tail; +}; + +extern struct rtnl_link_ops rmnet_link_ops; + +struct rmnet_vnd_stats { + u64 rx_pkts; + u64 rx_bytes; + u64 tx_pkts; + u64 tx_bytes; + u32 tx_drops; +}; + +struct rmnet_pcpu_stats { + struct rmnet_vnd_stats stats; + struct u64_stats_sync syncp; +}; + +struct rmnet_coal_close_stats { + u64 non_coal; + u64 ip_miss; + u64 trans_miss; + u64 hw_nl; + u64 hw_pkt; + u64 hw_byte; + u64 hw_time; + u64 hw_evict; + u64 coal; +}; + +struct rmnet_coal_stats { + u64 coal_rx; + u64 coal_pkts; + u64 coal_hdr_nlo_err; + u64 coal_hdr_pkt_err; + u64 coal_csum_err; + u64 coal_reconstruct; + u64 coal_ip_invalid; + u64 coal_trans_invalid; + struct rmnet_coal_close_stats close; + u64 coal_veid[RMNET_MAX_VEID]; +}; + +struct rmnet_priv_stats { + u64 csum_ok; + u64 csum_valid_unset; + u64 csum_validation_failed; + u64 csum_err_bad_buffer; + u64 csum_err_invalid_ip_version; + u64 csum_err_invalid_transport; + u64 csum_fragmented_pkt; + u64 csum_skipped; + u64 csum_sw; + u64 csum_hw; + struct rmnet_coal_stats coal; +}; + +struct rmnet_priv { + u8 mux_id; + struct net_device *real_dev; + struct rmnet_pcpu_stats __percpu *pcpu_stats; + struct gro_cells gro_cells; + struct rmnet_priv_stats stats; +}; + +enum rmnet_dl_marker_prio { + RMNET_PERF, + RMNET_SHS, +}; + +enum rmnet_trace_func { + RMNET_MODULE, + NW_STACK_MODULE, +}; + +enum rmnet_trace_evt { + RMNET_DLVR_SKB, + RMNET_RCV_FROM_PND, + RMNET_TX_UL_PKT, + NW_STACK_DEV_Q_XMIT, + NW_STACK_NAPI_GRO_FLUSH, + NW_STACK_RX, + NW_STACK_TX, +}; + +static int rmnet_is_real_dev_registered(const struct net_device *real_dev); +static struct rmnet_port *rmnet_get_port(struct net_device *real_dev); +static struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id); +#endif /* _RMNET_CONFIG_H_ */ diff --git a/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_data.c b/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_data.c new file mode 100644 index 
000000000..ad8953c30 --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_data.c @@ -0,0 +1,1150 @@ +#if 0 + +#define RMNET_MAX_PACKET_SIZE 16384 +#define RMNET_DFLT_PACKET_SIZE 1500 +#define RMNET_NEEDED_HEADROOM 16 +#define RMNET_TX_QUEUE_LEN 1000 + +#define RMNET_MAX_LOGICAL_EP 255 +#define RMNET_MAP_DESC_HEADROOM 128 +#define RMNET_FRAG_DESCRIPTOR_POOL_SIZE 64 + +/* Pass the frame up the stack with no modifications to skb->dev */ +#define RMNET_EPMODE_NONE (0) +/* Replace skb->dev to a virtual rmnet device and pass up the stack */ +#define RMNET_EPMODE_VND (1) +/* Pass the frame directly to another device with dev_queue_xmit() */ +#define RMNET_EPMODE_BRIDGE (2) + +/* rmnet section */ + +#define RMNET_FLAGS_INGRESS_DEAGGREGATION (1U << 0) +#define RMNET_FLAGS_INGRESS_MAP_COMMANDS (1U << 1) +#define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2) +#define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3) +#define RMNET_FLAGS_INGRESS_COALESCE (1U << 4) +#define RMNET_FLAGS_INGRESS_MAP_CKSUMV5 (1U << 5) +#define RMNET_FLAGS_EGRESS_MAP_CKSUMV5 (1U << 6) + +enum rmnet_map_v5_header_type { + RMNET_MAP_HEADER_TYPE_UNKNOWN, + RMNET_MAP_HEADER_TYPE_COALESCING = 0x1, + RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD = 0x2, + RMNET_MAP_HEADER_TYPE_ENUM_LENGTH +}; + +/* Main QMAP header */ +struct rmnet_map_header { + u8 pad_len:6; + u8 next_hdr:1; + u8 cd_bit:1; + u8 mux_id; + __be16 pkt_len; +} __aligned(1); + +/* QMAP v5 headers */ +struct rmnet_map_v5_csum_header { + u8 next_hdr:1; + u8 header_type:7; + u8 hw_reserved:7; + u8 csum_valid_required:1; + __be16 reserved; +} __aligned(1); + +struct rmnet_map_v5_nl_pair { + __be16 pkt_len; + u8 csum_error_bitmap; + u8 num_packets; +} __aligned(1); + +/* NLO: Number-length object */ +#define RMNET_MAP_V5_MAX_NLOS (6) +#define RMNET_MAP_V5_MAX_PACKETS (48) + +struct rmnet_map_v5_coal_header { + u8 next_hdr:1; + u8 header_type:7; + u8 reserved1:4; + u8 num_nlos:3; + u8 csum_valid:1; + u8 close_type:4; + u8 close_value:4; + u8 reserved2:4; + u8 virtual_channel_id:4; + + struct rmnet_map_v5_nl_pair nl_pairs[RMNET_MAP_V5_MAX_NLOS]; +} __aligned(1); + +/* QMAP v4 headers */ +struct rmnet_map_dl_csum_trailer { + u8 reserved1; + u8 valid:1; + u8 reserved2:7; + u16 csum_start_offset; + u16 csum_length; + __be16 csum_value; +} __aligned(1); + +struct rmnet_frag_descriptor_pool { + struct list_head free_list; + u32 pool_size; +}; + +struct rmnet_frag_descriptor { + struct list_head list; + struct list_head sub_frags; + skb_frag_t frag; + u8 *hdr_ptr; + struct net_device *dev; + u32 hash; + __be32 tcp_seq; + __be16 ip_id; + u16 data_offset; + u16 gso_size; + u16 gso_segs; + u16 ip_len; + u16 trans_len; + u8 ip_proto; + u8 trans_proto; + u8 pkt_id; + u8 csum_valid:1, + hdrs_valid:1, + ip_id_set:1, + tcp_seq_set:1, + flush_shs:1, + reserved:3; +}; + +struct rmnet_endpoint { + u8 rmnet_mode; + u8 mux_id; + struct net_device *rmnet_dev; +}; + +/* One instance of this structure is instantiated for each real_dev associated + * with rmnet. 
+ */ +struct rmnet_port { + struct net_device *dev; + u8 rmnet_mode; + u32 data_format; + u32 nr_rmnet_devs; + struct rmnet_endpoint muxed_ep[16]; + + /* Descriptor pool */ + spinlock_t desc_pool_lock; + struct rmnet_frag_descriptor_pool *frag_desc_pool; + struct sk_buff *chain_head; + struct sk_buff *chain_tail; +}; + +static struct sk_buff * add_qhdr_v5(struct sk_buff *skb, u8 mux_id) +{ + struct rmnet_map_header *map_header; + struct rmnet_map_v5_csum_header *ul_header; + u32 padding, map_datalen; + + map_datalen = skb->len; + padding = map_datalen%4; + if (padding) { + padding = 4 - padding; + if (skb_tailroom(skb) < padding) { + printk("skb_tailroom small!\n"); + padding = 0; + } + if (padding) + __skb_put(skb, padding); + } + + map_header = (struct rmnet_map_header *)skb_push(skb, (sizeof(struct rmnet_map_header) + sizeof(struct rmnet_map_v5_csum_header))); + + BUILD_BUG_ON((sizeof(struct rmnet_map_header) + sizeof(struct rmnet_map_v5_csum_header)) != 8); + + map_header->cd_bit = 0; + map_header->next_hdr = 1; + map_header->pad_len = padding; + map_header->mux_id = mux_id; + map_header->pkt_len = htons(map_datalen + padding); + + ul_header = (struct rmnet_map_v5_csum_header *)(map_header + 1); + memset(ul_header, 0, sizeof(*ul_header)); + ul_header->header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD; + + return skb; +} + +struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id) +{ + return &port->muxed_ep[0]; +} + +static void +rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port) +{ + struct rmnet_nss_cb *nss_cb; + + //rmnet_vnd_rx_fixup(skb->dev, skb->len); + + /* Pass off the packet to NSS driver if we can */ + nss_cb = rcu_dereference(rmnet_nss_callbacks); + if (nss_cb) { + if (!port->chain_head) + port->chain_head = skb; + else + skb_shinfo(port->chain_tail)->frag_list = skb; + + port->chain_tail = skb; + return; + } + + skb_reset_transport_header(skb); + skb_reset_network_header(skb); + + skb->pkt_type = PACKET_HOST; + skb_set_mac_header(skb, 0); + + //if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) { + //} else { + //if (!rmnet_check_skb_can_gro(skb)) + // gro_cells_receive(&priv->gro_cells, skb); + //else + netif_receive_skb(skb); + //} +} + +static inline unsigned char *rmnet_map_data_ptr(struct sk_buff *skb) +{ + /* Nonlinear packets we receive are entirely within frag 0 */ + if (skb_is_nonlinear(skb) && skb->len == skb->data_len) + return skb_frag_address(skb_shinfo(skb)->frags); + + return skb->data; +} + +static inline void *rmnet_frag_data_ptr(struct rmnet_frag_descriptor *frag_desc) +{ + return skb_frag_address(&frag_desc->frag); +} + +static struct rmnet_frag_descriptor * +rmnet_get_frag_descriptor(struct rmnet_port *port) +{ + struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool; + struct rmnet_frag_descriptor *frag_desc; + + spin_lock(&port->desc_pool_lock); + if (!list_empty(&pool->free_list)) { + frag_desc = list_first_entry(&pool->free_list, + struct rmnet_frag_descriptor, + list); + list_del_init(&frag_desc->list); + } else { + frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC); + if (!frag_desc) + goto out; + + INIT_LIST_HEAD(&frag_desc->list); + INIT_LIST_HEAD(&frag_desc->sub_frags); + pool->pool_size++; + } + +out: + spin_unlock(&port->desc_pool_lock); + return frag_desc; +} + +static void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port) +{ + struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool; + struct page *page = skb_frag_page(&frag_desc->frag); + + 
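/* Recycling: drop the page reference held by this descriptor, scrub its fields, and put it back on the per-port free list for reuse by rmnet_get_frag_descriptor(). */ + 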
list_del(&frag_desc->list); + if (page) + put_page(page); + + memset(frag_desc, 0, sizeof(*frag_desc)); + INIT_LIST_HEAD(&frag_desc->list); + INIT_LIST_HEAD(&frag_desc->sub_frags); + spin_lock(&port->desc_pool_lock); + list_add_tail(&frag_desc->list, &pool->free_list); + spin_unlock(&port->desc_pool_lock); +} + +static inline void rmnet_frag_fill(struct rmnet_frag_descriptor *frag_desc, + struct page *p, u32 page_offset, u32 len) +{ + get_page(p); + __skb_frag_set_page(&frag_desc->frag, p); + skb_frag_size_set(&frag_desc->frag, len); + frag_desc->frag.page_offset = page_offset; +} + +static inline void *rmnet_frag_pull(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port, + unsigned int size) +{ + if (size >= skb_frag_size(&frag_desc->frag)) { + pr_info("%s(): Pulling %u bytes from %u byte pkt. Dropping\n", + __func__, size, skb_frag_size(&frag_desc->frag)); + rmnet_recycle_frag_descriptor(frag_desc, port); + return NULL; + } + + frag_desc->frag.page_offset += size; + skb_frag_size_sub(&frag_desc->frag, size); + + return rmnet_frag_data_ptr(frag_desc); +} + +static inline void *rmnet_frag_trim(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port, + unsigned int size) +{ + if (!size) { + pr_info("%s(): Trimming %u byte pkt to 0. Dropping\n", + __func__, skb_frag_size(&frag_desc->frag)); + rmnet_recycle_frag_descriptor(frag_desc, port); + return NULL; + } + + if (size < skb_frag_size(&frag_desc->frag)) + skb_frag_size_set(&frag_desc->frag, size); + + return rmnet_frag_data_ptr(frag_desc); +} + +static inline u8 +rmnet_frag_get_next_hdr_type(struct rmnet_frag_descriptor *frag_desc) +{ + unsigned char *data = rmnet_frag_data_ptr(frag_desc); + + data += sizeof(struct rmnet_map_header); + return ((struct rmnet_map_v5_coal_header *)data)->header_type; +} + +static inline bool +rmnet_frag_get_csum_valid(struct rmnet_frag_descriptor *frag_desc) +{ + unsigned char *data = rmnet_frag_data_ptr(frag_desc); + + data += sizeof(struct rmnet_map_header); + return ((struct rmnet_map_v5_csum_header *)data)->csum_valid_required; +} + +static void rmnet_descriptor_add_frag(struct rmnet_port *port, struct list_head *list, + struct page *p, u32 page_offset, u32 len) +{ + struct rmnet_frag_descriptor *frag_desc; + + frag_desc = rmnet_get_frag_descriptor(port); + if (!frag_desc) + return; + + rmnet_frag_fill(frag_desc, p, page_offset, len); + list_add_tail(&frag_desc->list, list); +} + +static void rmnet_frag_deaggregate(skb_frag_t *frag, struct rmnet_port *port, + struct list_head *list) +{ + struct rmnet_map_header *maph; + u8 *data = skb_frag_address(frag); + u32 offset = 0; + u32 packet_len; + + while (offset < skb_frag_size(frag)) { + maph = (struct rmnet_map_header *)data; + packet_len = ntohs(maph->pkt_len); + + /* Some hardware can send us empty frames. 
Catch them */ + if (packet_len == 0) + return; + + packet_len += sizeof(*maph); + + if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) { + packet_len += sizeof(struct rmnet_map_dl_csum_trailer); + WARN_ON(1); + } else if (port->data_format & + (RMNET_FLAGS_INGRESS_MAP_CKSUMV5 | + RMNET_FLAGS_INGRESS_COALESCE) && !maph->cd_bit) { + u32 hsize = 0; + u8 type; + + type = ((struct rmnet_map_v5_coal_header *) + (data + sizeof(*maph)))->header_type; + switch (type) { + case RMNET_MAP_HEADER_TYPE_COALESCING: + hsize = sizeof(struct rmnet_map_v5_coal_header); + break; + case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD: + hsize = sizeof(struct rmnet_map_v5_csum_header); + break; + } + + packet_len += hsize; + } + else { + qmap_hex_dump(__func__, data, 64); + WARN_ON(1); + } + + if ((int)skb_frag_size(frag) - (int)packet_len < 0) + return; + + rmnet_descriptor_add_frag(port, list, skb_frag_page(frag), + frag->page_offset + offset, + packet_len); + + offset += packet_len; + data += packet_len; + } +} + + +#define RMNET_IP_VERSION_4 0x40 +#define RMNET_IP_VERSION_6 0x60 + +/* Helper Functions */ + +static void rmnet_set_skb_proto(struct sk_buff *skb) +{ + switch (rmnet_map_data_ptr(skb)[0] & 0xF0) { + case RMNET_IP_VERSION_4: + skb->protocol = htons(ETH_P_IP); + break; + case RMNET_IP_VERSION_6: + skb->protocol = htons(ETH_P_IPV6); + break; + default: + skb->protocol = htons(ETH_P_MAP); + WARN_ON(1); + break; + } +} + +/* Allocate and populate an skb to contain the packet represented by the + * frag descriptor. + */ +static struct sk_buff *rmnet_alloc_skb(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port) +{ + struct sk_buff *head_skb, *current_skb, *skb; + struct skb_shared_info *shinfo; + struct rmnet_frag_descriptor *sub_frag, *tmp; + + /* Use the exact sizes if we know them (i.e. 
RSB/RSC, rmnet_perf) */ + if (frag_desc->hdrs_valid) { + u16 hdr_len = frag_desc->ip_len + frag_desc->trans_len; + + head_skb = alloc_skb(hdr_len + RMNET_MAP_DESC_HEADROOM, + GFP_ATOMIC); + if (!head_skb) + return NULL; + + skb_reserve(head_skb, RMNET_MAP_DESC_HEADROOM); + skb_put_data(head_skb, frag_desc->hdr_ptr, hdr_len); + skb_reset_network_header(head_skb); + + if (frag_desc->trans_len) + skb_set_transport_header(head_skb, frag_desc->ip_len); + + /* Packets that have no data portion don't need any frags */ + if (hdr_len == skb_frag_size(&frag_desc->frag)) + goto skip_frags; + + /* If the headers we added are the start of the page, + * we don't want to add them twice + */ + if (frag_desc->hdr_ptr == rmnet_frag_data_ptr(frag_desc)) { + if (!rmnet_frag_pull(frag_desc, port, hdr_len)) { + kfree_skb(head_skb); + return NULL; + } + } + } else { + /* Allocate enough space to avoid penalties in the stack + * from __pskb_pull_tail() + */ + head_skb = alloc_skb(256 + RMNET_MAP_DESC_HEADROOM, + GFP_ATOMIC); + if (!head_skb) + return NULL; + + skb_reserve(head_skb, RMNET_MAP_DESC_HEADROOM); + } + + /* Add main fragment */ + get_page(skb_frag_page(&frag_desc->frag)); + skb_add_rx_frag(head_skb, 0, skb_frag_page(&frag_desc->frag), + frag_desc->frag.page_offset, + skb_frag_size(&frag_desc->frag), + skb_frag_size(&frag_desc->frag)); + + shinfo = skb_shinfo(head_skb); + current_skb = head_skb; + + /* Add in any frags from rmnet_perf */ + list_for_each_entry_safe(sub_frag, tmp, &frag_desc->sub_frags, list) { + skb_frag_t *frag; + u32 frag_size; + + frag = &sub_frag->frag; + frag_size = skb_frag_size(frag); + +add_frag: + if (shinfo->nr_frags < MAX_SKB_FRAGS) { + get_page(skb_frag_page(frag)); + skb_add_rx_frag(current_skb, shinfo->nr_frags, + skb_frag_page(frag), frag->page_offset, + frag_size, frag_size); + if (current_skb != head_skb) { + head_skb->len += frag_size; + head_skb->data_len += frag_size; + } + } else { + /* Alloc a new skb and try again */ + skb = alloc_skb(0, GFP_ATOMIC); + if (!skb) + break; + + if (current_skb == head_skb) + shinfo->frag_list = skb; + else + current_skb->next = skb; + + current_skb = skb; + shinfo = skb_shinfo(current_skb); + goto add_frag; + } + + rmnet_recycle_frag_descriptor(sub_frag, port); + } + +skip_frags: + head_skb->dev = frag_desc->dev; + rmnet_set_skb_proto(head_skb); + + /* Handle any header metadata that needs to be updated after RSB/RSC + * segmentation + */ + if (frag_desc->ip_id_set) { + struct iphdr *iph; + + iph = (struct iphdr *)rmnet_map_data_ptr(head_skb); + csum_replace2(&iph->check, iph->id, frag_desc->ip_id); + iph->id = frag_desc->ip_id; + } + + if (frag_desc->tcp_seq_set) { + struct tcphdr *th; + + th = (struct tcphdr *) + (rmnet_map_data_ptr(head_skb) + frag_desc->ip_len); + th->seq = frag_desc->tcp_seq; + } + + /* Handle csum offloading */ + if (frag_desc->csum_valid && frag_desc->hdrs_valid) { + /* Set the partial checksum information */ + //rmnet_frag_partial_csum(head_skb, frag_desc); + WARN_ON(1); + } else if (frag_desc->csum_valid) { + /* Non-RSB/RSC/perf packet. The current checksum is fine */ + head_skb->ip_summed = CHECKSUM_UNNECESSARY; + } else if (frag_desc->hdrs_valid && + (frag_desc->trans_proto == IPPROTO_TCP || + frag_desc->trans_proto == IPPROTO_UDP)) { + /* Unfortunately, we have to fake a bad checksum here, since + * the original bad value is lost by the hardware. The only + * reliable way to do it is to calculate the actual checksum + * and corrupt it. 
+ */ + __sum16 *check; + __wsum csum; + unsigned int offset = skb_transport_offset(head_skb); + __sum16 pseudo; + + WARN_ON(1); + /* Calculate pseudo header and update header fields */ + if (frag_desc->ip_proto == 4) { + struct iphdr *iph = ip_hdr(head_skb); + __be16 tot_len = htons(head_skb->len); + + csum_replace2(&iph->check, iph->tot_len, tot_len); + iph->tot_len = tot_len; + pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + head_skb->len - + frag_desc->ip_len, + frag_desc->trans_proto, 0); + } else { + struct ipv6hdr *ip6h = ipv6_hdr(head_skb); + + ip6h->payload_len = htons(head_skb->len - + sizeof(*ip6h)); + pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, + head_skb->len - + frag_desc->ip_len, + frag_desc->trans_proto, 0); + } + + if (frag_desc->trans_proto == IPPROTO_TCP) { + check = &tcp_hdr(head_skb)->check; + } else { + udp_hdr(head_skb)->len = htons(head_skb->len - + frag_desc->ip_len); + check = &udp_hdr(head_skb)->check; + } + + *check = pseudo; + csum = skb_checksum(head_skb, offset, head_skb->len - offset, + 0); + /* Add 1 to corrupt. This cannot produce a final value of 0 + * since csum_fold() can't return a value of 0xFFFF + */ + *check = csum16_add(csum_fold(csum), htons(1)); + head_skb->ip_summed = CHECKSUM_NONE; + } + + /* Handle any rmnet_perf metadata */ + if (frag_desc->hash) { + head_skb->hash = frag_desc->hash; + head_skb->sw_hash = 1; + } + + if (frag_desc->flush_shs) + head_skb->cb[0] = 1; + + /* Handle coalesced packets */ + //if (frag_desc->gso_segs > 1) + // rmnet_frag_gso_stamp(head_skb, frag_desc); + + return head_skb; +} + +/* Deliver the packets contained within a frag descriptor */ +static void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port) +{ + struct sk_buff *skb; + + skb = rmnet_alloc_skb(frag_desc, port); + if (skb) + rmnet_deliver_skb(skb, port); + rmnet_recycle_frag_descriptor(frag_desc, port); +} + +/* Process a QMAPv5 packet header */ +static int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port, + struct list_head *list, + u16 len) +{ + int rc = 0; + + switch (rmnet_frag_get_next_hdr_type(frag_desc)) { + case RMNET_MAP_HEADER_TYPE_COALESCING: + rc = -1; + WARN_ON(1); + break; + case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD: + if (rmnet_frag_get_csum_valid(frag_desc)) { + frag_desc->csum_valid = true; + } else { + } + + if (!rmnet_frag_pull(frag_desc, port, + sizeof(struct rmnet_map_header) + + sizeof(struct rmnet_map_v5_csum_header))) { + rc = -EINVAL; + break; + } + + frag_desc->hdr_ptr = rmnet_frag_data_ptr(frag_desc); + + /* Remove padding only for csum offload packets. + * Coalesced packets should never have padding. 
+ */ + if (!rmnet_frag_trim(frag_desc, port, len)) { + rc = -EINVAL; + break; + } + + list_del_init(&frag_desc->list); + list_add_tail(&frag_desc->list, list); + break; + default: + qmap_hex_dump(__func__, rmnet_frag_data_ptr(frag_desc), 64); + rc = -EINVAL; + break; + } + + return rc; +} + +static void +__rmnet_frag_ingress_handler(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port) +{ + struct rmnet_map_header *qmap; + struct rmnet_endpoint *ep; + struct rmnet_frag_descriptor *frag, *tmp; + LIST_HEAD(segs); + u16 len, pad; + u8 mux_id; + + qmap = (struct rmnet_map_header *)skb_frag_address(&frag_desc->frag); + mux_id = qmap->mux_id; + pad = qmap->pad_len; + len = ntohs(qmap->pkt_len) - pad; + + if (qmap->cd_bit) { + goto recycle; + } + + if (mux_id >= RMNET_MAX_LOGICAL_EP) + goto recycle; + + ep = rmnet_get_endpoint(port, mux_id); + if (!ep) + goto recycle; + + frag_desc->dev = ep->rmnet_dev; + + /* Handle QMAPv5 packet */ + if (qmap->next_hdr && + (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE | + RMNET_FLAGS_INGRESS_MAP_CKSUMV5))) { + if (rmnet_frag_process_next_hdr_packet(frag_desc, port, &segs, + len)) + goto recycle; + } else { + /* We only have the main QMAP header to worry about */ + if (!rmnet_frag_pull(frag_desc, port, sizeof(*qmap))) + return; + + frag_desc->hdr_ptr = rmnet_frag_data_ptr(frag_desc); + + if (!rmnet_frag_trim(frag_desc, port, len)) + return; + + list_add_tail(&frag_desc->list, &segs); + } + + list_for_each_entry_safe(frag, tmp, &segs, list) { + list_del_init(&frag->list); + rmnet_frag_deliver(frag, port); + } + return; + +recycle: + rmnet_recycle_frag_descriptor(frag_desc, port); +} + +static void rmnet_frag_ingress_handler(struct sk_buff *skb, + struct rmnet_port *port) +{ + LIST_HEAD(desc_list); + int i = 0; + struct rmnet_nss_cb *nss_cb; + + /* Deaggregation and freeing of HW originating + * buffers is done within here + */ + while (skb) { + struct sk_buff *skb_frag; + + port->chain_head = NULL; + port->chain_tail = NULL; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + rmnet_frag_deaggregate(&skb_shinfo(skb)->frags[i], port, + &desc_list); + if (!list_empty(&desc_list)) { + struct rmnet_frag_descriptor *frag_desc, *tmp; + + list_for_each_entry_safe(frag_desc, tmp, + &desc_list, list) { + list_del_init(&frag_desc->list); + __rmnet_frag_ingress_handler(frag_desc, + port); + } + } + } + + nss_cb = rcu_dereference(rmnet_nss_callbacks); + if (nss_cb && port->chain_head) { + port->chain_head->cb[0] = 0; + netif_receive_skb(port->chain_head); + } + + skb_frag = skb_shinfo(skb)->frag_list; + skb_shinfo(skb)->frag_list = NULL; + consume_skb(skb); + skb = skb_frag; + } +} + +static void +rmnet_map_ingress_handler(struct sk_buff *skb, + struct rmnet_port *port) +{ + if (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE | + RMNET_FLAGS_INGRESS_MAP_CKSUMV5)) { + if (skb_is_nonlinear(skb)) { + rmnet_frag_ingress_handler(skb, port); + return; + } + } + + WARN_ON(1); +} + +static rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb); +static int rmnet_is_real_dev_registered(const struct net_device *real_dev) +{ + return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler; +} + + +/* Needs either rcu_read_lock() or rtnl lock */ +struct rmnet_port *rmnet_get_port(struct net_device *real_dev) +{ + if (rmnet_is_real_dev_registered(real_dev)) + return rcu_dereference_rtnl(real_dev->rx_handler_data); + else + return NULL; +} + +static rx_handler_result_t rmnet_rx_priv_handler(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + 
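/* rx handler attached to the rmnet_dataX virtual devices themselves (see rmnet_data_init()): frames are handed to the NSS offload engine via nss_tx(), while cb[0] == 1 marks frames NSS has already returned, which are passed up the stack instead. */ + 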
struct rmnet_nss_cb *nss_cb; + + if (!skb) + return RX_HANDLER_CONSUMED; + if (nss_debug) printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + + if (skb->pkt_type == PACKET_LOOPBACK) + return RX_HANDLER_PASS; + + /* Check this so that we dont loop around netif_receive_skb */ + if (skb->cb[0] == 1) { + skb->cb[0] = 0; + + skb->dev->stats.rx_packets++; + return RX_HANDLER_PASS; + } + + while (skb) { + struct sk_buff *skb_frag = skb_shinfo(skb)->frag_list; + + skb_shinfo(skb)->frag_list = NULL; + + nss_cb = rcu_dereference(rmnet_nss_callbacks); + if (nss_cb) + nss_cb->nss_tx(skb); + + skb = skb_frag; + } + + return RX_HANDLER_CONSUMED; +} + +/* Ingress / Egress Entry Points */ + +/* Processes packet as per ingress data format for receiving device. Logical + * endpoint is determined from packet inspection. Packet is then sent to the + * egress device listed in the logical endpoint configuration. + */ +static rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + struct rmnet_port *port; + struct net_device *dev; + + if (!skb) + goto done; + + if (nss_debug) printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + + if (skb->pkt_type == PACKET_LOOPBACK) + return RX_HANDLER_PASS; + + if (skb->protocol != htons(ETH_P_MAP)) { + WARN_ON(1); + return RX_HANDLER_PASS; + } + + dev = skb->dev; + port = rmnet_get_port(dev); + + if (port == NULL) + return RX_HANDLER_PASS; + + port->chain_head = NULL; + port->chain_tail = NULL; + + switch (port->rmnet_mode) { + case RMNET_EPMODE_VND: + rmnet_map_ingress_handler(skb, port); + break; + case RMNET_EPMODE_BRIDGE: + //rmnet_bridge_handler(skb, port->bridge_ep); + break; + } + +done: + return RX_HANDLER_CONSUMED; +} + +static void rmnet_descriptor_deinit(struct rmnet_port *port) +{ + struct rmnet_frag_descriptor_pool *pool; + struct rmnet_frag_descriptor *frag_desc, *tmp; + + pool = port->frag_desc_pool; + + list_for_each_entry_safe(frag_desc, tmp, &pool->free_list, list) { + kfree(frag_desc); + pool->pool_size--; + } + + kfree(pool); +} + +static int rmnet_descriptor_init(struct rmnet_port *port) +{ + struct rmnet_frag_descriptor_pool *pool; + int i; + + spin_lock_init(&port->desc_pool_lock); + pool = kzalloc(sizeof(*pool), GFP_ATOMIC); + if (!pool) + return -ENOMEM; + + INIT_LIST_HEAD(&pool->free_list); + port->frag_desc_pool = pool; + + for (i = 0; i < RMNET_FRAG_DESCRIPTOR_POOL_SIZE; i++) { + struct rmnet_frag_descriptor *frag_desc; + + frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC); + if (!frag_desc) + return -ENOMEM; + + INIT_LIST_HEAD(&frag_desc->list); + INIT_LIST_HEAD(&frag_desc->sub_frags); + list_add_tail(&frag_desc->list, &pool->free_list); + pool->pool_size++; + } + + return 0; +} + +struct rmnet_priv { + //struct rmnet_endpoint local_ep; + struct net_device *real_dev; + u8 mux_id; +}; + +static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct rmnet_priv *priv; + + if (nss_debug) printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + + priv = netdev_priv(dev); + if (priv->real_dev) { + add_qhdr_v5(skb, priv->mux_id); + skb->protocol = htons(ETH_P_MAP); + skb->dev = priv->real_dev; + dev_queue_xmit(skb); + dev->stats.tx_packets++; + //rmnet_egress_handler(skb); + } else { + //this_cpu_inc(priv->pcpu_stats->stats.tx_drops); + kfree_skb(skb); + } + return NETDEV_TX_OK; +} + +static int 
rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu) +{ + if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE) + return -EINVAL; + + rmnet_dev->mtu = new_mtu; + return 0; +} + +static const struct net_device_ops rmnet_vnd_ops = { + .ndo_start_xmit = rmnet_vnd_start_xmit, + .ndo_change_mtu = rmnet_vnd_change_mtu, + //.ndo_get_iflink = rmnet_vnd_get_iflink, + //.ndo_add_slave = rmnet_add_bridge, + //.ndo_del_slave = rmnet_del_bridge, + //.ndo_init = rmnet_vnd_init, + //.ndo_uninit = rmnet_vnd_uninit, + //.ndo_get_stats64 = rmnet_get_stats64, +}; + +static void rmnet_vnd_setup(struct net_device *rmnet_dev) +{ + rmnet_dev->netdev_ops = &rmnet_vnd_ops; + rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE; + rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM; + random_ether_addr(rmnet_dev->dev_addr); + rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN; + + /* Raw IP mode */ + rmnet_dev->header_ops = NULL; /* No header */ + rmnet_dev->type = ARPHRD_RAWIP; + rmnet_dev->hard_header_len = 0; + rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); + + //rmnet_dev->needs_free_netdev = true; + + rmnet_dev->hw_features = NETIF_F_RXCSUM; + rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + //rmnet_dev->hw_features |= NETIF_F_SG; + //rmnet_dev->hw_features |= NETIF_F_GRO_HW; +} +#else +#include +#include +#include +#include +#include +#include +#include +#include + +static uint nss_debug = 0; +module_param( nss_debug, uint, S_IRUGO | S_IWUSR); + +/* rmnet section */ + +#define RMNET_FLAGS_INGRESS_DEAGGREGATION (1U << 0) +#define RMNET_FLAGS_INGRESS_MAP_COMMANDS (1U << 1) +#define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2) +#define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3) +#define RMNET_FLAGS_INGRESS_COALESCE (1U << 4) +#define RMNET_FLAGS_INGRESS_MAP_CKSUMV5 (1U << 5) +#define RMNET_FLAGS_EGRESS_MAP_CKSUMV5 (1U << 6) + +#ifdef CONFIG_ARCH_IPQ807x +#define CONFIG_QCA_NSS_DRV +#endif +#ifdef CONFIG_QCA_NSS_DRV +#include "rmnet_nss.c" +#else +#include "rmnet_nss.h" +#endif + +#include "rmnet_vnd.c" +#include "rmnet_map_command.c" +#include "rmnet_map_data.c" +#include "rmnet_descriptor.c" +#include "rmnet_config.c" +#include "rmnet_handlers.c" + +struct rmnet_nss_cb *rmnet_nss_callbacks __rcu __read_mostly; + +void rmnet_data_init(struct net_device *real_dev, u32 nr_rmnet_devs) +{ + struct rmnet_port *port; + struct rmnet_endpoint *ep; + struct net_device *rmnet_dev = NULL; + u32 nr = 0; + struct rmnet_nss_cb *nss_cb; + int rc = 0; + + nss_cb = rcu_dereference(rmnet_nss_callbacks); + if (!nss_cb) + { +#ifdef CONFIG_QCA_NSS_DRV + pr_err("%s(): initializing rmnet_nss\n", __func__); + RCU_INIT_POINTER(rmnet_nss_callbacks, &rmnet_nss); +#endif + } + + rtnl_lock(); + rc = rmnet_register_real_device(real_dev); + rtnl_unlock(); + + if (rc) { + pr_err("%s rmnet_register_real_device = %d\n", __func__, rc); + return; + } + + port = rmnet_get_port_rtnl(real_dev); + port->data_format = RMNET_FLAGS_INGRESS_DEAGGREGATION + | RMNET_FLAGS_INGRESS_MAP_CKSUMV5 | RMNET_FLAGS_EGRESS_MAP_CKSUMV5; + port->rmnet_mode = RMNET_EPMODE_VND; + + for (nr = 0; nr < nr_rmnet_devs; nr++) { + u8 mux_id = 0x81+nr; + + ep = kzalloc(sizeof(*ep), GFP_ATOMIC); + + rtnl_lock(); + rmnet_dev = alloc_netdev(sizeof(struct rmnet_priv), + "rmnet_data%d", NET_NAME_PREDICTABLE, + rmnet_vnd_setup); + + rmnet_vnd_newlink(mux_id, rmnet_dev, port, real_dev, ep); + netdev_rx_handler_register(rmnet_dev, rmnet_rx_priv_handler, NULL); + rtnl_unlock(); + + hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]); + } + + port->nr_rmnet_devs = 
nr_rmnet_devs; +} + +void rmnet_data_deinit(struct net_device *real_dev, u32 nr_rmnet_devs) +{ + struct rmnet_port *port; + u32 nr = 0; + struct rmnet_nss_cb *nss_cb; + + if (!real_dev || !rmnet_is_real_dev_registered(real_dev)) + return; + + port = rmnet_get_port_rtnl(real_dev); + + for (nr = 0; nr < nr_rmnet_devs; nr++) { + struct rmnet_endpoint *ep; + u8 mux_id = 0x81+nr; + + ep = rmnet_get_endpoint(port, mux_id); + if (ep) { + hlist_del_init_rcu(&ep->hlnode); + rmnet_vnd_dellink(mux_id, port, ep); + synchronize_rcu(); + kfree(ep); + } + } + + rmnet_unregister_real_device(real_dev, port); + + nss_cb = rcu_dereference(rmnet_nss_callbacks); + if (nss_cb) { +#ifdef CONFIG_QCA_NSS_DRV + struct hlist_node *tmp; + struct rmnet_nss_ctx *ctx; + int bkt; + + pr_err("%s(): exiting rmnet_nss\n", __func__); + RCU_INIT_POINTER(rmnet_nss_callbacks, NULL); + + /* Tear down all NSS contexts */ + hash_for_each_safe(rmnet_nss_ctx_hashtable, bkt, tmp, ctx, hnode) + rmnet_nss_free_ctx(ctx); +#endif + } +} +#endif diff --git a/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_descriptor.c b/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_descriptor.c new file mode 100644 index 000000000..75006d1cf --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_descriptor.c @@ -0,0 +1,661 @@ +/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * RMNET Packet Descriptor Framework + * + */ + +#include +#include +#include +#include +#include "rmnet_config.h" +#include "rmnet_descriptor.h" +#include "rmnet_handlers.h" +#include "rmnet_private.h" +#include "rmnet_vnd.h" + +#define RMNET_FRAG_DESCRIPTOR_POOL_SIZE 64 +#define RMNET_DL_IND_HDR_SIZE (sizeof(struct rmnet_map_dl_ind_hdr) + \ + sizeof(struct rmnet_map_header) + \ + sizeof(struct rmnet_map_control_command_header)) +#define RMNET_DL_IND_TRL_SIZE (sizeof(struct rmnet_map_dl_ind_trl) + \ + sizeof(struct rmnet_map_header) + \ + sizeof(struct rmnet_map_control_command_header)) + +typedef void (*rmnet_perf_desc_hook_t)(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port); +typedef void (*rmnet_perf_chain_hook_t)(void); + +static struct rmnet_frag_descriptor * +rmnet_get_frag_descriptor(struct rmnet_port *port) +{ + struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool; + struct rmnet_frag_descriptor *frag_desc; + + spin_lock(&port->desc_pool_lock); + if (!list_empty(&pool->free_list)) { + frag_desc = list_first_entry(&pool->free_list, + struct rmnet_frag_descriptor, + list); + list_del_init(&frag_desc->list); + } else { + frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC); + if (!frag_desc) + goto out; + + INIT_LIST_HEAD(&frag_desc->list); + INIT_LIST_HEAD(&frag_desc->sub_frags); + pool->pool_size++; + } + +out: + spin_unlock(&port->desc_pool_lock); + return frag_desc; +} + +static void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port) +{ + struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool; + struct page *page = skb_frag_page(&frag_desc->frag); + + list_del(&frag_desc->list); + if (page) + put_page(page); + + memset(frag_desc, 0, sizeof(*frag_desc)); + INIT_LIST_HEAD(&frag_desc->list); + INIT_LIST_HEAD(&frag_desc->sub_frags); + spin_lock(&port->desc_pool_lock); + list_add_tail(&frag_desc->list, &pool->free_list); + spin_unlock(&port->desc_pool_lock); +} + +static void rmnet_descriptor_add_frag(struct rmnet_port *port, struct list_head *list, + struct page *p, u32 page_offset, u32 len) +{ + struct rmnet_frag_descriptor *frag_desc; + + frag_desc = rmnet_get_frag_descriptor(port); + if (!frag_desc) + return; + + rmnet_frag_fill(frag_desc, p, page_offset, len); + list_add_tail(&frag_desc->list, list); +} + +static u8 rmnet_frag_do_flow_control(struct rmnet_map_header *qmap, + struct rmnet_port *port, + int enable) +{ + struct rmnet_map_control_command *cmd; + struct rmnet_endpoint *ep; + struct net_device *vnd; + u16 ip_family; + u16 fc_seq; + u32 qos_id; + u8 mux_id; + int r; + + mux_id = qmap->mux_id; + cmd = (struct rmnet_map_control_command *) + ((char *)qmap + sizeof(*qmap)); + + if (mux_id >= RMNET_MAX_LOGICAL_EP) + return RX_HANDLER_CONSUMED; + + ep = rmnet_get_endpoint(port, mux_id); + if (!ep) + return RX_HANDLER_CONSUMED; + + vnd = ep->egress_dev; + + ip_family = cmd->flow_control.ip_family; + fc_seq = ntohs(cmd->flow_control.flow_control_seq_num); + qos_id = ntohl(cmd->flow_control.qos_id); + + /* Ignore the ip family and pass the sequence number for both v4 and v6 + * sequence. 
User space does not support creating dedicated flows for + * the 2 protocols + */ + r = rmnet_vnd_do_flow_control(vnd, enable); + if (r) + return RMNET_MAP_COMMAND_UNSUPPORTED; + else + return RMNET_MAP_COMMAND_ACK; +} + +static void rmnet_frag_send_ack(struct rmnet_map_header *qmap, + unsigned char type, + struct rmnet_port *port) +{ + struct rmnet_map_control_command *cmd; + struct net_device *dev = port->dev; + struct sk_buff *skb; + u16 alloc_len = ntohs(qmap->pkt_len) + sizeof(*qmap); + + skb = alloc_skb(alloc_len, GFP_ATOMIC); + if (!skb) + return; + + skb->protocol = htons(ETH_P_MAP); + skb->dev = dev; + + cmd = rmnet_map_get_cmd_start(skb); + cmd->cmd_type = type & 0x03; + + netif_tx_lock(dev); + dev->netdev_ops->ndo_start_xmit(skb, dev); + netif_tx_unlock(dev); +} + + +/* Process MAP command frame and send N/ACK message as appropriate. Message cmd + * name is decoded here and appropriate handler is called. + */ +static void rmnet_frag_command(struct rmnet_map_header *qmap, struct rmnet_port *port) +{ + struct rmnet_map_control_command *cmd; + unsigned char command_name; + unsigned char rc = 0; + + cmd = (struct rmnet_map_control_command *) + ((char *)qmap + sizeof(*qmap)); + command_name = cmd->command_name; + + switch (command_name) { + case RMNET_MAP_COMMAND_FLOW_ENABLE: + rc = rmnet_frag_do_flow_control(qmap, port, 1); + break; + + case RMNET_MAP_COMMAND_FLOW_DISABLE: + rc = rmnet_frag_do_flow_control(qmap, port, 0); + break; + + default: + rc = RMNET_MAP_COMMAND_UNSUPPORTED; + break; + } + if (rc == RMNET_MAP_COMMAND_ACK) + rmnet_frag_send_ack(qmap, rc, port); +} + +static void rmnet_frag_deaggregate(skb_frag_t *frag, struct rmnet_port *port, + struct list_head *list) +{ + struct rmnet_map_header *maph; + u8 *data = skb_frag_address(frag); + u32 offset = 0; + u32 packet_len; + + while (offset < skb_frag_size(frag)) { + maph = (struct rmnet_map_header *)data; + packet_len = ntohs(maph->pkt_len); + + /* Some hardware can send us empty frames. Catch them */ + if (packet_len == 0) + return; + + packet_len += sizeof(*maph); + + if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) { + packet_len += sizeof(struct rmnet_map_dl_csum_trailer); + WARN_ON(1); + } else if (port->data_format & + (RMNET_FLAGS_INGRESS_MAP_CKSUMV5 | + RMNET_FLAGS_INGRESS_COALESCE) && !maph->cd_bit) { + u32 hsize = 0; + u8 type; + + type = ((struct rmnet_map_v5_coal_header *) + (data + sizeof(*maph)))->header_type; + switch (type) { + case RMNET_MAP_HEADER_TYPE_COALESCING: + hsize = sizeof(struct rmnet_map_v5_coal_header); + break; + case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD: + hsize = sizeof(struct rmnet_map_v5_csum_header); + break; + } + + packet_len += hsize; + } + else { + //qmap_hex_dump(__func__, data, 64); + WARN_ON(1); + } + + if ((int)skb_frag_size(frag) - (int)packet_len < 0) + return; + + rmnet_descriptor_add_frag(port, list, skb_frag_page(frag), + frag->page_offset + offset, + packet_len); + + offset += packet_len; + data += packet_len; + } +} + +/* Allocate and populate an skb to contain the packet represented by the + * frag descriptor. + */ +static struct sk_buff *rmnet_alloc_skb(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port) +{ + struct sk_buff *head_skb, *current_skb, *skb; + struct skb_shared_info *shinfo; + struct rmnet_frag_descriptor *sub_frag, *tmp; + + /* Use the exact sizes if we know them (i.e. 
RSB/RSC, rmnet_perf) */ + if (frag_desc->hdrs_valid) { + u16 hdr_len = frag_desc->ip_len + frag_desc->trans_len; + + head_skb = alloc_skb(hdr_len + RMNET_MAP_DESC_HEADROOM, + GFP_ATOMIC); + if (!head_skb) + return NULL; + + skb_reserve(head_skb, RMNET_MAP_DESC_HEADROOM); + skb_put_data(head_skb, frag_desc->hdr_ptr, hdr_len); + skb_reset_network_header(head_skb); + + if (frag_desc->trans_len) + skb_set_transport_header(head_skb, frag_desc->ip_len); + + /* Packets that have no data portion don't need any frags */ + if (hdr_len == skb_frag_size(&frag_desc->frag)) + goto skip_frags; + + /* If the headers we added are the start of the page, + * we don't want to add them twice + */ + if (frag_desc->hdr_ptr == rmnet_frag_data_ptr(frag_desc)) { + if (!rmnet_frag_pull(frag_desc, port, hdr_len)) { + kfree_skb(head_skb); + return NULL; + } + } + } else { + /* Allocate enough space to avoid penalties in the stack + * from __pskb_pull_tail() + */ + head_skb = alloc_skb(256 + RMNET_MAP_DESC_HEADROOM, + GFP_ATOMIC); + if (!head_skb) + return NULL; + + skb_reserve(head_skb, RMNET_MAP_DESC_HEADROOM); + } + + /* Add main fragment */ + get_page(skb_frag_page(&frag_desc->frag)); + skb_add_rx_frag(head_skb, 0, skb_frag_page(&frag_desc->frag), + frag_desc->frag.page_offset, + skb_frag_size(&frag_desc->frag), + skb_frag_size(&frag_desc->frag)); + + shinfo = skb_shinfo(head_skb); + current_skb = head_skb; + + /* Add in any frags from rmnet_perf */ + list_for_each_entry_safe(sub_frag, tmp, &frag_desc->sub_frags, list) { + skb_frag_t *frag; + u32 frag_size; + + frag = &sub_frag->frag; + frag_size = skb_frag_size(frag); + +add_frag: + if (shinfo->nr_frags < MAX_SKB_FRAGS) { + get_page(skb_frag_page(frag)); + skb_add_rx_frag(current_skb, shinfo->nr_frags, + skb_frag_page(frag), frag->page_offset, + frag_size, frag_size); + if (current_skb != head_skb) { + head_skb->len += frag_size; + head_skb->data_len += frag_size; + } + } else { + /* Alloc a new skb and try again */ + skb = alloc_skb(0, GFP_ATOMIC); + if (!skb) + break; + + if (current_skb == head_skb) + shinfo->frag_list = skb; + else + current_skb->next = skb; + + current_skb = skb; + shinfo = skb_shinfo(current_skb); + goto add_frag; + } + + rmnet_recycle_frag_descriptor(sub_frag, port); + } + +skip_frags: + head_skb->dev = frag_desc->dev; + rmnet_set_skb_proto(head_skb); + + /* Handle any header metadata that needs to be updated after RSB/RSC + * segmentation + */ + if (frag_desc->ip_id_set) { + struct iphdr *iph; + + iph = (struct iphdr *)rmnet_map_data_ptr(head_skb); + csum_replace2(&iph->check, iph->id, frag_desc->ip_id); + iph->id = frag_desc->ip_id; + } + + if (frag_desc->tcp_seq_set) { + struct tcphdr *th; + + th = (struct tcphdr *) + (rmnet_map_data_ptr(head_skb) + frag_desc->ip_len); + th->seq = frag_desc->tcp_seq; + } + + /* Handle csum offloading */ + if (frag_desc->csum_valid && frag_desc->hdrs_valid) { + /* Set the partial checksum information */ + //rmnet_frag_partial_csum(head_skb, frag_desc); + WARN_ON(1); + } else if (frag_desc->csum_valid) { + /* Non-RSB/RSC/perf packet. The current checksum is fine */ + head_skb->ip_summed = CHECKSUM_UNNECESSARY; + } else if (frag_desc->hdrs_valid && + (frag_desc->trans_proto == IPPROTO_TCP || + frag_desc->trans_proto == IPPROTO_UDP)) { + /* Unfortunately, we have to fake a bad checksum here, since + * the original bad value is lost by the hardware. The only + * reliable way to do it is to calculate the actual checksum + * and corrupt it. 
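+		 * In short (a sketch of the arithmetic, not extra driver
+		 * logic): the pseudo-header sum is written into the checksum
+		 * field first, skb_checksum() then yields the true
+		 * one's-complement sum over the transport region, and
+		 *
+		 *	*check = csum16_add(csum_fold(csum), htons(1));
+		 *
+		 * stores the correct checksum plus one, so any receiver that
+		 * re-verifies it reaches the same "invalid" verdict the
+		 * hardware originally did.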
+ */ + __sum16 *check; + __wsum csum; + unsigned int offset = skb_transport_offset(head_skb); + __sum16 pseudo; + + WARN_ON(1); + /* Calculate pseudo header and update header fields */ + if (frag_desc->ip_proto == 4) { + struct iphdr *iph = ip_hdr(head_skb); + __be16 tot_len = htons(head_skb->len); + + csum_replace2(&iph->check, iph->tot_len, tot_len); + iph->tot_len = tot_len; + pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + head_skb->len - + frag_desc->ip_len, + frag_desc->trans_proto, 0); + } else { + struct ipv6hdr *ip6h = ipv6_hdr(head_skb); + + ip6h->payload_len = htons(head_skb->len - + sizeof(*ip6h)); + pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, + head_skb->len - + frag_desc->ip_len, + frag_desc->trans_proto, 0); + } + + if (frag_desc->trans_proto == IPPROTO_TCP) { + check = &tcp_hdr(head_skb)->check; + } else { + udp_hdr(head_skb)->len = htons(head_skb->len - + frag_desc->ip_len); + check = &udp_hdr(head_skb)->check; + } + + *check = pseudo; + csum = skb_checksum(head_skb, offset, head_skb->len - offset, + 0); + /* Add 1 to corrupt. This cannot produce a final value of 0 + * since csum_fold() can't return a value of 0xFFFF + */ + *check = csum16_add(csum_fold(csum), htons(1)); + head_skb->ip_summed = CHECKSUM_NONE; + } + + /* Handle any rmnet_perf metadata */ + if (frag_desc->hash) { + head_skb->hash = frag_desc->hash; + head_skb->sw_hash = 1; + } + + if (frag_desc->flush_shs) + head_skb->cb[0] = 1; + + /* Handle coalesced packets */ + //if (frag_desc->gso_segs > 1) + // rmnet_frag_gso_stamp(head_skb, frag_desc); + + return head_skb; +} + +/* Deliver the packets contained within a frag descriptor */ +static void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port) +{ + struct sk_buff *skb; + + skb = rmnet_alloc_skb(frag_desc, port); + if (skb) + rmnet_deliver_skb(skb, port); + rmnet_recycle_frag_descriptor(frag_desc, port); +} + +/* Process a QMAPv5 packet header */ +static int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port, + struct list_head *list, + u16 len) +{ + int rc = 0; + + switch (rmnet_frag_get_next_hdr_type(frag_desc)) { + case RMNET_MAP_HEADER_TYPE_COALESCING: + rc = -1; + WARN_ON(1); + break; + case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD: + if (rmnet_frag_get_csum_valid(frag_desc)) { + frag_desc->csum_valid = true; + } else { + } + + if (!rmnet_frag_pull(frag_desc, port, + sizeof(struct rmnet_map_header) + + sizeof(struct rmnet_map_v5_csum_header))) { + rc = -EINVAL; + break; + } + + frag_desc->hdr_ptr = rmnet_frag_data_ptr(frag_desc); + + /* Remove padding only for csum offload packets. + * Coalesced packets should never have padding. 
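+		 * (pad_len in the MAP header counts the trailing bytes the
+		 * sender added to round the payload up to a 4-byte multiple;
+		 * the caller already computed len as pkt_len minus pad_len,
+		 * so trimming the descriptor to len is what discards them.)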
+ */ + if (!rmnet_frag_trim(frag_desc, port, len)) { + rc = -EINVAL; + break; + } + + list_del_init(&frag_desc->list); + list_add_tail(&frag_desc->list, list); + break; + default: + //qmap_hex_dump(__func__, rmnet_frag_data_ptr(frag_desc), 64); + rc = -EINVAL; + break; + } + + return rc; +} + +static void +__rmnet_frag_ingress_handler(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port) +{ + struct rmnet_map_header *qmap; + struct rmnet_endpoint *ep; + struct rmnet_frag_descriptor *frag, *tmp; + LIST_HEAD(segs); + u16 len, pad; + u8 mux_id; + + qmap = (struct rmnet_map_header *)skb_frag_address(&frag_desc->frag); + mux_id = qmap->mux_id; + pad = qmap->pad_len; + len = ntohs(qmap->pkt_len) - pad; + + if (qmap->cd_bit) { + if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) { + //rmnet_frag_flow_command(qmap, port, len); + goto recycle; + } + + if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS) + rmnet_frag_command(qmap, port); + + goto recycle; + } + + if (mux_id >= RMNET_MAX_LOGICAL_EP) + goto recycle; + + ep = rmnet_get_endpoint(port, mux_id); + if (!ep) + goto recycle; + + frag_desc->dev = ep->egress_dev; + + /* Handle QMAPv5 packet */ + if (qmap->next_hdr && + (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE | + RMNET_FLAGS_INGRESS_MAP_CKSUMV5))) { + if (rmnet_frag_process_next_hdr_packet(frag_desc, port, &segs, + len)) + goto recycle; + } else { + /* We only have the main QMAP header to worry about */ + if (!rmnet_frag_pull(frag_desc, port, sizeof(*qmap))) + return; + + frag_desc->hdr_ptr = rmnet_frag_data_ptr(frag_desc); + + if (!rmnet_frag_trim(frag_desc, port, len)) + return; + + list_add_tail(&frag_desc->list, &segs); + } + + list_for_each_entry_safe(frag, tmp, &segs, list) { + list_del_init(&frag->list); + rmnet_frag_deliver(frag, port); + } + return; + +recycle: + rmnet_recycle_frag_descriptor(frag_desc, port); +} + +static void rmnet_frag_ingress_handler(struct sk_buff *skb, + struct rmnet_port *port) +{ + LIST_HEAD(desc_list); + int i = 0; + struct rmnet_nss_cb *nss_cb; + + /* Deaggregation and freeing of HW originating + * buffers is done within here + */ + while (skb) { + struct sk_buff *skb_frag; + + port->chain_head = NULL; + port->chain_tail = NULL; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + rmnet_frag_deaggregate(&skb_shinfo(skb)->frags[i], port, + &desc_list); + if (!list_empty(&desc_list)) { + struct rmnet_frag_descriptor *frag_desc, *tmp; + + list_for_each_entry_safe(frag_desc, tmp, + &desc_list, list) { + list_del_init(&frag_desc->list); + __rmnet_frag_ingress_handler(frag_desc, + port); + } + } + } + + nss_cb = rcu_dereference(rmnet_nss_callbacks); + if (nss_cb && port->chain_head) { + port->chain_head->cb[0] = 0; + netif_receive_skb(port->chain_head); + } + + skb_frag = skb_shinfo(skb)->frag_list; + skb_shinfo(skb)->frag_list = NULL; + consume_skb(skb); + skb = skb_frag; + } +} + +void rmnet_descriptor_deinit(struct rmnet_port *port) +{ + struct rmnet_frag_descriptor_pool *pool; + struct rmnet_frag_descriptor *frag_desc, *tmp; + + pool = port->frag_desc_pool; + + list_for_each_entry_safe(frag_desc, tmp, &pool->free_list, list) { + kfree(frag_desc); + pool->pool_size--; + } + + kfree(pool); +} + +int rmnet_descriptor_init(struct rmnet_port *port) +{ + struct rmnet_frag_descriptor_pool *pool; + int i; + + spin_lock_init(&port->desc_pool_lock); + pool = kzalloc(sizeof(*pool), GFP_ATOMIC); + if (!pool) + return -ENOMEM; + + INIT_LIST_HEAD(&pool->free_list); + port->frag_desc_pool = pool; + + for (i = 0; i < 
RMNET_FRAG_DESCRIPTOR_POOL_SIZE; i++) { + struct rmnet_frag_descriptor *frag_desc; + + frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC); + if (!frag_desc) + return -ENOMEM; + + INIT_LIST_HEAD(&frag_desc->list); + INIT_LIST_HEAD(&frag_desc->sub_frags); + list_add_tail(&frag_desc->list, &pool->free_list); + pool->pool_size++; + } + + return 0; +} diff --git a/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_descriptor.h b/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_descriptor.h new file mode 100644 index 000000000..962c663af --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_descriptor.h @@ -0,0 +1,146 @@ +/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * RMNET Packet Descriptor Framework + * + */ + +#ifndef _RMNET_DESCRIPTOR_H_ +#define _RMNET_DESCRIPTOR_H_ + +#include +#include +#include +#include "rmnet_config.h" +#include "rmnet_map.h" + +struct rmnet_frag_descriptor_pool { + struct list_head free_list; + u32 pool_size; +}; + +struct rmnet_frag_descriptor { + struct list_head list; + struct list_head sub_frags; + skb_frag_t frag; + u8 *hdr_ptr; + struct net_device *dev; + u32 hash; + __be32 tcp_seq; + __be16 ip_id; + u16 data_offset; + u16 gso_size; + u16 gso_segs; + u16 ip_len; + u16 trans_len; + u8 ip_proto; + u8 trans_proto; + u8 pkt_id; + u8 csum_valid:1, + hdrs_valid:1, + ip_id_set:1, + tcp_seq_set:1, + flush_shs:1, + reserved:3; +}; + +/* Descriptor management */ +static struct rmnet_frag_descriptor * +rmnet_get_frag_descriptor(struct rmnet_port *port); +static void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port); +static void rmnet_descriptor_add_frag(struct rmnet_port *port, struct list_head *list, + struct page *p, u32 page_offset, u32 len); + +/* QMAP command packets */ + +/* Ingress data handlers */ +static void rmnet_frag_deaggregate(skb_frag_t *frag, struct rmnet_port *port, + struct list_head *list); +static void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port); +static int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port, + struct list_head *list, + u16 len); +static void rmnet_frag_ingress_handler(struct sk_buff *skb, + struct rmnet_port *port); + +static int rmnet_descriptor_init(struct rmnet_port *port); +static void rmnet_descriptor_deinit(struct rmnet_port *port); + +static inline void *rmnet_frag_data_ptr(struct rmnet_frag_descriptor *frag_desc) +{ + return skb_frag_address(&frag_desc->frag); +} + +static inline void *rmnet_frag_pull(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port, + unsigned int size) +{ + if (size >= skb_frag_size(&frag_desc->frag)) { + pr_info("%s(): Pulling %u bytes from %u byte pkt. 
Dropping\n", + __func__, size, skb_frag_size(&frag_desc->frag)); + rmnet_recycle_frag_descriptor(frag_desc, port); + return NULL; + } + + frag_desc->frag.page_offset += size; + skb_frag_size_sub(&frag_desc->frag, size); + + return rmnet_frag_data_ptr(frag_desc); +} + +static inline void *rmnet_frag_trim(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port, + unsigned int size) +{ + if (!size) { + pr_info("%s(): Trimming %u byte pkt to 0. Dropping\n", + __func__, skb_frag_size(&frag_desc->frag)); + rmnet_recycle_frag_descriptor(frag_desc, port); + return NULL; + } + + if (size < skb_frag_size(&frag_desc->frag)) + skb_frag_size_set(&frag_desc->frag, size); + + return rmnet_frag_data_ptr(frag_desc); +} + +static inline void rmnet_frag_fill(struct rmnet_frag_descriptor *frag_desc, + struct page *p, u32 page_offset, u32 len) +{ + get_page(p); + __skb_frag_set_page(&frag_desc->frag, p); + skb_frag_size_set(&frag_desc->frag, len); + frag_desc->frag.page_offset = page_offset; +} + +static inline u8 +rmnet_frag_get_next_hdr_type(struct rmnet_frag_descriptor *frag_desc) +{ + unsigned char *data = rmnet_frag_data_ptr(frag_desc); + + data += sizeof(struct rmnet_map_header); + return ((struct rmnet_map_v5_coal_header *)data)->header_type; +} + +static inline bool +rmnet_frag_get_csum_valid(struct rmnet_frag_descriptor *frag_desc) +{ + unsigned char *data = rmnet_frag_data_ptr(frag_desc); + + data += sizeof(struct rmnet_map_header); + return ((struct rmnet_map_v5_csum_header *)data)->csum_valid_required; +} + +#endif /* _RMNET_DESCRIPTOR_H_ */ diff --git a/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_handlers.c b/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_handlers.c new file mode 100644 index 000000000..6f1ce9de8 --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_handlers.c @@ -0,0 +1,374 @@ +/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * RMNET Data ingress/egress handler + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include "rmnet_private.h" +#include "rmnet_config.h" +#include "rmnet_vnd.h" +#include "rmnet_map.h" +#include "rmnet_handlers.h" +#include "rmnet_descriptor.h" + +#define RMNET_IP_VERSION_4 0x40 +#define RMNET_IP_VERSION_6 0x60 + +/* Helper Functions */ + +static void rmnet_set_skb_proto(struct sk_buff *skb) +{ + switch (rmnet_map_data_ptr(skb)[0] & 0xF0) { + case RMNET_IP_VERSION_4: + skb->protocol = htons(ETH_P_IP); + break; + case RMNET_IP_VERSION_6: + skb->protocol = htons(ETH_P_IPV6); + break; + default: + skb->protocol = htons(ETH_P_MAP); + break; + } +} + +/* Generic handler */ + +static void +rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port) +{ + struct rmnet_nss_cb *nss_cb; + + rmnet_vnd_rx_fixup(skb->dev, skb->len); + + /* Pass off the packet to NSS driver if we can */ + nss_cb = rcu_dereference(rmnet_nss_callbacks); + if (nss_cb) { + if (!port->chain_head) + port->chain_head = skb; + else + skb_shinfo(port->chain_tail)->frag_list = skb; + + port->chain_tail = skb; + return; + } + + skb_reset_transport_header(skb); + skb_reset_network_header(skb); + + skb->pkt_type = PACKET_HOST; + skb_set_mac_header(skb, 0); + + //if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) { + //} else { + //if (!rmnet_check_skb_can_gro(skb)) + // gro_cells_receive(&priv->gro_cells, skb); + //else + netif_receive_skb(skb); + //} +} + +/* Deliver a list of skbs after undoing coalescing */ +static void rmnet_deliver_skb_list(struct sk_buff_head *head, + struct rmnet_port *port) +{ + struct sk_buff *skb; + + while ((skb = __skb_dequeue(head))) { + rmnet_set_skb_proto(skb); + rmnet_deliver_skb(skb, port); + } +} + +/* MAP handler */ + +static void +_rmnet_map_ingress_handler(struct sk_buff *skb, + struct rmnet_port *port) +{ + struct rmnet_map_header *qmap; + struct rmnet_endpoint *ep; + struct sk_buff_head list; + u16 len, pad; + u8 mux_id; + + /* We don't need the spinlock since only we touch this */ + __skb_queue_head_init(&list); + + qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb); + if (qmap->cd_bit) { + if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) { + //if (!rmnet_map_flow_command(skb, port, false)) + return; + } + + if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS) + return rmnet_map_command(skb, port); + + goto free_skb; + } + + mux_id = qmap->mux_id; + pad = qmap->pad_len; + len = ntohs(qmap->pkt_len) - pad; + + if (mux_id >= RMNET_MAX_LOGICAL_EP) + goto free_skb; + + ep = rmnet_get_endpoint(port, mux_id); + if (!ep) + goto free_skb; + + skb->dev = ep->egress_dev; + + /* Handle QMAPv5 packet */ + if (qmap->next_hdr && + (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE | + RMNET_FLAGS_INGRESS_MAP_CKSUMV5))) { + if (rmnet_map_process_next_hdr_packet(skb, &list, len)) + goto free_skb; + } else { + /* We only have the main QMAP header to worry about */ + pskb_pull(skb, sizeof(*qmap)); + + rmnet_set_skb_proto(skb); + + if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) { + //if (!rmnet_map_checksum_downlink_packet(skb, len + pad)) + // skb->ip_summed = CHECKSUM_UNNECESSARY; + } + + pskb_trim(skb, len); + + /* Push the single packet onto the list */ + __skb_queue_tail(&list, skb); + } + + rmnet_deliver_skb_list(&list, port); + return; + +free_skb: + kfree_skb(skb); +} + +static void +rmnet_map_ingress_handler(struct sk_buff *skb, + struct rmnet_port *port) +{ + struct sk_buff *skbn; + + if (port->data_format & 
(RMNET_FLAGS_INGRESS_COALESCE | + RMNET_FLAGS_INGRESS_MAP_CKSUMV5)) { + if (skb_is_nonlinear(skb)) { + rmnet_frag_ingress_handler(skb, port); + return; + } + } + + /* Deaggregation and freeing of HW originating + * buffers is done within here + */ + while (skb) { + struct sk_buff *skb_frag = skb_shinfo(skb)->frag_list; + + skb_shinfo(skb)->frag_list = NULL; + while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL) { + _rmnet_map_ingress_handler(skbn, port); + + if (skbn == skb) + goto next_skb; + } + + consume_skb(skb); +next_skb: + skb = skb_frag; + } +} + +static int rmnet_map_egress_handler(struct sk_buff *skb, + struct rmnet_port *port, u8 mux_id, + struct net_device *orig_dev) +{ + int required_headroom, additional_header_len, csum_type; + struct rmnet_map_header *map_header; + + additional_header_len = 0; + required_headroom = sizeof(struct rmnet_map_header); + csum_type = 0; + + if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) { + additional_header_len = sizeof(struct rmnet_map_ul_csum_header); + csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV4; + } else if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5) { + additional_header_len = sizeof(struct rmnet_map_v5_csum_header); + csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV5; + } + + required_headroom += additional_header_len; + + if (skb_headroom(skb) < required_headroom) { + if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC)) + return -ENOMEM; + } + + if (csum_type) + rmnet_map_checksum_uplink_packet(skb, orig_dev, csum_type); + + map_header = rmnet_map_add_map_header(skb, additional_header_len, 0, + port); + if (!map_header) + return -ENOMEM; + + map_header->mux_id = mux_id; + + if (port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION) { + if (rmnet_map_tx_agg_skip(skb, required_headroom)) + goto done; + + rmnet_map_tx_aggregate(skb, port); + return -EINPROGRESS; + } + +done: + skb->protocol = htons(ETH_P_MAP); + return 0; +} + +static void +rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev) +{ + if (bridge_dev) { + skb->dev = bridge_dev; + dev_queue_xmit(skb); + } +} + +/* Ingress / Egress Entry Points */ + +/* Processes packet as per ingress data format for receiving device. Logical + * endpoint is determined from packet inspection. Packet is then sent to the + * egress device listed in the logical endpoint configuration. 
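+ *
+ * (For orientation, this handler is not called directly: the rmnet core
+ * attaches it to the physical device, along the lines of
+ *
+ *	netdev_rx_handler_register(real_dev, rmnet_rx_handler, port);
+ *
+ * so every frame received on the modem interface enters here before the
+ * rest of the stack sees it.)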
+ */
+static rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
+{
+	struct sk_buff *skb = *pskb;
+	struct rmnet_port *port;
+	struct net_device *dev;
+
+	if (!skb)
+		goto done;
+
+	if (skb->pkt_type == PACKET_LOOPBACK)
+		return RX_HANDLER_PASS;
+
+	dev = skb->dev;
+	port = rmnet_get_port(dev);
+
+	port->chain_head = NULL;
+	port->chain_tail = NULL;
+
+	switch (port->rmnet_mode) {
+	case RMNET_EPMODE_VND:
+		rmnet_map_ingress_handler(skb, port);
+		break;
+	case RMNET_EPMODE_BRIDGE:
+		rmnet_bridge_handler(skb, port->bridge_ep);
+		break;
+	}
+
+done:
+	return RX_HANDLER_CONSUMED;
+}
+
+static rx_handler_result_t rmnet_rx_priv_handler(struct sk_buff **pskb)
+{
+	struct sk_buff *skb = *pskb;
+	struct rmnet_nss_cb *nss_cb;
+
+	if (!skb)
+		return RX_HANDLER_CONSUMED;
+	if (nss_debug) printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len);
+
+	if (skb->pkt_type == PACKET_LOOPBACK)
+		return RX_HANDLER_PASS;
+
+	/* Check this so that we don't loop around netif_receive_skb */
+	if (skb->cb[0] == 1) {
+		skb->cb[0] = 0;
+
+		skb->dev->stats.rx_packets++;
+		return RX_HANDLER_PASS;
+	}
+
+	while (skb) {
+		struct sk_buff *skb_frag = skb_shinfo(skb)->frag_list;
+
+		skb_shinfo(skb)->frag_list = NULL;
+
+		nss_cb = rcu_dereference(rmnet_nss_callbacks);
+		if (nss_cb)
+			nss_cb->nss_tx(skb);
+
+		skb = skb_frag;
+	}
+
+	return RX_HANDLER_CONSUMED;
+}
+
+/* Modifies packet as per logical endpoint configuration and egress data format
+ * for egress device configured in logical endpoint. Packet is then transmitted
+ * on the egress device.
+ */
+static void rmnet_egress_handler(struct sk_buff *skb)
+{
+	struct net_device *orig_dev;
+	struct rmnet_port *port;
+	struct rmnet_priv *priv;
+	u8 mux_id;
+	int err;
+	u32 skb_len;
+
+	skb_orphan(skb);
+
+	orig_dev = skb->dev;
+	priv = netdev_priv(orig_dev);
+	skb->dev = priv->real_dev;
+	mux_id = priv->mux_id;
+
+	port = rmnet_get_port(skb->dev);
+	if (!port)
+		goto drop;
+
+	skb_len = skb->len;
+	err = rmnet_map_egress_handler(skb, port, mux_id, orig_dev);
+	if (err == -ENOMEM)
+		goto drop;
+	else if (err == -EINPROGRESS) {
+		rmnet_vnd_tx_fixup(orig_dev, skb_len);
+		return;
+	}
+
+	rmnet_vnd_tx_fixup(orig_dev, skb_len);
+
+	dev_queue_xmit(skb);
+	return;
+
+drop:
+	this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
+	kfree_skb(skb);
+}
diff --git a/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_handlers.h b/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_handlers.h
new file mode 100644
index 000000000..29837baa7
--- /dev/null
+++ b/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_handlers.h
@@ -0,0 +1,32 @@
+/* Copyright (c) 2013, 2016-2017, 2019
+ * The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ * + * RMNET Data ingress/egress handler + * + */ + +#ifndef _RMNET_HANDLERS_H_ +#define _RMNET_HANDLERS_H_ + +#include "rmnet_config.h" + +enum rmnet_packet_context { + RMNET_NET_RX_CTX, + RMNET_WQ_CTX, +}; + +static void rmnet_egress_handler(struct sk_buff *skb); +static void rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port); +static void rmnet_set_skb_proto(struct sk_buff *skb); +static rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb); +static rx_handler_result_t rmnet_rx_priv_handler(struct sk_buff **pskb); +#endif /* _RMNET_HANDLERS_H_ */ diff --git a/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_map.h b/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_map.h new file mode 100644 index 000000000..ab4914933 --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_map.h @@ -0,0 +1,272 @@ +/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _RMNET_MAP_H_ +#define _RMNET_MAP_H_ + +#include +#include "rmnet_config.h" + +struct rmnet_map_control_command { + u8 command_name; + u8 cmd_type:2; + u8 reserved:6; + u16 reserved2; + u32 transaction_id; + union { + struct { + u16 ip_family:2; + u16 reserved:14; + __be16 flow_control_seq_num; + __be32 qos_id; + } flow_control; + u8 data[0]; + }; +} __aligned(1); + +enum rmnet_map_commands { + RMNET_MAP_COMMAND_NONE, + RMNET_MAP_COMMAND_FLOW_DISABLE, + RMNET_MAP_COMMAND_FLOW_ENABLE, + RMNET_MAP_COMMAND_FLOW_START = 7, + RMNET_MAP_COMMAND_FLOW_END = 8, + /* These should always be the last 2 elements */ + RMNET_MAP_COMMAND_UNKNOWN, + RMNET_MAP_COMMAND_ENUM_LENGTH +}; + +enum rmnet_map_v5_header_type { + RMNET_MAP_HEADER_TYPE_UNKNOWN, + RMNET_MAP_HEADER_TYPE_COALESCING = 0x1, + RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD = 0x2, + RMNET_MAP_HEADER_TYPE_ENUM_LENGTH +}; + +enum rmnet_map_v5_close_type { + RMNET_MAP_COAL_CLOSE_NON_COAL, + RMNET_MAP_COAL_CLOSE_IP_MISS, + RMNET_MAP_COAL_CLOSE_TRANS_MISS, + RMNET_MAP_COAL_CLOSE_HW, + RMNET_MAP_COAL_CLOSE_COAL, +}; + +enum rmnet_map_v5_close_value { + RMNET_MAP_COAL_CLOSE_HW_NL, + RMNET_MAP_COAL_CLOSE_HW_PKT, + RMNET_MAP_COAL_CLOSE_HW_BYTE, + RMNET_MAP_COAL_CLOSE_HW_TIME, + RMNET_MAP_COAL_CLOSE_HW_EVICT, +}; + +/* Main QMAP header */ +struct rmnet_map_header { + u8 pad_len:6; + u8 next_hdr:1; + u8 cd_bit:1; + u8 mux_id; + __be16 pkt_len; +} __aligned(1); + +/* QMAP v5 headers */ +struct rmnet_map_v5_csum_header { + u8 next_hdr:1; + u8 header_type:7; + u8 hw_reserved:7; + u8 csum_valid_required:1; + __be16 reserved; +} __aligned(1); + +struct rmnet_map_v5_nl_pair { + __be16 pkt_len; + u8 csum_error_bitmap; + u8 num_packets; +} __aligned(1); + +/* NLO: Number-length object */ +#define RMNET_MAP_V5_MAX_NLOS (6) +#define RMNET_MAP_V5_MAX_PACKETS (48) + +struct rmnet_map_v5_coal_header { + u8 next_hdr:1; + u8 header_type:7; + u8 reserved1:4; + u8 num_nlos:3; + u8 csum_valid:1; + u8 close_type:4; + u8 close_value:4; + u8 reserved2:4; + u8 virtual_channel_id:4; + + struct rmnet_map_v5_nl_pair nl_pairs[RMNET_MAP_V5_MAX_NLOS]; +} __aligned(1); + +/* QMAP v4 headers */ +struct 
rmnet_map_dl_csum_trailer { + u8 reserved1; + u8 valid:1; + u8 reserved2:7; + u16 csum_start_offset; + u16 csum_length; + __be16 csum_value; +} __aligned(1); + +struct rmnet_map_ul_csum_header { + __be16 csum_start_offset; + u16 csum_insert_offset:14; + u16 udp_ind:1; + u16 csum_enabled:1; +} __aligned(1); + +struct rmnet_map_control_command_header { + u8 command_name; + u8 cmd_type:2; + u8 reserved:5; + u8 e:1; + u16 source_id:15; + u16 ext:1; + u32 transaction_id; +} __aligned(1); + +struct rmnet_map_flow_info_le { + __be32 mux_id; + __be32 flow_id; + __be32 bytes; + __be32 pkts; +} __aligned(1); + +struct rmnet_map_flow_info_be { + u32 mux_id; + u32 flow_id; + u32 bytes; + u32 pkts; +} __aligned(1); + +struct rmnet_map_dl_ind_hdr { + union { + struct { + u32 seq; + u32 bytes; + u32 pkts; + u32 flows; + struct rmnet_map_flow_info_le flow[0]; + } le __aligned(1); + struct { + __be32 seq; + __be32 bytes; + __be32 pkts; + __be32 flows; + struct rmnet_map_flow_info_be flow[0]; + } be __aligned(1); + } __aligned(1); +} __aligned(1); + +struct rmnet_map_dl_ind_trl { + union { + __be32 seq_be; + u32 seq_le; + } __aligned(1); +} __aligned(1); + +struct rmnet_map_dl_ind { + u8 priority; + union { + void (*dl_hdr_handler)(struct rmnet_map_dl_ind_hdr *); + void (*dl_hdr_handler_v2)(struct rmnet_map_dl_ind_hdr *, + struct + rmnet_map_control_command_header *); + } __aligned(1); + union { + void (*dl_trl_handler)(struct rmnet_map_dl_ind_trl *); + void (*dl_trl_handler_v2)(struct rmnet_map_dl_ind_trl *, + struct + rmnet_map_control_command_header *); + } __aligned(1); + struct list_head list; +}; + +#define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \ + (Y)->data)->mux_id) +#define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \ + (Y)->data)->cd_bit) +#define RMNET_MAP_GET_PAD(Y) (((struct rmnet_map_header *) \ + (Y)->data)->pad_len) +#define RMNET_MAP_GET_CMD_START(Y) ((struct rmnet_map_control_command *) \ + ((Y)->data + \ + sizeof(struct rmnet_map_header))) +#define RMNET_MAP_GET_LENGTH(Y) (ntohs(((struct rmnet_map_header *) \ + (Y)->data)->pkt_len)) + +#define RMNET_MAP_DEAGGR_SPACING 64 +#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2) +#define RMNET_MAP_DESC_HEADROOM 128 + +#define RMNET_MAP_COMMAND_REQUEST 0 +#define RMNET_MAP_COMMAND_ACK 1 +#define RMNET_MAP_COMMAND_UNSUPPORTED 2 +#define RMNET_MAP_COMMAND_INVALID 3 + +#define RMNET_MAP_NO_PAD_BYTES 0 +#define RMNET_MAP_ADD_PAD_BYTES 1 + +static inline unsigned char *rmnet_map_data_ptr(struct sk_buff *skb) +{ + /* Nonlinear packets we receive are entirely within frag 0 */ + if (skb_is_nonlinear(skb) && skb->len == skb->data_len) + return skb_frag_address(skb_shinfo(skb)->frags); + + return skb->data; +} + +static inline struct rmnet_map_control_command * +rmnet_map_get_cmd_start(struct sk_buff *skb) +{ + unsigned char *data = rmnet_map_data_ptr(skb); + + data += sizeof(struct rmnet_map_header); + return (struct rmnet_map_control_command *)data; +} + +static inline u8 rmnet_map_get_next_hdr_type(struct sk_buff *skb) +{ + unsigned char *data = rmnet_map_data_ptr(skb); + + data += sizeof(struct rmnet_map_header); + return ((struct rmnet_map_v5_coal_header *)data)->header_type; +} + +static inline bool rmnet_map_get_csum_valid(struct sk_buff *skb) +{ + unsigned char *data = rmnet_map_data_ptr(skb); + + data += sizeof(struct rmnet_map_header); + return ((struct rmnet_map_v5_csum_header *)data)->csum_valid_required; +} + +static struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb, + struct rmnet_port *port); 
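+
+/* Usage sketch for rmnet_map_deaggregate(), mirroring the loop in
+ * rmnet_handlers.c (simplified; the real loop also special-cases the
+ * coalescing path, where deaggregate returns the source skb itself):
+ *
+ *	while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
+ *		_rmnet_map_ingress_handler(skbn, port);
+ *	consume_skb(skb);
+ */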
+static struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb, + int hdrlen, int pad, + struct rmnet_port *port); +static void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port); +static void rmnet_map_checksum_uplink_packet(struct sk_buff *skb, + struct net_device *orig_dev, + int csum_type); +static int rmnet_map_process_next_hdr_packet(struct sk_buff *skb, + struct sk_buff_head *list, + u16 len); +static int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset); +static void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port); +static void rmnet_map_tx_aggregate_init(struct rmnet_port *port); +static void rmnet_map_tx_aggregate_exit(struct rmnet_port *port); +static void rmnet_map_cmd_init(struct rmnet_port *port); +static void rmnet_map_cmd_exit(struct rmnet_port *port); +#endif /* _RMNET_MAP_H_ */ diff --git a/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_map_command.c b/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_map_command.c new file mode 100644 index 000000000..6c3318490 --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_map_command.c @@ -0,0 +1,143 @@ +/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include "rmnet_config.h" +#include "rmnet_map.h" +#include "rmnet_private.h" +#include "rmnet_vnd.h" + +#define RMNET_DL_IND_HDR_SIZE (sizeof(struct rmnet_map_dl_ind_hdr) + \ + sizeof(struct rmnet_map_header) + \ + sizeof(struct rmnet_map_control_command_header)) + +#define RMNET_MAP_CMD_SIZE (sizeof(struct rmnet_map_header) + \ + sizeof(struct rmnet_map_control_command_header)) + +#define RMNET_DL_IND_TRL_SIZE (sizeof(struct rmnet_map_dl_ind_trl) + \ + sizeof(struct rmnet_map_header) + \ + sizeof(struct rmnet_map_control_command_header)) + +static u8 rmnet_map_do_flow_control(struct sk_buff *skb, + struct rmnet_port *port, + int enable) +{ + struct rmnet_map_header *qmap; + struct rmnet_map_control_command *cmd; + struct rmnet_endpoint *ep; + struct net_device *vnd; + u16 ip_family; + u16 fc_seq; + u32 qos_id; + u8 mux_id; + int r; + + qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb); + mux_id = qmap->mux_id; + cmd = rmnet_map_get_cmd_start(skb); + + if (mux_id >= RMNET_MAX_LOGICAL_EP) { + kfree_skb(skb); + return RX_HANDLER_CONSUMED; + } + + ep = rmnet_get_endpoint(port, mux_id); + if (!ep) { + kfree_skb(skb); + return RX_HANDLER_CONSUMED; + } + + vnd = ep->egress_dev; + + ip_family = cmd->flow_control.ip_family; + fc_seq = ntohs(cmd->flow_control.flow_control_seq_num); + qos_id = ntohl(cmd->flow_control.qos_id); + + /* Ignore the ip family and pass the sequence number for both v4 and v6 + * sequence. 
User space does not support creating dedicated flows for + * the 2 protocols + */ + r = rmnet_vnd_do_flow_control(vnd, enable); + if (r) { + kfree_skb(skb); + return RMNET_MAP_COMMAND_UNSUPPORTED; + } else { + return RMNET_MAP_COMMAND_ACK; + } +} + +static void rmnet_map_send_ack(struct sk_buff *skb, + unsigned char type, + struct rmnet_port *port) +{ + struct rmnet_map_control_command *cmd; + struct net_device *dev = skb->dev; + + if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) + pskb_trim(skb, + skb->len - sizeof(struct rmnet_map_dl_csum_trailer)); + + skb->protocol = htons(ETH_P_MAP); + + cmd = rmnet_map_get_cmd_start(skb); + cmd->cmd_type = type & 0x03; + + netif_tx_lock(dev); + dev->netdev_ops->ndo_start_xmit(skb, dev); + netif_tx_unlock(dev); +} + +/* Process MAP command frame and send N/ACK message as appropriate. Message cmd + * name is decoded here and appropriate handler is called. + */ +static void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port) +{ + struct rmnet_map_control_command *cmd; + unsigned char command_name; + unsigned char rc = 0; + + cmd = rmnet_map_get_cmd_start(skb); + command_name = cmd->command_name; + + switch (command_name) { + case RMNET_MAP_COMMAND_FLOW_ENABLE: + rc = rmnet_map_do_flow_control(skb, port, 1); + break; + + case RMNET_MAP_COMMAND_FLOW_DISABLE: + rc = rmnet_map_do_flow_control(skb, port, 0); + break; + + default: + rc = RMNET_MAP_COMMAND_UNSUPPORTED; + kfree_skb(skb); + break; + } + if (rc == RMNET_MAP_COMMAND_ACK) + rmnet_map_send_ack(skb, rc, port); +} + + +static void rmnet_map_cmd_exit(struct rmnet_port *port) +{ + struct rmnet_map_dl_ind *tmp, *idx; + + list_for_each_entry_safe(tmp, idx, &port->dl_list, list) + list_del_rcu(&tmp->list); +} + +static void rmnet_map_cmd_init(struct rmnet_port *port) +{ + INIT_LIST_HEAD(&port->dl_list); + + port->dl_marker_flush = -1; +} diff --git a/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_map_data.c b/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_map_data.c new file mode 100644 index 000000000..783412c69 --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_map_data.c @@ -0,0 +1,682 @@ +/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * RMNET Data MAP protocol + * + */ + +#include +#include +#include +#include +#include "rmnet_config.h" +#include "rmnet_map.h" +#include "rmnet_private.h" +#include "rmnet_handlers.h" + +#define RMNET_MAP_PKT_COPY_THRESHOLD 64 +#define RMNET_MAP_DEAGGR_SPACING 64 +#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2) + +struct rmnet_map_coal_metadata { + void *ip_header; + void *trans_header; + u16 ip_len; + u16 trans_len; + u16 data_offset; + u16 data_len; + u8 ip_proto; + u8 trans_proto; + u8 pkt_id; + u8 pkt_count; +}; + +static __sum16 *rmnet_map_get_csum_field(unsigned char protocol, + const void *txporthdr) +{ + __sum16 *check = NULL; + + switch (protocol) { + case IPPROTO_TCP: + check = &(((struct tcphdr *)txporthdr)->check); + break; + + case IPPROTO_UDP: + check = &(((struct udphdr *)txporthdr)->check); + break; + + default: + check = NULL; + break; + } + + return check; +} + +static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr) +{ + struct iphdr *ip4h = (struct iphdr *)iphdr; + void *txphdr; + u16 *csum; + + txphdr = iphdr + ip4h->ihl * 4; + + if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) { + csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr); + *csum = ~(*csum); + } +} + +static void +rmnet_map_ipv4_ul_csum_header(void *iphdr, + struct rmnet_map_ul_csum_header *ul_header, + struct sk_buff *skb) +{ + struct iphdr *ip4h = (struct iphdr *)iphdr; + __be16 *hdr = (__be16 *)ul_header, offset; + + offset = htons((__force u16)(skb_transport_header(skb) - + (unsigned char *)iphdr)); + ul_header->csum_start_offset = offset; + ul_header->csum_insert_offset = skb->csum_offset; + ul_header->csum_enabled = 1; + if (ip4h->protocol == IPPROTO_UDP) + ul_header->udp_ind = 1; + else + ul_header->udp_ind = 0; + + /* Changing remaining fields to network order */ + hdr++; + *hdr = htons((__force u16)*hdr); + + skb->ip_summed = CHECKSUM_NONE; + + rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr); +} + +#if IS_ENABLED(CONFIG_IPV6) +static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr) +{ + struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr; + void *txphdr; + u16 *csum; + + txphdr = ip6hdr + sizeof(struct ipv6hdr); + + if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) { + csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr); + *csum = ~(*csum); + } +} + +static void +rmnet_map_ipv6_ul_csum_header(void *ip6hdr, + struct rmnet_map_ul_csum_header *ul_header, + struct sk_buff *skb) +{ + struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr; + __be16 *hdr = (__be16 *)ul_header, offset; + + offset = htons((__force u16)(skb_transport_header(skb) - + (unsigned char *)ip6hdr)); + ul_header->csum_start_offset = offset; + ul_header->csum_insert_offset = skb->csum_offset; + ul_header->csum_enabled = 1; + + if (ip6h->nexthdr == IPPROTO_UDP) + ul_header->udp_ind = 1; + else + ul_header->udp_ind = 0; + + /* Changing remaining fields to network order */ + hdr++; + *hdr = htons((__force u16)*hdr); + + skb->ip_summed = CHECKSUM_NONE; + + rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr); +} +#endif + +/* Adds MAP header to front of skb->data + * Padding is calculated and set appropriately in MAP header. Mux ID is + * initialized to 0. 
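+ *
+ * Worked example (illustrative sizes): map_datalen = 1501 gives
+ * padding = ALIGN(1501, 4) - 1501 = 3, so the header is written with
+ * pkt_len = htons(1504) and pad_len = 3; a payload already on a 4-byte
+ * boundary goes out unpadded.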
+ */ +static struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb, + int hdrlen, int pad, + struct rmnet_port *port) +{ + struct rmnet_map_header *map_header; + u32 padding, map_datalen; + u8 *padbytes; + + map_datalen = skb->len - hdrlen; + map_header = (struct rmnet_map_header *) + skb_push(skb, sizeof(struct rmnet_map_header)); + memset(map_header, 0, sizeof(struct rmnet_map_header)); + + /* Set next_hdr bit for csum offload packets */ + if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5) + map_header->next_hdr = 1; + + if (pad == RMNET_MAP_NO_PAD_BYTES) { + map_header->pkt_len = htons(map_datalen); + return map_header; + } + + padding = ALIGN(map_datalen, 4) - map_datalen; + + if (padding == 0) + goto done; + + if (skb_tailroom(skb) < padding) + return NULL; + + padbytes = (u8 *)skb_put(skb, padding); + memset(padbytes, 0, padding); + +done: + map_header->pkt_len = htons(map_datalen + padding); + map_header->pad_len = padding & 0x3F; + + return map_header; +} + +/* Deaggregates a single packet + * A whole new buffer is allocated for each portion of an aggregated frame. + * Caller should keep calling deaggregate() on the source skb until 0 is + * returned, indicating that there are no more packets to deaggregate. Caller + * is responsible for freeing the original skb. + */ +static struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb, + struct rmnet_port *port) +{ + struct rmnet_map_header *maph; + struct sk_buff *skbn; + unsigned char *data = rmnet_map_data_ptr(skb), *next_hdr = NULL; + u32 packet_len; + + if (skb->len == 0) + return NULL; + + maph = (struct rmnet_map_header *)data; + packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header); + + if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) + packet_len += sizeof(struct rmnet_map_dl_csum_trailer); + else if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5) { + if (!maph->cd_bit) { + packet_len += sizeof(struct rmnet_map_v5_csum_header); + + /* Coalescing headers require MAPv5 */ + next_hdr = data + sizeof(*maph); + } + } + + if (((int)skb->len - (int)packet_len) < 0) + return NULL; + + /* Some hardware can send us empty frames. 
Catch them */ + if (ntohs(maph->pkt_len) == 0) + return NULL; + + if (next_hdr && + ((struct rmnet_map_v5_coal_header *)next_hdr)->header_type == + RMNET_MAP_HEADER_TYPE_COALESCING) + return skb; + + if (skb_is_nonlinear(skb)) { + skb_frag_t *frag0 = skb_shinfo(skb)->frags; + struct page *page = skb_frag_page(frag0); + + skbn = alloc_skb(RMNET_MAP_DEAGGR_HEADROOM, GFP_ATOMIC); + if (!skbn) + return NULL; + + skb_append_pagefrags(skbn, page, frag0->page_offset, + packet_len); + skbn->data_len += packet_len; + skbn->len += packet_len; + } else { + skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, + GFP_ATOMIC); + if (!skbn) + return NULL; + + skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM); + skb_put(skbn, packet_len); + memcpy(skbn->data, data, packet_len); + } + + pskb_pull(skb, packet_len); + + return skbn; +} + +static void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb, + struct net_device *orig_dev) +{ + struct rmnet_priv *priv = netdev_priv(orig_dev); + struct rmnet_map_ul_csum_header *ul_header; + void *iphdr; + + ul_header = (struct rmnet_map_ul_csum_header *) + skb_push(skb, sizeof(struct rmnet_map_ul_csum_header)); + + if (unlikely(!(orig_dev->features & + (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))) + goto sw_csum; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + iphdr = (char *)ul_header + + sizeof(struct rmnet_map_ul_csum_header); + + if (skb->protocol == htons(ETH_P_IP)) { + rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb); + priv->stats.csum_hw++; + return; + } else if (skb->protocol == htons(ETH_P_IPV6)) { +#if IS_ENABLED(CONFIG_IPV6) + rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb); + priv->stats.csum_hw++; + return; +#else + priv->stats.csum_err_invalid_ip_version++; + goto sw_csum; +#endif + } else { + priv->stats.csum_err_invalid_ip_version++; + } + } + +sw_csum: + ul_header->csum_start_offset = 0; + ul_header->csum_insert_offset = 0; + ul_header->csum_enabled = 0; + ul_header->udp_ind = 0; + + priv->stats.csum_sw++; +} + +static void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb, + struct net_device *orig_dev) +{ + struct rmnet_priv *priv = netdev_priv(orig_dev); + struct rmnet_map_v5_csum_header *ul_header; + + ul_header = (struct rmnet_map_v5_csum_header *) + skb_push(skb, sizeof(*ul_header)); + memset(ul_header, 0, sizeof(*ul_header)); + ul_header->header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + void *iph = (char *)ul_header + sizeof(*ul_header); + void *trans; + __sum16 *check; + u8 proto; + + if (skb->protocol == htons(ETH_P_IP)) { + u16 ip_len = ((struct iphdr *)iph)->ihl * 4; + + proto = ((struct iphdr *)iph)->protocol; + trans = iph + ip_len; + } else if (skb->protocol == htons(ETH_P_IPV6)) { + u16 ip_len = sizeof(struct ipv6hdr); + + proto = ((struct ipv6hdr *)iph)->nexthdr; + trans = iph + ip_len; + } else { + priv->stats.csum_err_invalid_ip_version++; + goto sw_csum; + } + + check = rmnet_map_get_csum_field(proto, trans); + if (check) { + *check = 0; + skb->ip_summed = CHECKSUM_NONE; + /* Ask for checksum offloading */ + ul_header->csum_valid_required = 1; + priv->stats.csum_hw++; + return; + } + } + +sw_csum: + priv->stats.csum_sw++; +} + + +/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP + * packets that are supported for UL checksum offload. 
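+ *
+ * Worked example for the MAPv4 variant (illustrative offsets): for an
+ * IPv4/TCP packet with a 20-byte IP header, csum_start_offset becomes
+ * htons(20), the transport header offset relative to the IP header, and
+ * csum_insert_offset carries skb->csum_offset, which is 16 for TCP, so
+ * the device writes the final checksum 36 bytes into the packet.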
+ */
+void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+				      struct net_device *orig_dev,
+				      int csum_type)
+{
+	switch (csum_type) {
+	case RMNET_FLAGS_EGRESS_MAP_CKSUMV4:
+		rmnet_map_v4_checksum_uplink_packet(skb, orig_dev);
+		break;
+	case RMNET_FLAGS_EGRESS_MAP_CKSUMV5:
+		rmnet_map_v5_checksum_uplink_packet(skb, orig_dev);
+		break;
+	default:
+		break;
+	}
+}
+
+static void rmnet_map_move_headers(struct sk_buff *skb)
+{
+	struct iphdr *iph;
+	int ip_len; /* signed: ipv6_skip_exthdr() can return a negative errno */
+	u16 trans_len = 0;
+	u8 proto;
+
+	/* This only applies to non-linear SKBs */
+	if (!skb_is_nonlinear(skb))
+		return;
+
+	iph = (struct iphdr *)rmnet_map_data_ptr(skb);
+	if (iph->version == 4) {
+		ip_len = iph->ihl * 4;
+		proto = iph->protocol;
+		if (iph->frag_off & htons(IP_OFFSET))
+			/* No transport header information */
+			goto pull;
+	} else if (iph->version == 6) {
+		struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;
+		__be16 frag_off;
+		u8 nexthdr = ip6h->nexthdr;
+
+		ip_len = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr,
+					  &frag_off);
+		if (ip_len < 0)
+			return;
+
+		proto = nexthdr;
+	} else {
+		return;
+	}
+
+	if (proto == IPPROTO_TCP) {
+		struct tcphdr *tp = (struct tcphdr *)((u8 *)iph + ip_len);
+
+		trans_len = tp->doff * 4;
+	} else if (proto == IPPROTO_UDP) {
+		trans_len = sizeof(struct udphdr);
+	} else if (proto == NEXTHDR_FRAGMENT) {
+		/* Non-first fragments don't have the fragment length added by
+		 * ipv6_skip_exthdr() and show up as proto NEXTHDR_FRAGMENT, so
+		 * we account for the length here.
+		 */
+		ip_len += sizeof(struct frag_hdr);
+	}
+
+pull:
+	__pskb_pull_tail(skb, ip_len + trans_len);
+	skb_reset_network_header(skb);
+	if (trans_len)
+		skb_set_transport_header(skb, ip_len);
+}
+
+
+/* Process a QMAPv5 packet header */
+static int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
+					     struct sk_buff_head *list,
+					     u16 len)
+{
+	struct rmnet_priv *priv = netdev_priv(skb->dev);
+	int rc = 0;
+
+	switch (rmnet_map_get_next_hdr_type(skb)) {
+	case RMNET_MAP_HEADER_TYPE_COALESCING:
+		priv->stats.coal.coal_rx++;
+		break;
+	case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
+		if (rmnet_map_get_csum_valid(skb)) {
+			priv->stats.csum_ok++;
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		} else {
+			priv->stats.csum_valid_unset++;
+		}
+
+		/* Pull unnecessary headers and move the rest to the linear
+		 * section of the skb.
+		 */
+		pskb_pull(skb,
+			  (sizeof(struct rmnet_map_header) +
+			   sizeof(struct rmnet_map_v5_csum_header)));
+		rmnet_map_move_headers(skb);
+
+		/* Remove padding only for csum offload packets.
+		 * Coalesced packets should never have padding.
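+		 * (Wire layout handled by this case, for reference: a 4-byte
+		 * MAP header with next_hdr = 1, followed by the 4-byte
+		 * rmnet_map_v5_csum_header. Its csum_valid_required bit is
+		 * what let us mark the skb CHECKSUM_UNNECESSARY above.)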
+ */ + pskb_trim(skb, len); + __skb_queue_tail(list, skb); + break; + default: + rc = -EINVAL; + break; + } + + return rc; +} + +long rmnet_agg_time_limit __read_mostly = 1000000L; +long rmnet_agg_bypass_time __read_mostly = 10000000L; + +static int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset) +{ + u8 *packet_start = skb->data + offset; + int is_icmp = 0; + + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *ip4h = (struct iphdr *)(packet_start); + + if (ip4h->protocol == IPPROTO_ICMP) + is_icmp = 1; + } else if (skb->protocol == htons(ETH_P_IPV6)) { + struct ipv6hdr *ip6h = (struct ipv6hdr *)(packet_start); + + if (ip6h->nexthdr == IPPROTO_ICMPV6) { + is_icmp = 1; + } else if (ip6h->nexthdr == NEXTHDR_FRAGMENT) { + struct frag_hdr *frag; + + frag = (struct frag_hdr *)(packet_start + + sizeof(struct ipv6hdr)); + if (frag->nexthdr == IPPROTO_ICMPV6) + is_icmp = 1; + } + } + + return is_icmp; +} + +static void rmnet_map_flush_tx_packet_work(struct work_struct *work) +{ + struct sk_buff *skb = NULL; + struct rmnet_port *port; + unsigned long flags; + + port = container_of(work, struct rmnet_port, agg_wq); + + spin_lock_irqsave(&port->agg_lock, flags); + if (likely(port->agg_state == -EINPROGRESS)) { + /* Buffer may have already been shipped out */ + if (likely(port->agg_skb)) { + skb = port->agg_skb; + port->agg_skb = NULL; + port->agg_count = 0; + memset(&port->agg_time, 0, sizeof(struct timespec)); + } + port->agg_state = 0; + } + + spin_unlock_irqrestore(&port->agg_lock, flags); + if (skb) + dev_queue_xmit(skb); +} + +static enum hrtimer_restart rmnet_map_flush_tx_packet_queue(struct hrtimer *t) +{ + struct rmnet_port *port; + + port = container_of(t, struct rmnet_port, hrtimer); + + schedule_work(&port->agg_wq); + return HRTIMER_NORESTART; +} + +static void rmnet_map_linearize_copy(struct sk_buff *dst, struct sk_buff *src) +{ + unsigned int linear = src->len - src->data_len, target = src->len; + unsigned char *src_buf; + struct sk_buff *skb; + + src_buf = src->data; + skb_put_data(dst, src_buf, linear); + target -= linear; + + skb = src; + + while (target) { + unsigned int i = 0, non_linear = 0; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + non_linear = skb_frag_size(&skb_shinfo(skb)->frags[i]); + src_buf = skb_frag_address(&skb_shinfo(skb)->frags[i]); + + skb_put_data(dst, src_buf, non_linear); + target -= non_linear; + } + + if (skb_shinfo(skb)->frag_list) { + skb = skb_shinfo(skb)->frag_list; + continue; + } + + if (skb->next) + skb = skb->next; + } +} + +static void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port) +{ + struct timespec diff, last; + int size, agg_count = 0; + struct sk_buff *agg_skb; + unsigned long flags; + +new_packet: + spin_lock_irqsave(&port->agg_lock, flags); + memcpy(&last, &port->agg_last, sizeof(struct timespec)); + getnstimeofday(&port->agg_last); + + if (!port->agg_skb) { + /* Check to see if we should agg first. If the traffic is very + * sparse, don't aggregate. 
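+		 * (With the defaults from rmnet_map_tx_aggregate_init(),
+		 * 8192 bytes / 20 packets / 3 ms, plus the 10 ms
+		 * rmnet_agg_bypass_time window, a packet arriving more than
+		 * 10 ms after the previous one, or one too large to fit in
+		 * the aggregate, is transmitted immediately on its own.)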
We will need to tune this later + */ + diff = timespec_sub(port->agg_last, last); + size = port->egress_agg_params.agg_size - skb->len; + + if (diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_bypass_time || + size <= 0) { + spin_unlock_irqrestore(&port->agg_lock, flags); + skb->protocol = htons(ETH_P_MAP); + dev_queue_xmit(skb); + return; + } + + port->agg_skb = alloc_skb(port->egress_agg_params.agg_size, + GFP_ATOMIC); + if (!port->agg_skb) { + port->agg_skb = 0; + port->agg_count = 0; + memset(&port->agg_time, 0, sizeof(struct timespec)); + spin_unlock_irqrestore(&port->agg_lock, flags); + skb->protocol = htons(ETH_P_MAP); + dev_queue_xmit(skb); + return; + } + rmnet_map_linearize_copy(port->agg_skb, skb); + port->agg_skb->dev = skb->dev; + port->agg_skb->protocol = htons(ETH_P_MAP); + port->agg_count = 1; + getnstimeofday(&port->agg_time); + dev_kfree_skb_any(skb); + goto schedule; + } + diff = timespec_sub(port->agg_last, port->agg_time); + size = port->egress_agg_params.agg_size - port->agg_skb->len; + + if (skb->len > size || + port->agg_count >= port->egress_agg_params.agg_count || + diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_time_limit) { + agg_skb = port->agg_skb; + agg_count = port->agg_count; + port->agg_skb = 0; + port->agg_count = 0; + memset(&port->agg_time, 0, sizeof(struct timespec)); + port->agg_state = 0; + spin_unlock_irqrestore(&port->agg_lock, flags); + hrtimer_cancel(&port->hrtimer); + dev_queue_xmit(agg_skb); + goto new_packet; + } + + rmnet_map_linearize_copy(port->agg_skb, skb); + port->agg_count++; + dev_kfree_skb_any(skb); + +schedule: + if (port->agg_state != -EINPROGRESS) { + port->agg_state = -EINPROGRESS; + hrtimer_start(&port->hrtimer, + ns_to_ktime(port->egress_agg_params.agg_time), + HRTIMER_MODE_REL); + } + spin_unlock_irqrestore(&port->agg_lock, flags); +} + +static void rmnet_map_tx_aggregate_init(struct rmnet_port *port) +{ + hrtimer_init(&port->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + port->hrtimer.function = rmnet_map_flush_tx_packet_queue; + port->egress_agg_params.agg_size = 8192; + port->egress_agg_params.agg_count = 20; + port->egress_agg_params.agg_time = 3000000; + spin_lock_init(&port->agg_lock); + + INIT_WORK(&port->agg_wq, rmnet_map_flush_tx_packet_work); +} + +static void rmnet_map_tx_aggregate_exit(struct rmnet_port *port) +{ + unsigned long flags; + + hrtimer_cancel(&port->hrtimer); + cancel_work_sync(&port->agg_wq); + + spin_lock_irqsave(&port->agg_lock, flags); + if (port->agg_state == -EINPROGRESS) { + if (port->agg_skb) { + kfree_skb(port->agg_skb); + port->agg_skb = NULL; + port->agg_count = 0; + memset(&port->agg_time, 0, sizeof(struct timespec)); + } + + port->agg_state = 0; + } + + spin_unlock_irqrestore(&port->agg_lock, flags); +} diff --git a/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_private.h b/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_private.h new file mode 100644 index 000000000..d384b7b9a --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_private.h @@ -0,0 +1,34 @@ +/* Copyright (c) 2013-2014, 2016-2019 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _RMNET_PRIVATE_H_
+#define _RMNET_PRIVATE_H_
+
+#define RMNET_MAX_PACKET_SIZE 16384
+#define RMNET_DFLT_PACKET_SIZE 1500
+#define RMNET_NEEDED_HEADROOM 16
+#define RMNET_TX_QUEUE_LEN 1000
+
+/* Constants */
+#define RMNET_EGRESS_FORMAT_AGGREGATION BIT(31)
+#define RMNET_INGRESS_FORMAT_DL_MARKER_V1 BIT(30)
+#define RMNET_INGRESS_FORMAT_DL_MARKER_V2 BIT(29)
+
+#define RMNET_INGRESS_FORMAT_DL_MARKER (RMNET_INGRESS_FORMAT_DL_MARKER_V1 |\
+					RMNET_INGRESS_FORMAT_DL_MARKER_V2)
+
+/* Replace skb->dev to a virtual rmnet device and pass up the stack */
+#define RMNET_EPMODE_VND (1)
+/* Pass the frame directly to another device with dev_queue_xmit() */
+#define RMNET_EPMODE_BRIDGE (2)
+
+#endif /* _RMNET_PRIVATE_H_ */
diff --git a/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_trace.h b/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_trace.h
new file mode 100644
index 000000000..d453fc5d0
--- /dev/null
+++ b/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_trace.h
@@ -0,0 +1,257 @@
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rmnet
+#define TRACE_INCLUDE_FILE rmnet_trace
+
+#if !defined(_RMNET_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _RMNET_TRACE_H_
+
+#include <linux/skbuff.h>
+#include <linux/tracepoint.h>
+#include <linux/version.h>
+
+/*****************************************************************************/
+/* Trace events for rmnet module */
+/*****************************************************************************/
+DECLARE_EVENT_CLASS
+	(rmnet_mod_template,
+
+	 TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
+		  u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
+
+	 TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2),
+
+	 TP_STRUCT__entry(
+		__field(u8, func)
+		__field(u8, evt)
+		__field(u32, uint1)
+		__field(u32, uint2)
+		__field(u64, ulong1)
+		__field(u64, ulong2)
+		__field(void *, ptr1)
+		__field(void *, ptr2)
+	 ),
+
+	 TP_fast_assign(
+		__entry->func = func;
+		__entry->evt = evt;
+		__entry->uint1 = uint1;
+		__entry->uint2 = uint2;
+		__entry->ulong1 = ulong1;
+		__entry->ulong2 = ulong2;
+		__entry->ptr1 = ptr1;
+		__entry->ptr2 = ptr2;
+	 ),
+
+TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%llu ul2:%llu p1:0x%pK p2:0x%pK",
+	  __entry->func, __entry->evt,
+	  __entry->uint1, __entry->uint2,
+	  __entry->ulong1, __entry->ulong2,
+	  __entry->ptr1, __entry->ptr2)
+)
+
+DEFINE_EVENT
+	(rmnet_mod_template, rmnet_low,
+
+	 TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
+		  u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
+
+	 TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
+
+);
+
+DEFINE_EVENT
+	(rmnet_mod_template, rmnet_high,
+
+	 TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
+		  u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
+
+	 TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
+
+);
+
+DEFINE_EVENT
+	(rmnet_mod_template, rmnet_err,
+
+	 TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
+		  u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
+
+	 TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2,
ptr1, ptr2) + +); + +/*****************************************************************************/ +/* Trace events for rmnet_perf module */ +/*****************************************************************************/ +DEFINE_EVENT + (rmnet_mod_template, rmnet_perf_low, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) + +); + +DEFINE_EVENT + (rmnet_mod_template, rmnet_perf_high, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) + +); + +DEFINE_EVENT + (rmnet_mod_template, rmnet_perf_err, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) + +); + +/*****************************************************************************/ +/* Trace events for rmnet_shs module */ +/*****************************************************************************/ +DEFINE_EVENT + (rmnet_mod_template, rmnet_shs_low, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) + +); + +DEFINE_EVENT + (rmnet_mod_template, rmnet_shs_high, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) + +); + +DEFINE_EVENT + (rmnet_mod_template, rmnet_shs_err, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) + +); + +DEFINE_EVENT + (rmnet_mod_template, rmnet_shs_wq_low, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) + +); + +DEFINE_EVENT + (rmnet_mod_template, rmnet_shs_wq_high, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) + +); + +DEFINE_EVENT + (rmnet_mod_template, rmnet_shs_wq_err, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) + +); + +DECLARE_EVENT_CLASS + (rmnet_freq_template, + + TP_PROTO(u8 core, u32 newfreq), + + TP_ARGS(core, newfreq), + + TP_STRUCT__entry( + __field(u8, core) + __field(u32, newfreq) + ), + + TP_fast_assign( + __entry->core = core; + __entry->newfreq = newfreq; + + ), + +TP_printk("freq policy core:%u freq floor :%u", + __entry->core, __entry->newfreq) + +); + +DEFINE_EVENT + (rmnet_freq_template, rmnet_freq_boost, + + TP_PROTO(u8 core, u32 newfreq), + + TP_ARGS(core, newfreq) +); + +DEFINE_EVENT + (rmnet_freq_template, rmnet_freq_reset, + + TP_PROTO(u8 core, u32 newfreq), + + TP_ARGS(core, newfreq) +); + +TRACE_EVENT + (rmnet_freq_update, + + TP_PROTO(u8 core, u32 lowfreq, u32 highfreq), + + TP_ARGS(core, lowfreq, highfreq), + + TP_STRUCT__entry( + __field(u8, core) + __field(u32, lowfreq) + __field(u32, highfreq) + ), + + TP_fast_assign( + __entry->core = core; + __entry->lowfreq = lowfreq; + __entry->highfreq = highfreq; + + ), + +TP_printk("freq policy update core:%u policy freq floor :%u freq ceil :%u", + __entry->core, __entry->lowfreq, 
__entry->highfreq)
+);
+#endif /* _RMNET_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../drivers/net/ethernet/qualcomm/rmnet
+#include <trace/define_trace.h>
diff --git a/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_vnd.c b/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_vnd.c
new file mode 100644
index 000000000..4ef645daf
--- /dev/null
+++ b/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_vnd.c
@@ -0,0 +1,382 @@
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * RMNET Data virtual network driver
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/module.h>
+#include <net/pkt_sched.h>
+#include "rmnet_config.h"
+#include "rmnet_handlers.h"
+#include "rmnet_private.h"
+#include "rmnet_map.h"
+#include "rmnet_vnd.h"
+
+/* RX/TX Fixup */
+
+static void rmnet_vnd_rx_fixup(struct net_device *dev, u32 skb_len)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct rmnet_pcpu_stats *pcpu_ptr;
+
+	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
+
+	u64_stats_update_begin(&pcpu_ptr->syncp);
+	pcpu_ptr->stats.rx_pkts++;
+	pcpu_ptr->stats.rx_bytes += skb_len;
+	u64_stats_update_end(&pcpu_ptr->syncp);
+}
+
+static void rmnet_vnd_tx_fixup(struct net_device *dev, u32 skb_len)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct rmnet_pcpu_stats *pcpu_ptr;
+
+	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
+
+	u64_stats_update_begin(&pcpu_ptr->syncp);
+	pcpu_ptr->stats.tx_pkts++;
+	pcpu_ptr->stats.tx_bytes += skb_len;
+	u64_stats_update_end(&pcpu_ptr->syncp);
+}
+
+/* Network Device Operations */
+
+static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
+					struct net_device *dev)
+{
+	struct rmnet_priv *priv;
+
+	priv = netdev_priv(dev);
+	if (priv->real_dev) {
+		rmnet_egress_handler(skb);
+	} else {
+		this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
+		kfree_skb(skb);
+	}
+	return NETDEV_TX_OK;
+}
+
+static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
+{
+	if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE)
+		return -EINVAL;
+
+	rmnet_dev->mtu = new_mtu;
+	return 0;
+}
+
+static int rmnet_vnd_get_iflink(const struct net_device *dev)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+
+	return priv->real_dev->ifindex;
+}
+
+static int rmnet_vnd_init(struct net_device *dev)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	int err;
+
+	priv->pcpu_stats = alloc_percpu(struct rmnet_pcpu_stats);
+	if (!priv->pcpu_stats)
+		return -ENOMEM;
+
+	err = gro_cells_init(&priv->gro_cells, dev);
+	if (err) {
+		free_percpu(priv->pcpu_stats);
+		return err;
+	}
+
+	return 0;
+}
+
+static void rmnet_vnd_uninit(struct net_device *dev)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+
+	gro_cells_destroy(&priv->gro_cells);
+	free_percpu(priv->pcpu_stats);
+}
+
+static struct rtnl_link_stats64 *rmnet_get_stats64(struct net_device *dev,
+						   struct rtnl_link_stats64 *s)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct rmnet_vnd_stats total_stats;
+	struct rmnet_pcpu_stats *pcpu_ptr;
+	unsigned int cpu, start;
+
+	memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));
+
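+	/* Fold each CPU's counters into the total. The u64_stats syncp
+	 * begin/retry pair re-reads a CPU's snapshot if a writer updated it
+	 * concurrently; tx_drops is read outside that loop since it is only
+	 * ever bumped with this_cpu_inc() and needs no sequence protection.
+	 */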
+ for_each_possible_cpu(cpu) { + pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu); + + do { + start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp); + total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts; + total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes; + total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts; + total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes; + } while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start)); + + total_stats.tx_drops += pcpu_ptr->stats.tx_drops; + } + + s->rx_packets = total_stats.rx_pkts; + s->rx_bytes = total_stats.rx_bytes; + s->tx_packets = total_stats.tx_pkts; + s->tx_bytes = total_stats.tx_bytes; + s->tx_dropped = total_stats.tx_drops; + + return s; +} + +static const struct net_device_ops rmnet_vnd_ops = { + .ndo_start_xmit = rmnet_vnd_start_xmit, + .ndo_change_mtu = rmnet_vnd_change_mtu, + .ndo_get_iflink = rmnet_vnd_get_iflink, + //.ndo_add_slave = rmnet_add_bridge, + //.ndo_del_slave = rmnet_del_bridge, + .ndo_init = rmnet_vnd_init, + .ndo_uninit = rmnet_vnd_uninit, + .ndo_get_stats64 = rmnet_get_stats64, +}; + +static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = { + "Checksum ok", + "Checksum valid bit not set", + "Checksum validation failed", + "Checksum error bad buffer", + "Checksum error bad ip version", + "Checksum error bad transport", + "Checksum skipped on ip fragment", + "Checksum skipped", + "Checksum computed in software", + "Checksum computed in hardware", + "Coalescing packets received", + "Coalesced packets", + "Coalescing header NLO errors", + "Coalescing header pcount errors", + "Coalescing checksum errors", + "Coalescing packet reconstructs", + "Coalescing IP version invalid", + "Coalescing L4 header invalid", + "Coalescing close Non-coalescable", + "Coalescing close L3 mismatch", + "Coalescing close L4 mismatch", + "Coalescing close HW NLO limit", + "Coalescing close HW packet limit", + "Coalescing close HW byte limit", + "Coalescing close HW time limit", + "Coalescing close HW eviction", + "Coalescing close Coalescable", + "Coalescing packets over VEID0", + "Coalescing packets over VEID1", + "Coalescing packets over VEID2", + "Coalescing packets over VEID3", +}; + +static const char rmnet_port_gstrings_stats[][ETH_GSTRING_LEN] = { + "MAP Cmd last version", + "MAP Cmd last ep id", + "MAP Cmd last transaction id", + "DL header last seen sequence", + "DL header last seen bytes", + "DL header last seen packets", + "DL header last seen flows", + "DL header pkts received", + "DL header total bytes received", + "DL header total pkts received", + "DL trailer last seen sequence", + "DL trailer pkts received", +}; + +static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(buf, &rmnet_gstrings_stats, + sizeof(rmnet_gstrings_stats)); + memcpy(buf + sizeof(rmnet_gstrings_stats), + &rmnet_port_gstrings_stats, + sizeof(rmnet_port_gstrings_stats)); + break; + } +} + +static int rmnet_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(rmnet_gstrings_stats) + + ARRAY_SIZE(rmnet_port_gstrings_stats); + default: + return -EOPNOTSUPP; + } +} + +static void rmnet_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct rmnet_priv *priv = netdev_priv(dev); + struct rmnet_priv_stats *st = &priv->stats; + struct rmnet_port_priv_stats *stp; + struct rmnet_port *port; + + port = rmnet_get_port(priv->real_dev); + + if (!data || !port) + return; + + stp = &port->stats; + + memcpy(data, st, 
ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64)); + memcpy(data + ARRAY_SIZE(rmnet_gstrings_stats), stp, + ARRAY_SIZE(rmnet_port_gstrings_stats) * sizeof(u64)); +} + +static int rmnet_stats_reset(struct net_device *dev) +{ + struct rmnet_priv *priv = netdev_priv(dev); + struct rmnet_port_priv_stats *stp; + struct rmnet_port *port; + + port = rmnet_get_port(priv->real_dev); + if (!port) + return -EINVAL; + + stp = &port->stats; + + memset(stp, 0, sizeof(*stp)); + return 0; +} + +static const struct ethtool_ops rmnet_ethtool_ops = { + .get_ethtool_stats = rmnet_get_ethtool_stats, + .get_strings = rmnet_get_strings, + .get_sset_count = rmnet_get_sset_count, + .nway_reset = rmnet_stats_reset, +}; + +/* Called by kernel whenever a new rmnet device is created. Sets MTU, + * flags, ARP type, needed headroom, etc... + */ +void rmnet_vnd_setup(struct net_device *rmnet_dev) +{ + rmnet_dev->netdev_ops = &rmnet_vnd_ops; + rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE; + rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM; + random_ether_addr(rmnet_dev->dev_addr); + rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN; + + /* Raw IP mode */ + rmnet_dev->header_ops = NULL; /* No header */ + rmnet_dev->type = ARPHRD_RAWIP; + rmnet_dev->hard_header_len = 0; + rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); + + //rmnet_dev->needs_free_netdev = true; + + rmnet_dev->hw_features = NETIF_F_RXCSUM; + rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + //rmnet_dev->hw_features |= NETIF_F_SG; + //rmnet_dev->hw_features |= NETIF_F_GRO_HW; +} + +/* Exposed API */ + +static int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, + struct rmnet_port *port, + struct net_device *real_dev, + struct rmnet_endpoint *ep) +{ + struct rmnet_priv *priv = netdev_priv(rmnet_dev); + struct rmnet_nss_cb *nss_cb; + int rc; + + if (ep->egress_dev) + return -EINVAL; + + if (rmnet_get_endpoint(port, id)) + return -EBUSY; + + rmnet_dev->hw_features = NETIF_F_RXCSUM; + rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + rmnet_dev->hw_features |= NETIF_F_SG; + + priv->real_dev = real_dev; + + rc = register_netdevice(rmnet_dev); + if (!rc) { + ep->egress_dev = rmnet_dev; + ep->mux_id = id; + port->nr_rmnet_devs++; + + //rmnet_dev->rtnl_link_ops = &rmnet_link_ops; + + priv->mux_id = id; + + netdev_dbg(rmnet_dev, "rmnet dev created\n"); + } + + nss_cb = rcu_dereference(rmnet_nss_callbacks); + if (nss_cb) { + rc = nss_cb->nss_create(rmnet_dev); + if (rc) { + /* Log, but don't fail the device creation */ + netdev_err(rmnet_dev, "Device will not use NSS path: %d\n", rc); + rc = 0; + } else { + netdev_dbg(rmnet_dev, "NSS context created\n"); + } + } + + return rc; +} + +static int rmnet_vnd_dellink(u8 id, struct rmnet_port *port, + struct rmnet_endpoint *ep) +{ + struct rmnet_nss_cb *nss_cb; + + if (id >= RMNET_MAX_LOGICAL_EP || !ep->egress_dev) + return -EINVAL; + + if (ep->egress_dev) { + nss_cb = rcu_dereference(rmnet_nss_callbacks); + if (nss_cb) + nss_cb->nss_free(ep->egress_dev); + } + ep->egress_dev = NULL; + port->nr_rmnet_devs--; + + return 0; +} + +static int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable) +{ + netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable); + /* Although we expect similar number of enable/disable + * commands, optimize for the disable. 
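+	 * (stopping the queue too late risks overrunning the device, while
+	 * waking it too late merely delays transmission)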
That is more + * latency sensitive than enable + */ + if (unlikely(enable)) + netif_wake_queue(rmnet_dev); + else + netif_stop_queue(rmnet_dev); + + return 0; +} diff --git a/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_vnd.h b/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_vnd.h new file mode 100644 index 000000000..f8a65a953 --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_vnd.h @@ -0,0 +1,29 @@ +/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * RMNET Data Virtual Network Device APIs + * + */ + +#ifndef _RMNET_VND_H_ +#define _RMNET_VND_H_ + +static int rmnet_vnd_do_flow_control(struct net_device *dev, int enable); +static int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, + struct rmnet_port *port, + struct net_device *real_dev, + struct rmnet_endpoint *ep); +static int rmnet_vnd_dellink(u8 id, struct rmnet_port *port, + struct rmnet_endpoint *ep); +static void rmnet_vnd_rx_fixup(struct net_device *dev, u32 skb_len); +static void rmnet_vnd_tx_fixup(struct net_device *dev, u32 skb_len); +static void rmnet_vnd_setup(struct net_device *dev); +#endif /* _RMNET_VND_H_ */ diff --git a/package/wwan/driver/quectel_MHI/src/devices/rmnet_handler.c b/package/wwan/driver/quectel_MHI/src/devices/rmnet_handler.c new file mode 100644 index 000000000..b10026292 --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/devices/rmnet_handler.c @@ -0,0 +1,1129 @@ +#if 0 + +#define RMNET_MAX_PACKET_SIZE 16384 +#define RMNET_DFLT_PACKET_SIZE 1500 +#define RMNET_NEEDED_HEADROOM 16 +#define RMNET_TX_QUEUE_LEN 1000 + +#define RMNET_MAX_LOGICAL_EP 255 +#define RMNET_MAP_DESC_HEADROOM 128 +#define RMNET_FRAG_DESCRIPTOR_POOL_SIZE 64 + +/* Pass the frame up the stack with no modifications to skb->dev */ +#define RMNET_EPMODE_NONE (0) +/* Replace skb->dev to a virtual rmnet device and pass up the stack */ +#define RMNET_EPMODE_VND (1) +/* Pass the frame directly to another device with dev_queue_xmit() */ +#define RMNET_EPMODE_BRIDGE (2) + +/* rmnet section */ + +#define RMNET_FLAGS_INGRESS_DEAGGREGATION (1U << 0) +#define RMNET_FLAGS_INGRESS_MAP_COMMANDS (1U << 1) +#define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2) +#define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3) +#define RMNET_FLAGS_INGRESS_COALESCE (1U << 4) +#define RMNET_FLAGS_INGRESS_MAP_CKSUMV5 (1U << 5) +#define RMNET_FLAGS_EGRESS_MAP_CKSUMV5 (1U << 6) + +enum rmnet_map_v5_header_type { + RMNET_MAP_HEADER_TYPE_UNKNOWN, + RMNET_MAP_HEADER_TYPE_COALESCING = 0x1, + RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD = 0x2, + RMNET_MAP_HEADER_TYPE_ENUM_LENGTH +}; + +/* Main QMAP header */ +struct rmnet_map_header { + u8 pad_len:6; + u8 next_hdr:1; + u8 cd_bit:1; + u8 mux_id; + __be16 pkt_len; +} __aligned(1); + +/* QMAP v5 headers */ +struct rmnet_map_v5_csum_header { + u8 next_hdr:1; + u8 header_type:7; + u8 hw_reserved:7; + u8 csum_valid_required:1; + __be16 reserved; +} __aligned(1); + +struct rmnet_map_v5_nl_pair { + __be16 pkt_len; + u8 csum_error_bitmap; + u8 num_packets; +} __aligned(1); + +/* NLO: Number-length object */ +#define 
RMNET_MAP_V5_MAX_NLOS (6) +#define RMNET_MAP_V5_MAX_PACKETS (48) + +struct rmnet_map_v5_coal_header { + u8 next_hdr:1; + u8 header_type:7; + u8 reserved1:4; + u8 num_nlos:3; + u8 csum_valid:1; + u8 close_type:4; + u8 close_value:4; + u8 reserved2:4; + u8 virtual_channel_id:4; + + struct rmnet_map_v5_nl_pair nl_pairs[RMNET_MAP_V5_MAX_NLOS]; +} __aligned(1); + +/* QMAP v4 headers */ +struct rmnet_map_dl_csum_trailer { + u8 reserved1; + u8 valid:1; + u8 reserved2:7; + u16 csum_start_offset; + u16 csum_length; + __be16 csum_value; +} __aligned(1); + +struct rmnet_frag_descriptor_pool { + struct list_head free_list; + u32 pool_size; +}; + +struct rmnet_frag_descriptor { + struct list_head list; + struct list_head sub_frags; + skb_frag_t frag; + u8 *hdr_ptr; + struct net_device *dev; + u32 hash; + __be32 tcp_seq; + __be16 ip_id; + u16 data_offset; + u16 gso_size; + u16 gso_segs; + u16 ip_len; + u16 trans_len; + u8 ip_proto; + u8 trans_proto; + u8 pkt_id; + u8 csum_valid:1, + hdrs_valid:1, + ip_id_set:1, + tcp_seq_set:1, + flush_shs:1, + reserved:3; +}; + +struct rmnet_endpoint { + u8 rmnet_mode; + u8 mux_id; + struct net_device *rmnet_dev; +}; + +/* One instance of this structure is instantiated for each real_dev associated + * with rmnet. + */ +struct rmnet_port { + struct net_device *dev; + u8 rmnet_mode; + u32 data_format; + u32 nr_rmnet_devs; + struct rmnet_endpoint muxed_ep[16]; + + /* Descriptor pool */ + spinlock_t desc_pool_lock; + struct rmnet_frag_descriptor_pool *frag_desc_pool; + struct sk_buff *chain_head; + struct sk_buff *chain_tail; +}; + +static struct sk_buff * add_qhdr_v5(struct sk_buff *skb, u8 mux_id) +{ + struct rmnet_map_header *map_header; + struct rmnet_map_v5_csum_header *ul_header; + u32 padding, map_datalen; + + map_datalen = skb->len; + padding = map_datalen%4; + if (padding) { + padding = 4 - padding; + if (skb_tailroom(skb) < padding) { + printk("skb_tailroom small!\n"); + padding = 0; + } + if (padding) + __skb_put(skb, padding); + } + + map_header = (struct rmnet_map_header *)skb_push(skb, (sizeof(struct rmnet_map_header) + sizeof(struct rmnet_map_v5_csum_header))); + + BUILD_BUG_ON((sizeof(struct rmnet_map_header) + sizeof(struct rmnet_map_v5_csum_header)) != 8); + + map_header->cd_bit = 0; + map_header->next_hdr = 1; + map_header->pad_len = padding; + map_header->mux_id = mux_id; + map_header->pkt_len = htons(map_datalen + padding); + + ul_header = (struct rmnet_map_v5_csum_header *)(map_header + 1); + memset(ul_header, 0, sizeof(*ul_header)); + ul_header->header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD; + + return skb; +} + +struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id) +{ + return &port->muxed_ep[0]; +} + +static void +rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port) +{ + struct rmnet_nss_cb *nss_cb; + + //rmnet_vnd_rx_fixup(skb->dev, skb->len); + + /* Pass off the packet to NSS driver if we can */ + nss_cb = rcu_dereference(rmnet_nss_callbacks); + if (nss_cb) { + if (!port->chain_head) + port->chain_head = skb; + else + skb_shinfo(port->chain_tail)->frag_list = skb; + + port->chain_tail = skb; + return; + } + + skb_reset_transport_header(skb); + skb_reset_network_header(skb); + + skb->pkt_type = PACKET_HOST; + skb_set_mac_header(skb, 0); + + //if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) { + //} else { + //if (!rmnet_check_skb_can_gro(skb)) + // gro_cells_receive(&priv->gro_cells, skb); + //else + netif_receive_skb(skb); + //} +} + +static inline unsigned char *rmnet_map_data_ptr(struct sk_buff *skb) 
+{ + /* Nonlinear packets we receive are entirely within frag 0 */ + if (skb_is_nonlinear(skb) && skb->len == skb->data_len) + return skb_frag_address(skb_shinfo(skb)->frags); + + return skb->data; +} + +static inline void *rmnet_frag_data_ptr(struct rmnet_frag_descriptor *frag_desc) +{ + return skb_frag_address(&frag_desc->frag); +} + +static struct rmnet_frag_descriptor * +rmnet_get_frag_descriptor(struct rmnet_port *port) +{ + struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool; + struct rmnet_frag_descriptor *frag_desc; + + spin_lock(&port->desc_pool_lock); + if (!list_empty(&pool->free_list)) { + frag_desc = list_first_entry(&pool->free_list, + struct rmnet_frag_descriptor, + list); + list_del_init(&frag_desc->list); + } else { + frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC); + if (!frag_desc) + goto out; + + INIT_LIST_HEAD(&frag_desc->list); + INIT_LIST_HEAD(&frag_desc->sub_frags); + pool->pool_size++; + } + +out: + spin_unlock(&port->desc_pool_lock); + return frag_desc; +} + +static void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port) +{ + struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool; + struct page *page = skb_frag_page(&frag_desc->frag); + + list_del(&frag_desc->list); + if (page) + put_page(page); + + memset(frag_desc, 0, sizeof(*frag_desc)); + INIT_LIST_HEAD(&frag_desc->list); + INIT_LIST_HEAD(&frag_desc->sub_frags); + spin_lock(&port->desc_pool_lock); + list_add_tail(&frag_desc->list, &pool->free_list); + spin_unlock(&port->desc_pool_lock); +} + +static inline void rmnet_frag_fill(struct rmnet_frag_descriptor *frag_desc, + struct page *p, u32 page_offset, u32 len) +{ + get_page(p); + __skb_frag_set_page(&frag_desc->frag, p); + skb_frag_size_set(&frag_desc->frag, len); + frag_desc->frag.page_offset = page_offset; +} + +static inline void *rmnet_frag_pull(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port, + unsigned int size) +{ + if (size >= skb_frag_size(&frag_desc->frag)) { + pr_info("%s(): Pulling %u bytes from %u byte pkt. Dropping\n", + __func__, size, skb_frag_size(&frag_desc->frag)); + rmnet_recycle_frag_descriptor(frag_desc, port); + return NULL; + } + + frag_desc->frag.page_offset += size; + skb_frag_size_sub(&frag_desc->frag, size); + + return rmnet_frag_data_ptr(frag_desc); +} + +static inline void *rmnet_frag_trim(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port, + unsigned int size) +{ + if (!size) { + pr_info("%s(): Trimming %u byte pkt to 0. 
Dropping\n", + __func__, skb_frag_size(&frag_desc->frag)); + rmnet_recycle_frag_descriptor(frag_desc, port); + return NULL; + } + + if (size < skb_frag_size(&frag_desc->frag)) + skb_frag_size_set(&frag_desc->frag, size); + + return rmnet_frag_data_ptr(frag_desc); +} + +static inline u8 +rmnet_frag_get_next_hdr_type(struct rmnet_frag_descriptor *frag_desc) +{ + unsigned char *data = rmnet_frag_data_ptr(frag_desc); + + data += sizeof(struct rmnet_map_header); + return ((struct rmnet_map_v5_coal_header *)data)->header_type; +} + +static inline bool +rmnet_frag_get_csum_valid(struct rmnet_frag_descriptor *frag_desc) +{ + unsigned char *data = rmnet_frag_data_ptr(frag_desc); + + data += sizeof(struct rmnet_map_header); + return ((struct rmnet_map_v5_csum_header *)data)->csum_valid_required; +} + +static void rmnet_descriptor_add_frag(struct rmnet_port *port, struct list_head *list, + struct page *p, u32 page_offset, u32 len) +{ + struct rmnet_frag_descriptor *frag_desc; + + frag_desc = rmnet_get_frag_descriptor(port); + if (!frag_desc) + return; + + rmnet_frag_fill(frag_desc, p, page_offset, len); + list_add_tail(&frag_desc->list, list); +} + +static void rmnet_frag_deaggregate(skb_frag_t *frag, struct rmnet_port *port, + struct list_head *list) +{ + struct rmnet_map_header *maph; + u8 *data = skb_frag_address(frag); + u32 offset = 0; + u32 packet_len; + + while (offset < skb_frag_size(frag)) { + maph = (struct rmnet_map_header *)data; + packet_len = ntohs(maph->pkt_len); + + /* Some hardware can send us empty frames. Catch them */ + if (packet_len == 0) + return; + + packet_len += sizeof(*maph); + + if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) { + packet_len += sizeof(struct rmnet_map_dl_csum_trailer); + WARN_ON(1); + } else if (port->data_format & + (RMNET_FLAGS_INGRESS_MAP_CKSUMV5 | + RMNET_FLAGS_INGRESS_COALESCE) && !maph->cd_bit) { + u32 hsize = 0; + u8 type; + + type = ((struct rmnet_map_v5_coal_header *) + (data + sizeof(*maph)))->header_type; + switch (type) { + case RMNET_MAP_HEADER_TYPE_COALESCING: + hsize = sizeof(struct rmnet_map_v5_coal_header); + break; + case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD: + hsize = sizeof(struct rmnet_map_v5_csum_header); + break; + } + + packet_len += hsize; + } + else { + qmap_hex_dump(__func__, data, 64); + WARN_ON(1); + } + + if ((int)skb_frag_size(frag) - (int)packet_len < 0) + return; + + rmnet_descriptor_add_frag(port, list, skb_frag_page(frag), + frag->page_offset + offset, + packet_len); + + offset += packet_len; + data += packet_len; + } +} + + +#define RMNET_IP_VERSION_4 0x40 +#define RMNET_IP_VERSION_6 0x60 + +/* Helper Functions */ + +static void rmnet_set_skb_proto(struct sk_buff *skb) +{ + switch (rmnet_map_data_ptr(skb)[0] & 0xF0) { + case RMNET_IP_VERSION_4: + skb->protocol = htons(ETH_P_IP); + break; + case RMNET_IP_VERSION_6: + skb->protocol = htons(ETH_P_IPV6); + break; + default: + skb->protocol = htons(ETH_P_MAP); + WARN_ON(1); + break; + } +} + +/* Allocate and populate an skb to contain the packet represented by the + * frag descriptor. + */ +static struct sk_buff *rmnet_alloc_skb(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port) +{ + struct sk_buff *head_skb, *current_skb, *skb; + struct skb_shared_info *shinfo; + struct rmnet_frag_descriptor *sub_frag, *tmp; + + /* Use the exact sizes if we know them (i.e. 
RSB/RSC, rmnet_perf) */ + if (frag_desc->hdrs_valid) { + u16 hdr_len = frag_desc->ip_len + frag_desc->trans_len; + + head_skb = alloc_skb(hdr_len + RMNET_MAP_DESC_HEADROOM, + GFP_ATOMIC); + if (!head_skb) + return NULL; + + skb_reserve(head_skb, RMNET_MAP_DESC_HEADROOM); + skb_put_data(head_skb, frag_desc->hdr_ptr, hdr_len); + skb_reset_network_header(head_skb); + + if (frag_desc->trans_len) + skb_set_transport_header(head_skb, frag_desc->ip_len); + + /* Packets that have no data portion don't need any frags */ + if (hdr_len == skb_frag_size(&frag_desc->frag)) + goto skip_frags; + + /* If the headers we added are the start of the page, + * we don't want to add them twice + */ + if (frag_desc->hdr_ptr == rmnet_frag_data_ptr(frag_desc)) { + if (!rmnet_frag_pull(frag_desc, port, hdr_len)) { + kfree_skb(head_skb); + return NULL; + } + } + } else { + /* Allocate enough space to avoid penalties in the stack + * from __pskb_pull_tail() + */ + head_skb = alloc_skb(256 + RMNET_MAP_DESC_HEADROOM, + GFP_ATOMIC); + if (!head_skb) + return NULL; + + skb_reserve(head_skb, RMNET_MAP_DESC_HEADROOM); + } + + /* Add main fragment */ + get_page(skb_frag_page(&frag_desc->frag)); + skb_add_rx_frag(head_skb, 0, skb_frag_page(&frag_desc->frag), + frag_desc->frag.page_offset, + skb_frag_size(&frag_desc->frag), + skb_frag_size(&frag_desc->frag)); + + shinfo = skb_shinfo(head_skb); + current_skb = head_skb; + + /* Add in any frags from rmnet_perf */ + list_for_each_entry_safe(sub_frag, tmp, &frag_desc->sub_frags, list) { + skb_frag_t *frag; + u32 frag_size; + + frag = &sub_frag->frag; + frag_size = skb_frag_size(frag); + +add_frag: + if (shinfo->nr_frags < MAX_SKB_FRAGS) { + get_page(skb_frag_page(frag)); + skb_add_rx_frag(current_skb, shinfo->nr_frags, + skb_frag_page(frag), frag->page_offset, + frag_size, frag_size); + if (current_skb != head_skb) { + head_skb->len += frag_size; + head_skb->data_len += frag_size; + } + } else { + /* Alloc a new skb and try again */ + skb = alloc_skb(0, GFP_ATOMIC); + if (!skb) + break; + + if (current_skb == head_skb) + shinfo->frag_list = skb; + else + current_skb->next = skb; + + current_skb = skb; + shinfo = skb_shinfo(current_skb); + goto add_frag; + } + + rmnet_recycle_frag_descriptor(sub_frag, port); + } + +skip_frags: + head_skb->dev = frag_desc->dev; + rmnet_set_skb_proto(head_skb); + + /* Handle any header metadata that needs to be updated after RSB/RSC + * segmentation + */ + if (frag_desc->ip_id_set) { + struct iphdr *iph; + + iph = (struct iphdr *)rmnet_map_data_ptr(head_skb); + csum_replace2(&iph->check, iph->id, frag_desc->ip_id); + iph->id = frag_desc->ip_id; + } + + if (frag_desc->tcp_seq_set) { + struct tcphdr *th; + + th = (struct tcphdr *) + (rmnet_map_data_ptr(head_skb) + frag_desc->ip_len); + th->seq = frag_desc->tcp_seq; + } + + /* Handle csum offloading */ + if (frag_desc->csum_valid && frag_desc->hdrs_valid) { + /* Set the partial checksum information */ + //rmnet_frag_partial_csum(head_skb, frag_desc); + WARN_ON(1); + } else if (frag_desc->csum_valid) { + /* Non-RSB/RSC/perf packet. The current checksum is fine */ + head_skb->ip_summed = CHECKSUM_UNNECESSARY; + } else if (frag_desc->hdrs_valid && + (frag_desc->trans_proto == IPPROTO_TCP || + frag_desc->trans_proto == IPPROTO_UDP)) { + /* Unfortunately, we have to fake a bad checksum here, since + * the original bad value is lost by the hardware. The only + * reliable way to do it is to calculate the actual checksum + * and corrupt it. 
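+	 * The stack's own checksum verification will then reject the
+	 * packet, matching what the hardware reported.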
+ */ + __sum16 *check; + __wsum csum; + unsigned int offset = skb_transport_offset(head_skb); + __sum16 pseudo; + + WARN_ON(1); + /* Calculate pseudo header and update header fields */ + if (frag_desc->ip_proto == 4) { + struct iphdr *iph = ip_hdr(head_skb); + __be16 tot_len = htons(head_skb->len); + + csum_replace2(&iph->check, iph->tot_len, tot_len); + iph->tot_len = tot_len; + pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + head_skb->len - + frag_desc->ip_len, + frag_desc->trans_proto, 0); + } else { + struct ipv6hdr *ip6h = ipv6_hdr(head_skb); + + ip6h->payload_len = htons(head_skb->len - + sizeof(*ip6h)); + pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, + head_skb->len - + frag_desc->ip_len, + frag_desc->trans_proto, 0); + } + + if (frag_desc->trans_proto == IPPROTO_TCP) { + check = &tcp_hdr(head_skb)->check; + } else { + udp_hdr(head_skb)->len = htons(head_skb->len - + frag_desc->ip_len); + check = &udp_hdr(head_skb)->check; + } + + *check = pseudo; + csum = skb_checksum(head_skb, offset, head_skb->len - offset, + 0); + /* Add 1 to corrupt. This cannot produce a final value of 0 + * since csum_fold() can't return a value of 0xFFFF + */ + *check = csum16_add(csum_fold(csum), htons(1)); + head_skb->ip_summed = CHECKSUM_NONE; + } + + /* Handle any rmnet_perf metadata */ + if (frag_desc->hash) { + head_skb->hash = frag_desc->hash; + head_skb->sw_hash = 1; + } + + if (frag_desc->flush_shs) + head_skb->cb[0] = 1; + + /* Handle coalesced packets */ + //if (frag_desc->gso_segs > 1) + // rmnet_frag_gso_stamp(head_skb, frag_desc); + + return head_skb; +} + +/* Deliver the packets contained within a frag descriptor */ +static void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port) +{ + struct sk_buff *skb; + + skb = rmnet_alloc_skb(frag_desc, port); + if (skb) + rmnet_deliver_skb(skb, port); + rmnet_recycle_frag_descriptor(frag_desc, port); +} + +/* Process a QMAPv5 packet header */ +static int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port, + struct list_head *list, + u16 len) +{ + int rc = 0; + + switch (rmnet_frag_get_next_hdr_type(frag_desc)) { + case RMNET_MAP_HEADER_TYPE_COALESCING: + rc = -1; + WARN_ON(1); + break; + case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD: + if (rmnet_frag_get_csum_valid(frag_desc)) { + frag_desc->csum_valid = true; + } else { + } + + if (!rmnet_frag_pull(frag_desc, port, + sizeof(struct rmnet_map_header) + + sizeof(struct rmnet_map_v5_csum_header))) { + rc = -EINVAL; + break; + } + + frag_desc->hdr_ptr = rmnet_frag_data_ptr(frag_desc); + + /* Remove padding only for csum offload packets. + * Coalesced packets should never have padding. 
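+	 * Trimming to the MAP payload length drops those pad bytes.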
+ */ + if (!rmnet_frag_trim(frag_desc, port, len)) { + rc = -EINVAL; + break; + } + + list_del_init(&frag_desc->list); + list_add_tail(&frag_desc->list, list); + break; + default: + qmap_hex_dump(__func__, rmnet_frag_data_ptr(frag_desc), 64); + rc = -EINVAL; + break; + } + + return rc; +} + +static void +__rmnet_frag_ingress_handler(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port) +{ + struct rmnet_map_header *qmap; + struct rmnet_endpoint *ep; + struct rmnet_frag_descriptor *frag, *tmp; + LIST_HEAD(segs); + u16 len, pad; + u8 mux_id; + + qmap = (struct rmnet_map_header *)skb_frag_address(&frag_desc->frag); + mux_id = qmap->mux_id; + pad = qmap->pad_len; + len = ntohs(qmap->pkt_len) - pad; + + if (qmap->cd_bit) { + goto recycle; + } + + if (mux_id >= RMNET_MAX_LOGICAL_EP) + goto recycle; + + ep = rmnet_get_endpoint(port, mux_id); + if (!ep) + goto recycle; + + frag_desc->dev = ep->rmnet_dev; + + /* Handle QMAPv5 packet */ + if (qmap->next_hdr && + (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE | + RMNET_FLAGS_INGRESS_MAP_CKSUMV5))) { + if (rmnet_frag_process_next_hdr_packet(frag_desc, port, &segs, + len)) + goto recycle; + } else { + /* We only have the main QMAP header to worry about */ + if (!rmnet_frag_pull(frag_desc, port, sizeof(*qmap))) + return; + + frag_desc->hdr_ptr = rmnet_frag_data_ptr(frag_desc); + + if (!rmnet_frag_trim(frag_desc, port, len)) + return; + + list_add_tail(&frag_desc->list, &segs); + } + + list_for_each_entry_safe(frag, tmp, &segs, list) { + list_del_init(&frag->list); + rmnet_frag_deliver(frag, port); + } + return; + +recycle: + rmnet_recycle_frag_descriptor(frag_desc, port); +} + +static void rmnet_frag_ingress_handler(struct sk_buff *skb, + struct rmnet_port *port) +{ + LIST_HEAD(desc_list); + int i = 0; + struct rmnet_nss_cb *nss_cb; + + /* Deaggregation and freeing of HW originating + * buffers is done within here + */ + while (skb) { + struct sk_buff *skb_frag; + + port->chain_head = NULL; + port->chain_tail = NULL; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + rmnet_frag_deaggregate(&skb_shinfo(skb)->frags[i], port, + &desc_list); + if (!list_empty(&desc_list)) { + struct rmnet_frag_descriptor *frag_desc, *tmp; + + list_for_each_entry_safe(frag_desc, tmp, + &desc_list, list) { + list_del_init(&frag_desc->list); + __rmnet_frag_ingress_handler(frag_desc, + port); + } + } + } + + nss_cb = rcu_dereference(rmnet_nss_callbacks); + if (nss_cb && port->chain_head) { + port->chain_head->cb[0] = 0; + netif_receive_skb(port->chain_head); + } + + skb_frag = skb_shinfo(skb)->frag_list; + skb_shinfo(skb)->frag_list = NULL; + consume_skb(skb); + skb = skb_frag; + } +} + +static void +rmnet_map_ingress_handler(struct sk_buff *skb, + struct rmnet_port *port) +{ + if (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE | + RMNET_FLAGS_INGRESS_MAP_CKSUMV5)) { + if (skb_is_nonlinear(skb)) { + rmnet_frag_ingress_handler(skb, port); + return; + } + } + + WARN_ON(1); +} + +static rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb); +static int rmnet_is_real_dev_registered(const struct net_device *real_dev) +{ + return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler; +} + + +/* Needs either rcu_read_lock() or rtnl lock */ +struct rmnet_port *rmnet_get_port(struct net_device *real_dev) +{ + if (rmnet_is_real_dev_registered(real_dev)) + return rcu_dereference_rtnl(real_dev->rx_handler_data); + else + return NULL; +} + +static rx_handler_result_t rmnet_rx_priv_handler(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + 
struct rmnet_nss_cb *nss_cb; + + if (!skb) + return RX_HANDLER_CONSUMED; + if (nss_debug) printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + + if (skb->pkt_type == PACKET_LOOPBACK) + return RX_HANDLER_PASS; + + /* Check this so that we dont loop around netif_receive_skb */ + if (skb->cb[0] == 1) { + skb->cb[0] = 0; + + skb->dev->stats.rx_packets++; + return RX_HANDLER_PASS; + } + + while (skb) { + struct sk_buff *skb_frag = skb_shinfo(skb)->frag_list; + + skb_shinfo(skb)->frag_list = NULL; + + nss_cb = rcu_dereference(rmnet_nss_callbacks); + if (nss_cb) + nss_cb->nss_tx(skb); + + skb = skb_frag; + } + + return RX_HANDLER_CONSUMED; +} + +/* Ingress / Egress Entry Points */ + +/* Processes packet as per ingress data format for receiving device. Logical + * endpoint is determined from packet inspection. Packet is then sent to the + * egress device listed in the logical endpoint configuration. + */ +static rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + struct rmnet_port *port; + struct net_device *dev; + + if (!skb) + goto done; + + if (nss_debug) printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + + if (skb->pkt_type == PACKET_LOOPBACK) + return RX_HANDLER_PASS; + + if (skb->protocol != htons(ETH_P_MAP)) { + WARN_ON(1); + return RX_HANDLER_PASS; + } + + dev = skb->dev; + port = rmnet_get_port(dev); + + if (port == NULL) + return RX_HANDLER_PASS; + + port->chain_head = NULL; + port->chain_tail = NULL; + + switch (port->rmnet_mode) { + case RMNET_EPMODE_VND: + rmnet_map_ingress_handler(skb, port); + break; + case RMNET_EPMODE_BRIDGE: + //rmnet_bridge_handler(skb, port->bridge_ep); + break; + } + +done: + return RX_HANDLER_CONSUMED; +} + +static void rmnet_descriptor_deinit(struct rmnet_port *port) +{ + struct rmnet_frag_descriptor_pool *pool; + struct rmnet_frag_descriptor *frag_desc, *tmp; + + pool = port->frag_desc_pool; + + list_for_each_entry_safe(frag_desc, tmp, &pool->free_list, list) { + kfree(frag_desc); + pool->pool_size--; + } + + kfree(pool); +} + +static int rmnet_descriptor_init(struct rmnet_port *port) +{ + struct rmnet_frag_descriptor_pool *pool; + int i; + + spin_lock_init(&port->desc_pool_lock); + pool = kzalloc(sizeof(*pool), GFP_ATOMIC); + if (!pool) + return -ENOMEM; + + INIT_LIST_HEAD(&pool->free_list); + port->frag_desc_pool = pool; + + for (i = 0; i < RMNET_FRAG_DESCRIPTOR_POOL_SIZE; i++) { + struct rmnet_frag_descriptor *frag_desc; + + frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC); + if (!frag_desc) + return -ENOMEM; + + INIT_LIST_HEAD(&frag_desc->list); + INIT_LIST_HEAD(&frag_desc->sub_frags); + list_add_tail(&frag_desc->list, &pool->free_list); + pool->pool_size++; + } + + return 0; +} + +struct rmnet_priv { + //struct rmnet_endpoint local_ep; + struct net_device *real_dev; + u8 mux_id; +}; + +static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct rmnet_priv *priv; + + if (nss_debug) printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + + priv = netdev_priv(dev); + if (priv->real_dev) { + add_qhdr_v5(skb, priv->mux_id); + skb->protocol = htons(ETH_P_MAP); + skb->dev = priv->real_dev; + dev_queue_xmit(skb); + dev->stats.tx_packets++; + //rmnet_egress_handler(skb); + } else { + //this_cpu_inc(priv->pcpu_stats->stats.tx_drops); + kfree_skb(skb); + } + return NETDEV_TX_OK; +} + +static int 
rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
+{
+	if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE)
+		return -EINVAL;
+
+	rmnet_dev->mtu = new_mtu;
+	return 0;
+}
+
+static const struct net_device_ops rmnet_vnd_ops = {
+	.ndo_start_xmit = rmnet_vnd_start_xmit,
+	.ndo_change_mtu = rmnet_vnd_change_mtu,
+	//.ndo_get_iflink = rmnet_vnd_get_iflink,
+	//.ndo_add_slave = rmnet_add_bridge,
+	//.ndo_del_slave = rmnet_del_bridge,
+	//.ndo_init = rmnet_vnd_init,
+	//.ndo_uninit = rmnet_vnd_uninit,
+	//.ndo_get_stats64 = rmnet_get_stats64,
+};
+
+static void rmnet_vnd_setup(struct net_device *rmnet_dev)
+{
+	rmnet_dev->netdev_ops = &rmnet_vnd_ops;
+	rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
+	rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
+	random_ether_addr(rmnet_dev->dev_addr);
+	rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;
+
+	/* Raw IP mode */
+	rmnet_dev->header_ops = NULL; /* No header */
+	rmnet_dev->type = ARPHRD_RAWIP;
+	rmnet_dev->hard_header_len = 0;
+	rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+
+	//rmnet_dev->needs_free_netdev = true;
+
+	rmnet_dev->hw_features = NETIF_F_RXCSUM;
+	rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+	//rmnet_dev->hw_features |= NETIF_F_SG;
+	//rmnet_dev->hw_features |= NETIF_F_GRO_HW;
+}
+#else
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/pkt_sched.h>
+
+static uint nss_debug = 0;
+module_param( nss_debug, uint, S_IRUGO | S_IWUSR);
+
+/* rmnet section */
+
+#define RMNET_FLAGS_INGRESS_DEAGGREGATION (1U << 0)
+#define RMNET_FLAGS_INGRESS_MAP_COMMANDS (1U << 1)
+#define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2)
+#define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3)
+#define RMNET_FLAGS_INGRESS_COALESCE (1U << 4)
+#define RMNET_FLAGS_INGRESS_MAP_CKSUMV5 (1U << 5)
+#define RMNET_FLAGS_EGRESS_MAP_CKSUMV5 (1U << 6)
+
+#ifdef CONFIG_ARCH_IPQ807x
+#define CONFIG_QCA_NSS_DRV
+#endif
+#ifdef CONFIG_QCA_NSS_DRV
+#include "rmnet/rmnet_nss.c"
+#else
+#include "rmnet_nss.h"
+#endif
+
+#include "rmnet/rmnet_vnd.c"
+#include "rmnet/rmnet_map_command.c"
+#include "rmnet/rmnet_map_data.c"
+#include "rmnet/rmnet_descriptor.c"
+#include "rmnet/rmnet_config.c"
+#include "rmnet/rmnet_handlers.c"
+
+struct rmnet_nss_cb *rmnet_nss_callbacks __rcu __read_mostly;
+
+void rmnet_init(struct net_device *real_dev, u32 nr_rmnet_devs)
+{
+	struct rmnet_port *port;
+	struct rmnet_endpoint *ep;
+	struct net_device *rmnet_dev = NULL;
+	u32 nr = 0;
+	struct rmnet_nss_cb *nss_cb = rcu_dereference(rmnet_nss_callbacks);
+
+	if (!nss_cb) {
+#ifdef CONFIG_QCA_NSS_DRV
+		rmnet_nss_init();
+#endif
+	}
+
+	rmnet_register_real_device(real_dev);
+
+	port = rmnet_get_port_rtnl(real_dev);
+
+	port->data_format = RMNET_FLAGS_INGRESS_DEAGGREGATION
+		| RMNET_FLAGS_INGRESS_MAP_CKSUMV5 | RMNET_FLAGS_EGRESS_MAP_CKSUMV5;
+	port->rmnet_mode = RMNET_EPMODE_VND;
+
+	for (nr = 0; nr < nr_rmnet_devs; nr++) {
+		u8 mux_id = 0x81+nr;
+
+		rmnet_dev = alloc_netdev(sizeof(struct rmnet_priv),
+					 "rmnet_data%d", NET_NAME_PREDICTABLE,
+					 rmnet_vnd_setup);
+
+		ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
+
+		rmnet_vnd_newlink(mux_id, rmnet_dev, port, real_dev, ep);
+		netdev_rx_handler_register(rmnet_dev, rmnet_rx_priv_handler, NULL);
+		hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
+	}
+
+	port->nr_rmnet_devs = nr_rmnet_devs;
+}
+
+void rmnet_deinit(struct net_device *real_dev, u32 nr_rmnet_devs)
+{
+	struct rmnet_port *port;
+	u32 nr = 0;
+	struct rmnet_nss_cb *nss_cb = rcu_dereference(rmnet_nss_callbacks);
+
+	if
(!real_dev || !rmnet_is_real_dev_registered(real_dev))
+		return;
+
+	port = rmnet_get_port_rtnl(real_dev);
+
+	for (nr = 0; nr < nr_rmnet_devs; nr++) {
+		struct rmnet_endpoint *ep;
+		u8 mux_id = 0x81+nr;
+
+		ep = rmnet_get_endpoint(port, mux_id);
+		if (ep) {
+			hlist_del_init_rcu(&ep->hlnode);
+			rmnet_vnd_dellink(mux_id, port, ep);
+			synchronize_rcu();
+			kfree(ep);
+		}
+	}
+
+	rmnet_unregister_real_device(real_dev, port);
+
+	if (nss_cb) {
+#ifdef CONFIG_QCA_NSS_DRV
+		rmnet_nss_exit();
+#endif
+	}
+}
+#endif
diff --git a/package/wwan/driver/quectel_MHI/src/devices/rmnet_nss.c b/package/wwan/driver/quectel_MHI/src/devices/rmnet_nss.c
new file mode 100644
index 000000000..e6e841468
--- /dev/null
+++ b/package/wwan/driver/quectel_MHI/src/devices/rmnet_nss.c
@@ -0,0 +1,424 @@
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/hashtable.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/version.h>
+
+#include <nss_api_if.h>
+
+#define RMNET_NSS_HASH_BITS 8
+#define hash_add_ptr(table, node, key) \
+	hlist_add_head(node, &table[hash_ptr(key, HASH_BITS(table))])
+
+static DEFINE_HASHTABLE(rmnet_nss_ctx_hashtable, RMNET_NSS_HASH_BITS);
+
+struct rmnet_nss_ctx {
+	struct hlist_node hnode;
+	struct net_device *rmnet_dev;
+	struct nss_rmnet_rx_handle *nss_ctx;
+};
+
+enum __rmnet_nss_stat {
+	RMNET_NSS_RX_ETH,
+	RMNET_NSS_RX_FAIL,
+	RMNET_NSS_RX_NON_ETH,
+	RMNET_NSS_RX_BUSY,
+	RMNET_NSS_TX_NO_CTX,
+	RMNET_NSS_TX_SUCCESS,
+	RMNET_NSS_TX_FAIL,
+	RMNET_NSS_TX_NONLINEAR,
+	RMNET_NSS_TX_BAD_IP,
+	RMNET_NSS_EXCEPTIONS,
+	RMNET_NSS_EX_BAD_HDR,
+	RMNET_NSS_EX_BAD_IP,
+	RMNET_NSS_EX_SUCCESS,
+	RMNET_NSS_TX_BAD_FRAGS,
+	RMNET_NSS_TX_LINEARIZE_FAILS,
+	RMNET_NSS_TX_NON_ZERO_HEADLEN_FRAGS,
+	RMNET_NSS_TX_BUSY_LOOP,
+	RMNET_NSS_NUM_STATS,
+};
+
+static unsigned long rmnet_nss_stats[RMNET_NSS_NUM_STATS];
+
+#define RMNET_NSS_STAT(name, counter, desc) \
+	module_param_named(name, rmnet_nss_stats[counter], ulong, 0444); \
+	MODULE_PARM_DESC(name, desc)
+
+RMNET_NSS_STAT(rmnet_nss_rx_ethernet, RMNET_NSS_RX_ETH,
+	       "Number of Ethernet headers successfully removed");
+RMNET_NSS_STAT(rmnet_nss_rx_fail, RMNET_NSS_RX_FAIL,
+	       "Number of Ethernet headers that could not be removed");
+RMNET_NSS_STAT(rmnet_nss_rx_non_ethernet, RMNET_NSS_RX_NON_ETH,
+	       "Number of non-Ethernet packets received");
+RMNET_NSS_STAT(rmnet_nss_rx_busy, RMNET_NSS_RX_BUSY,
+	       "Number of packets dropped because rmnet_data device was busy");
+RMNET_NSS_STAT(rmnet_nss_tx_slow, RMNET_NSS_TX_NO_CTX,
+	       "Number of packets sent over non-NSS-accelerated rmnet device");
+RMNET_NSS_STAT(rmnet_nss_tx_fast, RMNET_NSS_TX_SUCCESS,
+	       "Number of packets sent over NSS-accelerated rmnet device");
+RMNET_NSS_STAT(rmnet_nss_tx_fail, RMNET_NSS_TX_FAIL,
+	       "Number of packets that NSS could not transmit");
+RMNET_NSS_STAT(rmnet_nss_tx_nonlinear, RMNET_NSS_TX_NONLINEAR,
+	       "Number of non-linear packets sent over NSS-accelerated rmnet device");
+RMNET_NSS_STAT(rmnet_nss_tx_invalid_ip, RMNET_NSS_TX_BAD_IP,
+	       "Number of ingress packets with invalid IP headers");
+RMNET_NSS_STAT(rmnet_nss_tx_invalid_frags, RMNET_NSS_TX_BAD_FRAGS, + "Number of ingress packets with invalid frag format"); +RMNET_NSS_STAT(rmnet_nss_tx_linearize_fail, RMNET_NSS_TX_LINEARIZE_FAILS, + "Number of ingress packets where linearize in tx fails"); +RMNET_NSS_STAT(rmnet_nss_tx_exceptions, RMNET_NSS_EXCEPTIONS, + "Number of times our DL exception handler was invoked"); +RMNET_NSS_STAT(rmnet_nss_exception_non_ethernet, RMNET_NSS_EX_BAD_HDR, + "Number of non-Ethernet exception packets"); +RMNET_NSS_STAT(rmnet_nss_exception_invalid_ip, RMNET_NSS_EX_BAD_IP, + "Number of exception packets with invalid IP headers"); +RMNET_NSS_STAT(rmnet_nss_exception_success, RMNET_NSS_EX_SUCCESS, + "Number of exception packets handled successfully"); +RMNET_NSS_STAT(rmnet_nss_tx_non_zero_headlen_frags, RMNET_NSS_TX_NON_ZERO_HEADLEN_FRAGS, + "Number of packets with non zero headlen"); +RMNET_NSS_STAT(rmnet_nss_tx_busy_loop, RMNET_NSS_TX_BUSY_LOOP, + "Number of times tx packets busy looped"); + +static void rmnet_nss_inc_stat(enum __rmnet_nss_stat stat) +{ + if (stat >= 0 && stat < RMNET_NSS_NUM_STATS) + rmnet_nss_stats[stat]++; +} + +static struct rmnet_nss_ctx *rmnet_nss_find_ctx(struct net_device *dev) +{ + struct rmnet_nss_ctx *ctx; + struct hlist_head *bucket; + u32 hash; + + hash = hash_ptr(dev, HASH_BITS(rmnet_nss_ctx_hashtable)); + bucket = &rmnet_nss_ctx_hashtable[hash]; + hlist_for_each_entry(ctx, bucket, hnode) { + if (ctx->rmnet_dev == dev) + return ctx; + } + + return NULL; +} + +static void rmnet_nss_free_ctx(struct rmnet_nss_ctx *ctx) +{ + if (ctx) { + hash_del(&ctx->hnode); + nss_rmnet_rx_xmit_callback_unregister(ctx->nss_ctx); + nss_rmnet_rx_destroy_sync(ctx->nss_ctx); + kfree(ctx); + } +} + +/* Pull off an ethernet header, if possible */ +static int rmnet_nss_ethhdr_pull(struct sk_buff *skb) +{ + if (!skb->protocol || skb->protocol == htons(ETH_P_802_3)) { + void *ret = skb_pull(skb, sizeof(struct ethhdr)); + + rmnet_nss_inc_stat((ret) ? 
RMNET_NSS_RX_ETH :
+				    RMNET_NSS_RX_FAIL);
+		return !ret;
+	}
+
+	rmnet_nss_inc_stat(RMNET_NSS_RX_NON_ETH);
+	return -1;
+}
+
+/* Copy headers to the linear section for non-linear packets */
+static int rmnet_nss_adjust_header(struct sk_buff *skb)
+{
+	struct iphdr *iph;
+	skb_frag_t *frag;
+	int bytes = 0;
+	u8 transport;
+
+	if (skb_shinfo(skb)->nr_frags != 1) {
+		rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_FRAGS);
+		return -EINVAL;
+	}
+
+	if (skb_headlen(skb)) {
+		rmnet_nss_inc_stat(RMNET_NSS_TX_NON_ZERO_HEADLEN_FRAGS);
+		return 0;
+	}
+
+	frag = &skb_shinfo(skb)->frags[0];
+
+	iph = (struct iphdr *)(skb_frag_address(frag));
+
+	if (iph->version == 4) {
+		bytes = iph->ihl*4;
+		transport = iph->protocol;
+	} else if (iph->version == 6) {
+		struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;
+
+		bytes = sizeof(struct ipv6hdr);
+		/* Don't have to account for extension headers yet */
+		transport = ip6h->nexthdr;
+	} else {
+		rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_IP);
+		return -EINVAL;
+	}
+
+	if (transport == IPPROTO_TCP) {
+		struct tcphdr *th;
+
+		th = (struct tcphdr *)((u8 *)iph + bytes);
+		bytes += th->doff * 4;
+	} else if (transport == IPPROTO_UDP) {
+		bytes += sizeof(struct udphdr);
+	} else {
+		/* can't do anything else here, unfortunately, so linearize */
+		if (skb_linearize(skb)) {
+			rmnet_nss_inc_stat(RMNET_NSS_TX_LINEARIZE_FAILS);
+			return -EINVAL;
+		} else {
+			return 0;
+		}
+	}
+
+	if (bytes > skb_frag_size(frag)) {
+		rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_FRAGS);
+		return -EINVAL;
+	}
+
+	skb_push(skb, bytes);
+	memcpy(skb->data, iph, bytes);
+
+	/* subtract to account for skb_push */
+	skb->len -= bytes;
+
+	frag->page_offset += bytes;
+	skb_frag_size_sub(frag, bytes);
+
+	/* subtract to account for skb_frag_size_sub */
+	skb->data_len -= bytes;
+
+	return 0;
+}
+
+/* Main downlink handler
+ * Looks up the NSS context associated with the device. If the context is
+ * found, we add a dummy Ethernet header with the appropriate protocol field
+ * set, then pass the packet off to NSS for hardware acceleration.
+ */
+int rmnet_nss_tx(struct sk_buff *skb)
+{
+	struct ethhdr *eth;
+	struct rmnet_nss_ctx *ctx;
+	struct net_device *dev = skb->dev;
+	nss_tx_status_t rc;
+	unsigned int len;
+	u8 version;
+
+	if (skb_is_nonlinear(skb)) {
+		if (rmnet_nss_adjust_header(skb))
+			goto fail;
+		else
+			rmnet_nss_inc_stat(RMNET_NSS_TX_NONLINEAR);
+	}
+
+	version = ((struct iphdr *)skb->data)->version;
+
+	ctx = rmnet_nss_find_ctx(dev);
+	if (!ctx) {
+		rmnet_nss_inc_stat(RMNET_NSS_TX_NO_CTX);
+		return -EINVAL;
+	}
+
+	eth = (struct ethhdr *)skb_push(skb, sizeof(*eth));
+	memset(&eth->h_dest, 0, ETH_ALEN * 2);
+	if (version == 4) {
+		eth->h_proto = htons(ETH_P_IP);
+	} else if (version == 6) {
+		eth->h_proto = htons(ETH_P_IPV6);
+	} else {
+		rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_IP);
+		goto fail;
+	}
+
+	skb->protocol = htons(ETH_P_802_3);
+	/* Get length including ethhdr */
+	len = skb->len;
+
+transmit:
+	rc = nss_rmnet_rx_tx_buf(ctx->nss_ctx, skb);
+	if (rc == NSS_TX_SUCCESS) {
+		/* Increment rmnet_data device stats.
+		 * Don't call rmnet_data_vnd_rx_fixup() to do this, as
+		 * there's no guarantee the skb pointer is still valid.
+		 */
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += len;
+		rmnet_nss_inc_stat(RMNET_NSS_TX_SUCCESS);
+		return 0;
+	} else if (rc == NSS_TX_FAILURE_QUEUE) {
+		rmnet_nss_inc_stat(RMNET_NSS_TX_BUSY_LOOP);
+		goto transmit;
+	}
+
+fail:
+	rmnet_nss_inc_stat(RMNET_NSS_TX_FAIL);
+	kfree_skb(skb);
+	return 1;
+}
+
+/* Called by NSS in the DL exception case.
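+ * (i.e. a downlink packet that NSS could not handle in the fast path
+ * and has punted back to the host)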
+ * Since the packet cannot be sent over the accelerated path, we need to + * handle it. Remove the ethernet header and pass it onward to the stack + * if possible. + */ +void rmnet_nss_receive(struct net_device *dev, struct sk_buff *skb, + struct napi_struct *napi) +{ + rmnet_nss_inc_stat(RMNET_NSS_EXCEPTIONS); + + if (!skb) + return; + + if (rmnet_nss_ethhdr_pull(skb)) { + rmnet_nss_inc_stat(RMNET_NSS_EX_BAD_HDR); + goto drop; + } + + /* reset header pointers */ + skb_reset_transport_header(skb); + skb_reset_network_header(skb); + skb_reset_mac_header(skb); + + /* reset packet type */ + skb->pkt_type = PACKET_HOST; + + skb->dev = dev; + + /* reset protocol type */ + switch (skb->data[0] & 0xF0) { + case 0x40: + skb->protocol = htons(ETH_P_IP); + break; + case 0x60: + skb->protocol = htons(ETH_P_IPV6); + break; + default: + rmnet_nss_inc_stat(RMNET_NSS_EX_BAD_IP); + goto drop; + } + + rmnet_nss_inc_stat(RMNET_NSS_EX_SUCCESS); + + /* Set this so that we dont loop around netif_receive_skb */ + + skb->cb[0] = 1; + + netif_receive_skb(skb); + return; + +drop: + kfree_skb(skb); +} + +/* Called by NSS in the UL acceleration case. + * We are guaranteed to have an ethernet packet here from the NSS hardware, + * We need to pull the header off and invoke our ndo_start_xmit function + * to handle transmitting the packet to the network stack. + */ +void rmnet_nss_xmit(struct net_device *dev, struct sk_buff *skb) +{ + netdev_tx_t ret; + + skb_pull(skb, sizeof(struct ethhdr)); + rmnet_nss_inc_stat(RMNET_NSS_RX_ETH); + + /* NSS takes care of shaping, so bypassing Qdiscs like this is OK */ + ret = dev->netdev_ops->ndo_start_xmit(skb, dev); + if (unlikely(ret == NETDEV_TX_BUSY)) { + dev_kfree_skb_any(skb); + rmnet_nss_inc_stat(RMNET_NSS_RX_BUSY); + } +} + +/* Create and register an NSS context for an rmnet_data device */ +int rmnet_nss_create_vnd(struct net_device *dev) +{ + struct rmnet_nss_ctx *ctx; + + ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC); + if (!ctx) + return -ENOMEM; + + ctx->rmnet_dev = dev; + ctx->nss_ctx = nss_rmnet_rx_create_sync_nexthop(dev, NSS_N2H_INTERFACE, + NSS_C2C_TX_INTERFACE); + if (!ctx->nss_ctx) { + kfree(ctx); + return -1; + } + + nss_rmnet_rx_register(ctx->nss_ctx, rmnet_nss_receive, dev); + nss_rmnet_rx_xmit_callback_register(ctx->nss_ctx, rmnet_nss_xmit); + hash_add_ptr(rmnet_nss_ctx_hashtable, &ctx->hnode, dev); + return 0; +} + +/* Unregister and destroy the NSS context for an rmnet_data device */ +int rmnet_nss_free_vnd(struct net_device *dev) +{ + struct rmnet_nss_ctx *ctx; + + ctx = rmnet_nss_find_ctx(dev); + rmnet_nss_free_ctx(ctx); + + return 0; +} + +static const struct rmnet_nss_cb rmnet_nss = { + .nss_create = rmnet_nss_create_vnd, + .nss_free = rmnet_nss_free_vnd, + .nss_tx = rmnet_nss_tx, +}; + +int __init rmnet_nss_init(void) +{ + pr_err("%s(): initializing rmnet_nss\n", __func__); + RCU_INIT_POINTER(rmnet_nss_callbacks, &rmnet_nss); + return 0; +} + +void __exit rmnet_nss_exit(void) +{ + struct hlist_node *tmp; + struct rmnet_nss_ctx *ctx; + int bkt; + + pr_err("%s(): exiting rmnet_nss\n", __func__); + RCU_INIT_POINTER(rmnet_nss_callbacks, NULL); + + /* Tear down all NSS contexts */ + hash_for_each_safe(rmnet_nss_ctx_hashtable, bkt, tmp, ctx, hnode) + rmnet_nss_free_ctx(ctx); +} + +#if 0 +MODULE_LICENSE("GPL v2"); +module_init(rmnet_nss_init); +module_exit(rmnet_nss_exit); +#endif diff --git a/package/wwan/driver/quectel_MHI/src/log/AT_OVER_PCIE.txt b/package/wwan/driver/quectel_MHI/src/log/AT_OVER_PCIE.txt new file mode 100644 index 000000000..9f86ac32b --- 
/dev/null +++ b/package/wwan/driver/quectel_MHI/src/log/AT_OVER_PCIE.txt @@ -0,0 +1,31 @@ +root@imx6qsabresd:~# busybox microcom /dev/mhi_DUN +[ 384.652992] [I][mhi_uci_open] Node open, ref counts 1 +[ 384.658144] [I][mhi_uci_open] Starting channel +[ 384.662612] [I][__mhi_prepare_channel] Entered: preparing channel:32 +[ 384.680397] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1 +[ 384.685890] [I][__mhi_prepare_channel] Chan:32 successfully moved to start state +[ 384.693312] [I][__mhi_prepare_channel] Entered: preparing channel:33 +[ 384.708692] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1 +[ 384.714324] [I][__mhi_prepare_channel] Chan:33 successfully moved to start state + +RDY + ++CFUN: 1 + ++CPIN: READY + ++QUSIM: 1 + ++QIND: SMS DONE + ++QIND: PB DONE +ati +Quectel +EM20 +Revision: EM20GR01A01M4G + +OK +at+cpin? ++CPIN: READY + +OK diff --git a/package/wwan/driver/quectel_MHI/src/log/MBIM_OVER_PCIE.txt b/package/wwan/driver/quectel_MHI/src/log/MBIM_OVER_PCIE.txt new file mode 100644 index 000000000..a91b7387c --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/log/MBIM_OVER_PCIE.txt @@ -0,0 +1,145 @@ +root@OpenWrt:~# insmod pcie_mhi.ko mhi_mbim_enabled=1 +root@OpenWrt:~# dmesg | grep mhi +[ 65.587160] mhi_init Quectel_Linux_PCIE_MHI_Driver_V1.3.0.6 +[ 65.597089] mhi_pci_probe pci_dev->name = 0000:01:00.0, domain=0, bus=1, slot=0, vendor=17CB, device=0306 +[ 65.602250] mhi_q 0000:01:00.0: BAR 0: assigned [mem 0x20300000-0x20300fff 64bit] +[ 65.611690] mhi_q 0000:01:00.0: enabling device (0140 -> 0142) +[ 65.619307] [I][mhi_init_pci_dev] msi_required = 5, msi_allocated = 5, msi_irq = 63 +[ 65.619327] [I][mhi_power_up] dev_state:RESET +[ 65.619331] [I][mhi_async_power_up] Requested to power on +[ 65.619449] [I][mhi_alloc_coherent] size = 114688, dma_handle = 6fca0000 +[ 65.619462] [I][mhi_init_dev_ctxt] mhi_ctxt->ctrl_seg = c221e000 +[ 65.619731] [I][mhi_async_power_up] dev_state:RESET ee:AMSS +[ 65.619747] [I][mhi_pm_st_worker] Transition to state:READY +[ 65.619760] [I][mhi_pm_st_worker] INVALID_EE -> AMSS +[ 65.619764] [I][mhi_ready_state_transition] Waiting to enter READY state +[ 65.619885] [I][mhi_async_power_up] Power on setup success +[ 65.619897] [I][mhi_pci_probe] Return successful +[ 65.665114] [I][mhi_ready_state_transition] Device in READY State +[ 65.665125] [I][mhi_intvec_threaded_handlr] device ee:AMSS dev_state:READY, pm_state:POR +[ 65.665131] [I][mhi_intvec_threaded_handlr] device ee:AMSS dev_state:READY, INVALID_EE +[ 65.665133] [I][mhi_tryset_pm_state] Transition to pm state from:POR to:POR +[ 65.665137] [I][mhi_init_mmio] Initializing MMIO +[ 65.665142] [I][mhi_init_mmio] CHDBOFF:0x300 +[ 65.665151] [I][mhi_init_mmio] ERDBOFF:0x700 +[ 65.665156] [I][mhi_init_mmio] Programming all MMIO values. 
+[ 65.786283] [I][mhi_dump_tre] carl_ev evt_state_change mhistate=2 +[ 65.786289] [I][mhi_process_ctrl_ev_ring] MHI state change event to state:M0 +[ 65.786295] [I][mhi_pm_m0_transition] Entered With State:READY PM_STATE:POR +[ 65.786300] [I][mhi_tryset_pm_state] Transition to pm state from:POR to:M0 +[ 65.789734] [I][mhi_dump_tre] carl_ev evt_ee_state execenv=2 +[ 65.789739] [I][mhi_process_ctrl_ev_ring] MHI EE received event:AMSS +[ 65.789756] [I][mhi_pm_st_worker] Transition to state:MISSION MODE +[ 65.789767] [I][mhi_pm_st_worker] INVALID_EE -> AMSS +[ 65.789771] [I][mhi_pm_mission_mode_transition] Processing Mission Mode Transition +[ 65.789787] [I][mhi_init_timesync] No timesync capability found +[ 65.789791] [I][mhi_pm_mission_mode_transition] Adding new devices +[ 65.790570] [I][mhi_dtr_probe] Enter for DTR control channel +[ 65.790577] [I][__mhi_prepare_channel] Entered: preparing channel:18 +[ 65.797036] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1 +[ 65.797051] [I][__mhi_prepare_channel] Chan:18 successfully moved to start state +[ 65.797055] [I][__mhi_prepare_channel] Entered: preparing channel:19 +[ 65.802457] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1 +[ 65.802469] [I][__mhi_prepare_channel] Chan:19 successfully moved to start state +[ 65.802485] [I][mhi_dtr_probe] Exit with ret:0 +[ 65.802748] [I][mhi_netdev_enable_iface] Prepare the channels for transfer +[ 65.802772] [I][__mhi_prepare_channel] Entered: preparing channel:100 +[ 65.825279] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1 +[ 65.825293] [I][__mhi_prepare_channel] Chan:100 successfully moved to start state +[ 65.825297] [I][__mhi_prepare_channel] Entered: preparing channel:101 +[ 65.835565] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1 +[ 65.835578] [I][__mhi_prepare_channel] Chan:101 successfully moved to start state +[ 65.839141] [I][mhi_netdev_enable_iface] Exited. 
+[ 65.839875] rmnet_vnd_register_device(rmnet_mhi0.1)=0 +[ 65.843278] net rmnet_mhi0 rmnet_mhi0.1: NSS context created +[ 65.861808] [I][mhi_pm_mission_mode_transition] Exit with ret:0 +[ 68.625595] [I][__mhi_prepare_channel] Entered: preparing channel:12 +[ 68.634610] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1 +[ 68.634622] [I][__mhi_prepare_channel] Chan:12 successfully moved to start state +[ 68.634625] [I][__mhi_prepare_channel] Entered: preparing channel:13 +[ 68.644978] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1 +[ 68.644987] [I][__mhi_prepare_channel] Chan:13 successfully moved to start state +[ 69.170666] net rmnet_mhi0: link_state 0x0 -> 0x1 +[ 69.177035] [I][mhi_netdev_open] Opened net dev interface +[ 71.655431] [I][mhi_netdev_open] Opened net dev interface + +root@OpenWrt:~# ./quectel-CM & +[04-02_04:14:12:134] Quectel_QConnectManager_Linux_V1.6.0.5 +[04-02_04:14:12:134] Find /sys/bus/usb/devices/4-1 idVendor=0x2c7c idProduct=0x800, bus=0x004, dev=0x002 +[04-02_04:14:12:135] network interface '' or qmidev '' is not exist +[04-02_04:14:12:135] netcard driver = pcie_mhi, driver version = V1.3.0.6 +[04-02_04:14:12:135] Modem works in MBIM mode +[04-02_04:14:12:135] apn (null), user (null), passwd (null), auth 0 +[04-02_04:14:12:135] IP Proto MBIMContextIPTypeIPv4 +[04-02_04:14:12:154] mbim_read_thread is created +sh: can't create /sys/class/net/rmnet_mhi0/mbim/link_state: nonexistent directory +[04-02_04:14:12:156] system(echo 0 > /sys/class/net/rmnet_mhi0/mbim/link_state)=256 +[04-02_04:14:12:185] system(ip address flush dev rmnet_mhi0)=0 +[04-02_04:14:12:187] system(ip link set dev rmnet_mhi0 down)=0 +[04-02_04:14:12:188] mbim_open_device() +[04-02_04:14:12:605] mbim_device_caps_query() +[04-02_04:14:12:610] DeviceId: 869710030002905 +[04-02_04:14:12:610] HardwareInfo: 0 +[04-02_04:14:12:610] mbim_set_radio_state( 1 ) +[04-02_04:14:12:613] HwRadioState: 1, SwRadioState: 1 +[04-02_04:14:12:613] mbim_subscriber_status_query() +[04-02_04:14:12:620] SubscriberReadyState NotInitialized -> Initialized +[04-02_04:14:12:620] mbim_register_state_query() +[04-02_04:14:12:625] RegisterState Unknown -> Home +[04-02_04:14:12:625] mbim_packet_service_query() +[04-02_04:14:12:629] PacketServiceState Unknown -> Attached +[04-02_04:14:12:629] mbim_query_connect(sessionID=0) +[04-02_04:14:12:633] ActivationState Unknown -> Deactivated +[04-02_04:14:12:633] mbim_set_connect(onoff=1, sessionID=0) +[ 69.170666] net rmnet_mhi0: link_state 0x0 -> 0x1 +[04-02_04:14:12:680] ActivationState Deactivated -> Activated +[ 69.177035] [I][mhi_netdev_open] Opened net dev interface +[04-02_04:14:12:680] mbim_ip_config(sessionID=0) +[04-02_04:14:12:683] < SessionId = 0 +[04-02_04:14:12:683] < IPv4ConfigurationAvailable = 0xf +[04-02_04:14:12:683] < IPv6ConfigurationAvailable = 0x0 +[04-02_04:14:12:683] < IPv4AddressCount = 0x1 +[04-02_04:14:12:683] < IPv4AddressOffset = 0x3c +[04-02_04:14:12:683] < IPv6AddressCount = 0x0 +[04-02_04:14:12:683] < IPv6AddressOffset = 0x0 +[04-02_04:14:12:683] < IPv4 = 10.129.59.93/30 +[04-02_04:14:12:683] < gw = 10.129.59.94 +[04-02_04:14:12:683] < dns1 = 211.138.180.2 +[04-02_04:14:12:683] < dns2 = 211.138.180.3 +[04-02_04:14:12:683] < ipv4 mtu = 1500 +sh: can't create /sys/class/net/rmnet_mhi0/mbim/link_state: nonexistent directory +[04-02_04:14:12:684] system(echo 1 > /sys/class/net/rmnet_mhi0/mbim/link_state)=256 +[04-02_04:14:12:689] system(ip link set dev rmnet_mhi0 up)=0 +[04-02_04:14:12:692] system(ip -4 address flush dev rmnet_mhi0)=0 +[04-02_04:14:12:694] system(ip 
-4 address add 10.129.59.93/30 dev rmnet_mhi0)=0 +[04-02_04:14:12:697] system(ip -4 route add default via 10.129.59.94 dev rmnet_mhi0)=0 +[04-02_04:14:12:699] system(ip -4 link set dev rmnet_mhi0 mtu 1500)=0 + +root@OpenWrt:~# ifconfig rmnet_mhi0 +rmnet_mhi0 Link encap:UNSPEC HWaddr 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00 + UP RUNNING NOARP MTU:1500 Metric:1 + RX packets:99379 errors:0 dropped:0 overruns:0 frame:0 + TX packets:176569 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:1000 + RX bytes:1528181052 (1.4 GiB) TX bytes:62467192 (59.5 MiB) + +root@OpenWrt:~# ifconfig rmnet_mhi0.1 +rmnet_mhi0.1 Link encap:UNSPEC HWaddr 02-50-F4-00-00-00-00-00-00-00-00-00-00-00-00-00 + inet addr:10.129.59.93 Mask:255.255.255.252 + inet6 addr: fe80::50:f4ff:fe00:0/64 Scope:Link + UP RUNNING NOARP MTU:1500 Metric:1 + RX packets:1089360 errors:0 dropped:0 overruns:0 frame:0 + TX packets:176581 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:1000 + RX bytes:1521449058 (1.4 GiB) TX bytes:57525792 (54.8 MiB) + +# adjust CPU load balancing +root@OpenWrt:~# echo 2 > /sys/class/net/rmnet_mhi0/queues/rx-0/rps_cpus +root@OpenWrt:~# echo 4 > /sys/class/net/rmnet_mhi0.1/queues/rx-0/rps_cpus +root@OpenWrt:~# echo 2000 > /proc/sys/net/core/netdev_max_backlog +root@OpenWrt:~# cat /sys/class/net/rmnet_mhi0/queues/rx-0/rps_cpus +2 +root@OpenWrt:~# cat /sys/class/net/rmnet_mhi0.1/queues/rx-0/rps_cpus +4 +root@OpenWrt:~# cat /proc/sys/net/core/netdev_max_backlog +2000 diff --git a/package/wwan/driver/quectel_MHI/src/log/QMI_OVER_PCIE.txt b/package/wwan/driver/quectel_MHI/src/log/QMI_OVER_PCIE.txt new file mode 100644 index 000000000..3604545ba --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/log/QMI_OVER_PCIE.txt @@ -0,0 +1,134 @@ +disable ccflags-y += -DCONFIG_MHI_NETDEV_MBIM in pcie_mhi/Makefile + +root@OpenWrt:~# insmod pcie_mhi.ko + +root@OpenWrt:~# dmesg | grep mhi +[ 138.483252] mhi_init Quectel_Linux_PCIE_MHI_Driver_V1.3.0.6 +[ 138.492350] mhi_pci_probe pci_dev->name = 0000:01:00.0, domain=0, bus=1, slot=0, vendor=17CB, device=0306 +[ 138.497564] mhi_q 0000:01:00.0: BAR 0: assigned [mem 0x20300000-0x20300fff 64bit] +[ 138.506952] mhi_q 0000:01:00.0: enabling device (0140 -> 0142) +[ 138.514562] [I][mhi_init_pci_dev] msi_required = 5, msi_allocated = 5, msi_irq = 63 +[ 138.514581] [I][mhi_power_up] dev_state:RESET +[ 138.514587] [I][mhi_async_power_up] Requested to power on +[ 138.514728] [I][mhi_alloc_coherent] size = 114688, dma_handle = 72160000 +[ 138.514734] [I][mhi_init_dev_ctxt] mhi_ctxt->ctrl_seg = c221f000 +[ 138.515030] [I][mhi_async_power_up] dev_state:RESET ee:AMSS +[ 138.515056] [I][mhi_pm_st_worker] Transition to state:READY +[ 138.515067] [I][mhi_pm_st_worker] INVALID_EE -> AMSS +[ 138.515073] [I][mhi_ready_state_transition] Waiting to enter READY state +[ 138.515210] [I][mhi_async_power_up] Power on setup success +[ 138.515227] [I][mhi_pci_probe] Return successful +[ 138.589013] [I][mhi_ready_state_transition] Device in READY State +[ 138.589029] [I][mhi_intvec_threaded_handlr] device ee:AMSS dev_state:READY, pm_state:POR +[ 138.589038] [I][mhi_intvec_threaded_handlr] device ee:AMSS dev_state:READY, INVALID_EE +[ 138.589041] [I][mhi_tryset_pm_state] Transition to pm state from:POR to:POR +[ 138.589046] [I][mhi_init_mmio] Initializing MMIO +[ 138.589050] [I][mhi_init_mmio] CHDBOFF:0x300 +[ 138.589060] [I][mhi_init_mmio] ERDBOFF:0x700 +[ 138.589065] [I][mhi_init_mmio] Programming all MMIO values. 
+[ 138.706124] [I][mhi_dump_tre] carl_ev evt_state_change mhistate=2 +[ 138.706132] [I][mhi_process_ctrl_ev_ring] MHI state change event to state:M0 +[ 138.706140] [I][mhi_pm_m0_transition] Entered With State:READY PM_STATE:POR +[ 138.706146] [I][mhi_tryset_pm_state] Transition to pm state from:POR to:M0 +[ 138.708699] [I][mhi_dump_tre] carl_ev evt_ee_state execenv=2 +[ 138.708706] [I][mhi_process_ctrl_ev_ring] MHI EE received event:AMSS +[ 138.708726] [I][mhi_pm_st_worker] Transition to state:MISSION MODE +[ 138.708736] [I][mhi_pm_st_worker] INVALID_EE -> AMSS +[ 138.708742] [I][mhi_pm_mission_mode_transition] Processing Mission Mode Transition +[ 138.708758] [I][mhi_init_timesync] No timesync capability found +[ 138.708764] [I][mhi_pm_mission_mode_transition] Adding new devices +[ 138.709785] [I][mhi_dtr_probe] Enter for DTR control channel +[ 138.709794] [I][__mhi_prepare_channel] Entered: preparing channel:18 +[ 138.715378] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1 +[ 138.715397] [I][__mhi_prepare_channel] Chan:18 successfully moved to start state +[ 138.715403] [I][__mhi_prepare_channel] Entered: preparing channel:19 +[ 138.720201] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1 +[ 138.720218] [I][__mhi_prepare_channel] Chan:19 successfully moved to start state +[ 138.720236] [I][mhi_dtr_probe] Exit with ret:0 +[ 138.720590] [I][mhi_netdev_enable_iface] Prepare the channels for transfer +[ 138.720630] [I][__mhi_prepare_channel] Entered: preparing channel:100 +[ 138.757230] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1 +[ 138.757253] [I][__mhi_prepare_channel] Chan:100 successfully moved to start state +[ 138.757259] [I][__mhi_prepare_channel] Entered: preparing channel:101 +[ 138.774352] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1 +[ 138.774370] [I][__mhi_prepare_channel] Chan:101 successfully moved to start state +[ 138.778137] [I][mhi_netdev_enable_iface] Exited. 
+[ 138.779018] rmnet_vnd_register_device(rmnet_mhi0.1)=0 +[ 138.782283] net rmnet_mhi0 rmnet_mhi0.1: NSS context created +[ 138.800865] [I][mhi_pm_mission_mode_transition] Exit with ret:0 + +root@OpenWrt:~# ./quectel-CM & +root@OpenWrt:~# [04-02_04:12:16:477] Quectel_QConnectManager_Linux_V1.6.0.5 +[04-02_04:12:16:477] Find /sys/bus/usb/devices/4-1 idVendor=0x2c7c idProduct=0x800, bus=0x004, dev=0x002 +[04-02_04:12:16:478] network interface '' or qmidev '' is not exist +[04-02_04:12:16:478] netcard driver = pcie_mhi, driver version = V1.3.0.6 +[04-02_04:12:16:479] qmap_mode = 1, qmap_version = 9, qmap_size = 16384, muxid = 0x81, qmap_netcard = rmnet_mhi0.1 +[04-02_04:12:16:479] Modem works in QMI mode +[04-02_04:12:16:505] cdc_wdm_fd = 7 +[04-02_04:12:17:506] QmiThreadSendQMITimeout pthread_cond_timeout_np timeout +[04-02_04:12:18:516] Get clientWDS = 19 +[04-02_04:12:18:520] Get clientDMS = 1 +[04-02_04:12:18:524] Get clientNAS = 3 +[04-02_04:12:18:527] Get clientUIM = 1 +[04-02_04:12:18:531] Get clientWDA = 1 +[04-02_04:12:18:535] requestBaseBandVersion RM500QGLAAR03A01M4G_BETA_20200107F 1 [Dec 30 2019 17:00:00] +[04-02_04:12:18:539] qmap_settings.rx_urb_size = 16384 +[04-02_04:12:18:539] qmap_settings.ul_data_aggregation_max_datagrams = 16 +[04-02_04:12:18:539] qmap_settings.ul_data_aggregation_max_size = 8192 +[04-02_04:12:18:539] qmap_settings.dl_minimum_padding = 0 +[04-02_04:12:18:550] requestSetLoopBackState(loopback_state=1, replication_factor=14) +[04-02_04:12:18:557] requestGetSIMStatus SIMStatus: SIM_ABSENT +[04-02_04:12:18:560] requestGetProfile[1] ///0 +[04-02_04:12:18:563] requestRegistrationState2 MCC: 0, MNC: 0, PS: Detached, DataCap: UNKNOW +[04-02_04:12:18:565] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED +[04-02_04:12:18:566] ifconfig rmnet_mhi0.1 down +[04-02_04:12:18:571] ifconfig rmnet_mhi0.1 0.0.0.0 +ifconfig: SIOCSIFFLAGS: Network is down +[04-02_04:12:18:575] SetLoopBackInd: loopback_state=1, replication_factor=14 +[04-02_04:12:18:591] requestSetupDataCall WdsConnectionIPv4Handle: 0xe40182a0 +[04-02_04:12:18:601] ifconfig rmnet_mhi0 up +[04-02_04:12:18:607] ifconfig rmnet_mhi0.1 up +[04-02_04:12:18:613] you are use OpenWrt? +[04-02_04:12:18:614] should not calling udhcpc manually? +[04-02_04:12:18:614] should modify /etc/config/network as below? +[04-02_04:12:18:614] config interface wan +[04-02_04:12:18:614] option ifname rmnet_mhi0.1 +[04-02_04:12:18:614] option proto dhcp +[04-02_04:12:18:614] should use "/sbin/ifstaus wan" to check rmnet_mhi0.1 's status? 
+[04-02_04:12:18:614] busybox udhcpc -f -n -q -t 5 -i rmnet_mhi0.1 +udhcpc: started, v1.28.3 +udhcpc: sending discover +udhcpc: sending select for 192.168.48.171 +udhcpc: lease of 192.168.48.171 obtained, lease time 7200 +[04-02_04:12:18:809] udhcpc: ifconfig rmnet_mhi0.1 192.168.48.171 netmask 255.255.255.248 broadcast + +[04-02_04:12:18:819] udhcpc: setting default routers: 192.168.48.172 + +root@OpenWrt:~# ifconfig rmnet_mhi0 +rmnet_mhi0 Link encap:Ethernet HWaddr 02:50:F4:00:00:00 + inet6 addr: fe80::50:f4ff:fe00:0/64 Scope:Link + UP RUNNING NOARP MTU:1500 Metric:1 + RX packets:2 errors:0 dropped:0 overruns:0 frame:0 + TX packets:2 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:1000 + RX bytes:608 (608.0 B) TX bytes:672 (672.0 B) + +root@OpenWrt:~# ifconfig rmnet_mhi0.1 +rmnet_mhi0.1 Link encap:UNSPEC HWaddr 02-50-F4-00-00-00-00-00-00-00-00-00-00-00-00-00 + inet addr:192.168.48.171 Mask:255.255.255.248 + inet6 addr: fe80::50:f4ff:fe00:0/64 Scope:Link + UP RUNNING NOARP MTU:1500 Metric:1 + RX packets:2 errors:0 dropped:0 overruns:0 frame:0 + TX packets:2 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:1000 + RX bytes:592 (592.0 B) TX bytes:656 (656.0 B) + +# adjust CPU load balancing +root@OpenWrt:~# echo 2 > /sys/class/net/rmnet_mhi0/queues/rx-0/rps_cpus +root@OpenWrt:~# echo 4 > /sys/class/net/rmnet_mhi0.1/queues/rx-0/rps_cpus +root@OpenWrt:~# echo 2000 > /proc/sys/net/core/netdev_max_backlog +root@OpenWrt:~# cat /sys/class/net/rmnet_mhi0/queues/rx-0/rps_cpus +2 +root@OpenWrt:~# cat /sys/class/net/rmnet_mhi0.1/queues/rx-0/rps_cpus +4 +root@OpenWrt:~# cat /proc/sys/net/core/netdev_max_backlog +2000 \ No newline at end of file diff --git a/package/wwan/driver/quectel_MHI/src/log/QXDM_OVER_PCIE.txt b/package/wwan/driver/quectel_MHI/src/log/QXDM_OVER_PCIE.txt new file mode 100644 index 000000000..13e9cc381 --- /dev/null +++ b/package/wwan/driver/quectel_MHI/src/log/QXDM_OVER_PCIE.txt @@ -0,0 +1,14 @@ +root@imx6qsabresd:~# ./QLog -p /dev/mhi_DIAG -s log & +root@imx6qsabresd:~# [000.000]QLog Version: Quectel_QLog_Linux&Android_V1.2.4 +[ 298.597963] [I][mhi_uci_open] Node open, ref counts 1 +[ 298.605601] [I][mhi_uci_open] Starting channel +[ 298.612159] [I][__mhi_prepare_channel] Entered: preparing channel:4 +[ 298.629906] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1 +[ 298.635415] [I][__mhi_prepare_channel] Chan:4 successfully moved to start state +[ 298.642749] [I][__mhi_prepare_channel] Entered: preparing channel:5 +[ 298.658043] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1 +[ 298.663543] [I][__mhi_prepare_channel] Chan:5 successfully moved to start state +[000.075]open /dev/mhi_DIAG ttyfd = 3 +[000.075]Press CTRL+C to stop catch log. +[000.096]qlog_logfile_create log/20160920_145758_0000.qmdl logfd=4 +[005.268]recv: 0M 70K 490B in 5181 msec diff --git a/package/wwan/driver/quectel_QMI_WWAN/Makefile b/package/wwan/driver/quectel_QMI_WWAN/Makefile new file mode 100755 index 000000000..74e9d1c62 --- /dev/null +++ b/package/wwan/driver/quectel_QMI_WWAN/Makefile @@ -0,0 +1,47 @@ +# +# Copyright (C) 2015 OpenWrt.org +# +# This is free software, licensed under the GNU General Public License v2. +# See /LICENSE for more information. 
+#
+
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=qmi_wwan_q
+PKG_VERSION:=3.0
+PKG_RELEASE:=1
+
+include $(INCLUDE_DIR)/kernel.mk
+include $(INCLUDE_DIR)/package.mk
+
+define KernelPackage/qmi_wwan_q
+  SUBMENU:=qmiwwan Support
+  TITLE:=Quectel Linux USB QMI WWAN Driver
+  DEPENDS:=+kmod-usb-net +kmod-usb-wdm
+  FILES:=$(PKG_BUILD_DIR)/qmi_wwan_q.ko
+  AUTOLOAD:=$(call AutoLoad,81,qmi_wwan_q)
+endef
+
+define KernelPackage/qmi_wwan_q/description
+  Quectel Linux USB QMI WWAN Driver
+endef
+
+MAKE_OPTS:= \
+	ARCH="$(LINUX_KARCH)" \
+	CROSS_COMPILE="$(TARGET_CROSS)" \
+	CXXFLAGS="$(TARGET_CXXFLAGS)" \
+	M="$(PKG_BUILD_DIR)" \
+	$(EXTRA_KCONFIG)
+
+define Build/Prepare
+	mkdir -p $(PKG_BUILD_DIR)
+	$(CP) ./src/* $(PKG_BUILD_DIR)/
+endef
+
+define Build/Compile
+	$(MAKE) -C "$(LINUX_DIR)" \
+		$(MAKE_OPTS) \
+		modules
+endef
+
+$(eval $(call KernelPackage,qmi_wwan_q))
diff --git a/package/wwan/driver/quectel_QMI_WWAN/src/Makefile b/package/wwan/driver/quectel_QMI_WWAN/src/Makefile
new file mode 100644
index 000000000..774d168db
--- /dev/null
+++ b/package/wwan/driver/quectel_QMI_WWAN/src/Makefile
@@ -0,0 +1,36 @@
+obj-m += qmi_wwan_q.o
+
+PWD := $(shell pwd)
+OUTPUTDIR=/lib/modules/`uname -r`/kernel/drivers/net/usb/
+
+ifeq ($(ARCH),)
+ARCH := $(shell uname -m)
+endif
+ifeq ($(CROSS_COMPILE),)
+CROSS_COMPILE :=
+endif
+ifeq ($(KDIR),)
+KDIR := /lib/modules/$(shell uname -r)/build
+ifeq ($(ARCH),i686)
+ifeq ($(wildcard $(KDIR)/arch/$(ARCH)),)
+ARCH=i386
+endif
+endif
+endif
+
+ifneq ($(findstring &,${PWD}),)
+$(warning "${PWD}")
+$(warning "current directory contains the special char '&'!")
+$(error "please remove it!")
+endif
+
+default:
+	$(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) modules
+
+install: default
+	cp $(PWD)/qmi_wwan_q.ko /lib/modules/$(shell uname -r)/kernel/drivers/net/usb/
+	depmod
+
+clean:
+	rm -rf *~ .tmp_versions modules.order Module.symvers
+	find . -type f -name "*~" -o -name "*.o" -o -name "*.ko" -o -name "*.cmd" -o -name "*.mod.c" | xargs rm -rf
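+
+# Example out-of-tree cross-build using the variables above (the paths and
+# toolchain prefix are illustrative only, not part of this package):
+#   make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- \
+#        KDIR=/path/to/target/kernel/build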
diff --git a/package/wwan/driver/quectel_QMI_WWAN/src/ReleaseNote.txt b/package/wwan/driver/quectel_QMI_WWAN/src/ReleaseNote.txt
new file mode 100644
index 000000000..8f2015214
--- /dev/null
+++ b/package/wwan/driver/quectel_QMI_WWAN/src/ReleaseNote.txt
@@ -0,0 +1,146 @@
+Release Notes
+
+[V1.2.2]
+Date: 9/7/2022
+enhancement:
+    1. Optimization: the network card send-queue wakeup is changed from callback to tasklet
+    2. Add the function of returning LAN packets in bridge mode
+    3. support ndo ioctl on kernels > 5.14
+    4. Allow setting MTU greater than 1500
+fix:
+
+[V1.2.1]
+Date: 9/26/2021
+enhancement:
+    1. support IPQ5018's NSS
+    2. use 'qsdk/qca/src/data-kernel/drivers/rmnet-nss/rmnet_nss.c' instead of my own rmnet_nss.c,
+    and qmi_wwan_q.ko must be loaded after rmnet_nss.ko
+fix:
+
+[V1.2.0.25]
+Date: 9/17/2021
+enhancement:
+fix:
+    1. add sdx6x platform support
+
+[V1.2.0.24]
+Date: 9/6/2021
+enhancement:
+fix:
+    1. add BG95 support
+    2. support Linux 5.14.0
+
+[V1.2.0.23]
+Date: 3/23/2021
+enhancement:
+fix:
+    1. add sdx12 platform support
+
+[V1.2.0.22]
+Date: 2/5/2021
+enhancement:
+fix:
+    1. fix panic (memory access after free) during modem reboot stress tests
+
+[V1.2.0.21]
+Date: 2/4/2021
+enhancement:
+    1. Code refactoring - QMAP and rmnet
+fix:
+    1. qmap_qmi_wwan_rx_fixup: change skb_dequeue to __skb_dequeue
+
+[V1.2.0.20]
+Date: 11/2/2020
+enhancement:
+fix:
+    1. LTE-A modems cannot obtain an IP by DHCP
+
+[V1.2.0.19]
+Date: 10/9/2020
+enhancement:
+fix:
+    1. X55 cannot access the internet after USB resume
+
+[V1.2.0.18]
+Date: 10/9/2020
+enhancement:
+fix:
+    1. X55: rename rmnet_usb0.1 to wwan0_1
+    1.1 if there is a '.', OpenWrt will treat the interface as a VLAN and auto-create the VLAN
+    1.2 if there is a '.', Android will treat the name as invalid
+    1.3 if named rmnet_usb0 on a QCOM SoC, QCOM's netmgr will auto-manage it
+
+[V1.2.0.17]
+Date: 9/14/2020
+enhancement:
+    1. Code refactoring - QMAP size and version
+fix:
+
+[V1.2.0.16]
+Date: 9/14/2020
+enhancement:
+    1. rx_fixup(): check whether there is enough skb_headroom() to fill the ethernet header
+fix:
+    1. fix "WARNING: suspicious RCU usage"
+
+[V1.2.0.15]
+Date: 9/10/2020
+enhancement:
+fix:
+    1. fix compile errors on kernel 3.10~3.13
+
+[V1.2.0.14]
+Date: 7/24/2020
+enhancement:
+fix:
+    1. fix QMAP V5 bug on big-endian CPUs
+
+[V1.2.0.13]
+Date: 6/22/2020
+enhancement:
+fix:
+    1. fix no data traffic when doing upload TPUT tests
+
+[V1.2.0.12]
+Date: 5/29/2020
+enhancement:
+fix:
+    1. IPQ8074: when hyfi is enabled, quectel-CM will crash the system
+
+[V1.2.0.9]
+Date: 5/13/2020
+enhancement:
+fix:
+    1. IPQ8074: enable CONFIG_QCA_NSS_DRV keyed on CONFIG_PINCTRL_IPQ807x (instead of CONFIG_ARCH_IPQ807x)
+
+[V1.2.0.8]
+Date: 5/9/2020
+enhancement:
+fix:
+    1. fix compile errors on kernel V3.10
+
+[V1.2.0.7]
+Date: 4/25/2020
+enhancement:
+    1. X55: support bridge mode
+fix:
+
+[V1.2.0.6]
+Date: 4/20/2020
+enhancement:
+    1. add stat64, otherwise the rx/tx statistics wrap to 0 once traffic exceeds 4GB
+    2. do not use skb_clone, which makes QCOM's NSS and SFE CPU loading very high
+fix:
+
+[V1.2.0.5]
+Date: 4/8/2020
+enhancement:
+    1. add attribute link_state; change carrier state according to link_state
+    quectel-CM will set link_state to 1 when the QMI setup call succeeds.
+fix:
+
+[V1.2.0.4]
+Date: 4/8/2020
+enhancement:
+    1. support X55's QMAP V5
+fix:
diff --git a/package/wwan/driver/quectel_QMI_WWAN/src/qmi_wwan_q.c b/package/wwan/driver/quectel_QMI_WWAN/src/qmi_wwan_q.c
new file mode 100644
index 000000000..5b9c9ac18
--- /dev/null
+++ b/package/wwan/driver/quectel_QMI_WWAN/src/qmi_wwan_q.c
@@ -0,0 +1,2577 @@
+/*
+ * Copyright (c) 2012 Bjørn Mork
+ *
+ * The probing code is heavily inspired by cdc_ether, which is:
+ * Copyright (C) 2003-2005 by David Brownell
+ * Copyright (C) 2006 by Ole Andre Vadla Ravnas (ActiveSync)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#if LINUX_VERSION_CODE > KERNEL_VERSION(3,16,0) //8b094cd03b4a3793220d8d8d86a173bfea8c285b
+#include
+#else
+#define timespec64 timespec
+#define ktime_get_ts64 ktime_get_ts
+#define timespec64_sub timespec_sub
+#endif
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifndef ETH_P_MAP
+#define ETH_P_MAP 0xDA1A
+#endif
+
+#if (ETH_P_MAP == 0x00F9)
+#undef ETH_P_MAP
+#define ETH_P_MAP 0xDA1A
+#endif
+
+#ifndef ARPHRD_RAWIP
+#define ARPHRD_RAWIP ARPHRD_NONE
+#endif
+
+#ifdef CONFIG_PINCTRL_IPQ807x
+#define CONFIG_QCA_NSS_DRV
+//#define CONFIG_QCA_NSS_PACKET_FILTER
+#endif
+
+/* define rmnet_nss.h's include guards (both spellings) so the real header is not pulled in */
+#define _RMNET_NSS_H_
+#define _RMENT_NSS_H_
+struct rmnet_nss_cb {
+	int (*nss_create)(struct net_device *dev);
+	int (*nss_free)(struct net_device *dev);
+	int (*nss_tx)(struct sk_buff *skb);
+};
+static struct rmnet_nss_cb __read_mostly *nss_cb = NULL;
+#if defined(CONFIG_PINCTRL_IPQ807x) || defined(CONFIG_PINCTRL_IPQ5018)
+#ifdef CONFIG_RMNET_DATA
+#define CONFIG_QCA_NSS_DRV
+/* define at qsdk/qca/src/linux-4.4/net/rmnet_data/rmnet_data_main.c */
+/* set at qsdk/qca/src/data-kernel/drivers/rmnet-nss/rmnet_nss.c */
+extern struct rmnet_nss_cb *rmnet_nss_callbacks __rcu __read_mostly;
+#endif
+#endif
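+
+/* Note: the NSS hooks are reached through this runtime-resolved callback
+ * struct rather than direct symbol references; per ReleaseNote.txt (V1.2.1),
+ * qmi_wwan_q.ko must be loaded after rmnet_nss.ko, so that the callback
+ * pointer is already populated when this driver starts using it. */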
+
+/* This driver supports wwan (3G/LTE/?) devices using a vendor
+ * specific management protocol called Qualcomm MSM Interface (QMI) -
+ * in addition to the more common AT commands over serial interface
+ * management
+ *
+ * QMI is wrapped in CDC, using CDC encapsulated commands on the
+ * control ("master") interface of a two-interface CDC Union
+ * resembling standard CDC ECM. The devices do not use the control
+ * interface for any other CDC messages. Most likely because the
+ * management protocol is used in place of the standard CDC
+ * notifications NOTIFY_NETWORK_CONNECTION and NOTIFY_SPEED_CHANGE
+ *
+ * Alternatively, control and data functions can be combined in a
+ * single USB interface.
+ *
+ * Handling a protocol like QMI is out of the scope for any driver.
+ * It is exported as a character device using the cdc-wdm driver as
+ * a subdriver, enabling userspace applications ("modem managers") to
+ * handle it.
+ *
+ * These devices may alternatively/additionally be configured using AT
+ * commands on a serial interface
+ */
+#define VERSION_NUMBER "V1.2.2"
+#define QUECTEL_WWAN_VERSION "Quectel_Linux&Android_QMI_WWAN_Driver_"VERSION_NUMBER
+static const char driver_name[] = "qmi_wwan_q";
+
+/* driver specific data */
+struct qmi_wwan_state {
+	struct usb_driver *subdriver;
+	atomic_t pmcount;
+	unsigned long unused;
+	struct usb_interface *control;
+	struct usb_interface *data;
+};
+
+/* default ethernet address used by the modem */
+static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3};
+
+#if 1 //Added by Quectel
+/*
+	Quectel_WCDMA&LTE_Linux_USB_Driver_User_Guide_V1.9.pdf
+	5.6.
Test QMAP on GobiNet or QMI WWAN + 0 - no QMAP + 1 - QMAP (Aggregation protocol) + X - QMAP (Multiplexing and Aggregation protocol) +*/ +#define QUECTEL_WWAN_QMAP 4 //MAX is 7 + +#if defined(QUECTEL_WWAN_QMAP) +#define QUECTEL_QMAP_MUX_ID 0x81 + +static uint __read_mostly qmap_mode = 0; +module_param( qmap_mode, uint, S_IRUGO); +module_param_named( rx_qmap, qmap_mode, uint, S_IRUGO ); +#endif + +#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) || defined(CONFIG_BRIDGE_LAN) +#define QUECTEL_BRIDGE_MODE +#endif + +#ifdef QUECTEL_BRIDGE_MODE +static uint __read_mostly bridge_mode = 0/*|BIT(1)*/; +module_param( bridge_mode, uint, S_IRUGO ); +#endif + +#ifdef CONFIG_BRIDGE_LAN +static const u8 broadcast_mac_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; +#endif + +//#define QMI_NETDEV_ONE_CARD_MODE +static uint __read_mostly one_card_mode = 0; + +#if defined(QUECTEL_WWAN_QMAP) +#define QUECTEL_UL_DATA_AGG 1 + +#if defined(QUECTEL_UL_DATA_AGG) +struct tx_agg_ctx { + /* QMIWDS_ADMIN_SET_DATA_FORMAT_RESP TLV_0x17 and TLV_0x18 */ + uint ul_data_aggregation_max_datagrams; //UplinkDataAggregationMaxDatagramsTlv + uint ul_data_aggregation_max_size; //UplinkDataAggregationMaxSizeTlv + uint dl_minimum_padding; //0x1A +}; +#endif + +typedef struct { + unsigned int size; + unsigned int rx_urb_size; + unsigned int ep_type; + unsigned int iface_id; + unsigned int qmap_mode; + unsigned int qmap_version; + unsigned int dl_minimum_padding; + char ifname[8][16]; + unsigned char mux_id[8]; +} RMNET_INFO; + +typedef struct sQmiWwanQmap +{ + struct usbnet *mpNetDev; + struct driver_info driver_info; + atomic_t refcount; + struct net_device *mpQmapNetDev[QUECTEL_WWAN_QMAP]; + uint link_state; + uint qmap_mode; + uint qmap_size; + uint qmap_version; + +#if defined(QUECTEL_UL_DATA_AGG) + struct tx_agg_ctx tx_ctx; + struct tasklet_struct txq; + struct tasklet_struct usbnet_bh; +#endif + +#ifdef QUECTEL_BRIDGE_MODE + uint bridge_mode; + uint bridge_ipv4; + unsigned char bridge_mac[ETH_ALEN]; +#ifdef CONFIG_BRIDGE_LAN + unsigned char bridge_self_mac[ETH_ALEN]; +#endif +#endif + uint use_rmnet_usb; + RMNET_INFO rmnet_info; +} sQmiWwanQmap; + +#if LINUX_VERSION_CODE > KERNEL_VERSION(3,13,0) //8f84985fec10de64a6b4cdfea45f2b0ab8f07c78 +#define MHI_NETDEV_STATUS64 +#endif +struct qmap_priv { + struct usbnet *dev; + struct net_device *real_dev; + struct net_device *self_dev; + u8 offset_id; + u8 mux_id; + u8 qmap_version; // 5~v1, 9~v5 + u8 link_state; + +#if defined(MHI_NETDEV_STATUS64) + struct pcpu_sw_netstats __percpu *stats64; +#endif + + spinlock_t agg_lock; + struct sk_buff *agg_skb; + unsigned agg_count; + struct timespec64 agg_time; + struct hrtimer agg_hrtimer; + struct work_struct agg_wq; + +#ifdef QUECTEL_BRIDGE_MODE + uint bridge_mode; + uint bridge_ipv4; + unsigned char bridge_mac[ETH_ALEN]; +#ifdef CONFIG_BRIDGE_LAN + unsigned char bridge_self_mac[ETH_ALEN]; +#endif +#endif + uint use_qca_nss; +}; + +struct qmap_hdr { + u8 cd_rsvd_pad; + u8 mux_id; + u16 pkt_len; +} __packed; + +enum rmnet_map_v5_header_type { + RMNET_MAP_HEADER_TYPE_UNKNOWN, + RMNET_MAP_HEADER_TYPE_COALESCING = 0x1, + RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD = 0x2, + RMNET_MAP_HEADER_TYPE_ENUM_LENGTH +}; + +/* Main QMAP header */ +struct rmnet_map_header { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 pad_len:6; + u8 next_hdr:1; + u8 cd_bit:1; +#elif defined (__BIG_ENDIAN_BITFIELD) + u8 cd_bit:1; + u8 next_hdr:1; + u8 pad_len:6; +#else +#error "Please fix " +#endif + u8 mux_id; + __be16 pkt_len; +} __aligned(1); + +/* QMAP v5 
headers */ +struct rmnet_map_v5_csum_header { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 next_hdr:1; + u8 header_type:7; + u8 hw_reserved:7; + u8 csum_valid_required:1; +#elif defined (__BIG_ENDIAN_BITFIELD) + u8 header_type:7; + u8 next_hdr:1; + u8 csum_valid_required:1; + u8 hw_reserved:7; +#else +#error "Please fix " +#endif + __be16 reserved; +} __aligned(1); + +#ifdef QUECTEL_BRIDGE_MODE +static int is_qmap_netdev(const struct net_device *netdev); +#endif +#endif + +static const struct driver_info rmnet_usb_info; + +#ifdef QUECTEL_BRIDGE_MODE +static int bridge_arp_reply(struct net_device *net, struct sk_buff *skb, uint bridge_ipv4) { + struct arphdr *parp; + u8 *arpptr, *sha; + u8 sip[4], tip[4], ipv4[4]; + struct sk_buff *reply = NULL; + + ipv4[0] = (bridge_ipv4 >> 24) & 0xFF; + ipv4[1] = (bridge_ipv4 >> 16) & 0xFF; + ipv4[2] = (bridge_ipv4 >> 8) & 0xFF; + ipv4[3] = (bridge_ipv4 >> 0) & 0xFF; + + parp = arp_hdr(skb); + + if (parp->ar_hrd == htons(ARPHRD_ETHER) && parp->ar_pro == htons(ETH_P_IP) + && parp->ar_op == htons(ARPOP_REQUEST) && parp->ar_hln == 6 && parp->ar_pln == 4) { + arpptr = (u8 *)parp + sizeof(struct arphdr); + sha = arpptr; + arpptr += net->addr_len; /* sha */ + memcpy(sip, arpptr, sizeof(sip)); + arpptr += sizeof(sip); + arpptr += net->addr_len; /* tha */ + memcpy(tip, arpptr, sizeof(tip)); + + pr_info("%s sip = %d.%d.%d.%d, tip=%d.%d.%d.%d, ipv4=%d.%d.%d.%d\n", netdev_name(net), + sip[0], sip[1], sip[2], sip[3], tip[0], tip[1], tip[2], tip[3], ipv4[0], ipv4[1], ipv4[2], ipv4[3]); + //wwan0 sip = 10.151.137.255, tip=10.151.138.0, ipv4=10.151.137.255 + if (tip[0] == ipv4[0] && tip[1] == ipv4[1] && (tip[2]&0xFC) == (ipv4[2]&0xFC) && tip[3] != ipv4[3]) + reply = arp_create(ARPOP_REPLY, ETH_P_ARP, *((__be32 *)sip), net, *((__be32 *)tip), sha, default_modem_addr, sha); + + if (reply) { + skb_reset_mac_header(reply); + __skb_pull(reply, skb_network_offset(reply)); + reply->ip_summed = CHECKSUM_UNNECESSARY; + reply->pkt_type = PACKET_HOST; + + netif_rx_ni(reply); + } + return 1; + } + + return 0; +} + +static struct sk_buff *bridge_mode_tx_fixup(struct net_device *net, struct sk_buff *skb, uint bridge_ipv4, unsigned char *bridge_mac) { + struct ethhdr *ehdr; + const struct iphdr *iph; + + skb_reset_mac_header(skb); + ehdr = eth_hdr(skb); + + if (ehdr->h_proto == htons(ETH_P_ARP)) { + if (bridge_ipv4) + bridge_arp_reply(net, skb, bridge_ipv4); + return NULL; + } + + iph = ip_hdr(skb); + //DBG("iphdr: "); + //PrintHex((void *)iph, sizeof(struct iphdr)); + +// 1 0.000000000 0.0.0.0 255.255.255.255 DHCP 362 DHCP Request - Transaction ID 0xe7643ad7 + if (ehdr->h_proto == htons(ETH_P_IP) && iph->protocol == IPPROTO_UDP && iph->saddr == 0x00000000 && iph->daddr == 0xFFFFFFFF) { + //if (udp_hdr(skb)->dest == htons(67)) //DHCP Request + { + memcpy(bridge_mac, ehdr->h_source, ETH_ALEN); + pr_info("%s PC Mac Address: %02x:%02x:%02x:%02x:%02x:%02x\n", netdev_name(net), + bridge_mac[0], bridge_mac[1], bridge_mac[2], bridge_mac[3], bridge_mac[4], bridge_mac[5]); + } + } + +#ifdef CONFIG_BRIDGE_LAN + //bridge Lan IP 192.168.0.0 + if (ehdr->h_proto == htons(ETH_P_IP) && (iph->daddr & 0xFFFF) == 0xA8C0) + { + struct sk_buff *reply = skb_copy(skb, GFP_ATOMIC); + ehdr = eth_hdr(reply); + + memcpy(ehdr->h_source, default_modem_addr, ETH_ALEN); + if(is_qmap_netdev(net)) + { + struct qmap_priv *priv = netdev_priv(net); + memcpy(ehdr->h_dest, priv->bridge_self_mac, ETH_ALEN); + } + else + { + struct usbnet * usbnetdev = netdev_priv(net); + struct qmi_wwan_state *info = (void 
*)&usbnetdev->data; + sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused; + memcpy(ehdr->h_dest, pQmapDev->bridge_self_mac, ETH_ALEN); + } + + //pr_info("%s br rx pkt addr: %02x:%02x:%02x:%02x:%02x:%02x -> %02x:%02x:%02x:%02x:%02x:%02x\n", netdev_name(net), + // ehdr->h_source[0], ehdr->h_source[1], ehdr->h_source[2], ehdr->h_source[3], ehdr->h_source[4], ehdr->h_source[5], + // ehdr->h_dest[0], ehdr->h_dest[1], ehdr->h_dest[2], ehdr->h_dest[3], ehdr->h_dest[4], ehdr->h_dest[5]); + + skb_reset_mac_header(reply); + __skb_pull(reply, skb_network_offset(reply)); + reply->ip_summed = CHECKSUM_UNNECESSARY; + reply->pkt_type = PACKET_HOST; + netif_rx_ni(reply); + return NULL; + } +#endif + + if (memcmp(ehdr->h_source, bridge_mac, ETH_ALEN)) { + return NULL; + } + + return skb; +} + +static void bridge_mode_rx_fixup(sQmiWwanQmap *pQmapDev, struct net_device *net, struct sk_buff *skb) { + uint bridge_mode = 0; + unsigned char *bridge_mac; + + if (pQmapDev->qmap_mode > 1 || pQmapDev->use_rmnet_usb == 1) { + struct qmap_priv *priv = netdev_priv(net); + bridge_mode = priv->bridge_mode; + bridge_mac = priv->bridge_mac; + } + else { + bridge_mode = pQmapDev->bridge_mode; + bridge_mac = pQmapDev->bridge_mac; + } + + if (bridge_mode) + memcpy(eth_hdr(skb)->h_dest, bridge_mac, ETH_ALEN); + else + memcpy(eth_hdr(skb)->h_dest, net->dev_addr, ETH_ALEN); +} +#endif + +#if defined(QUECTEL_WWAN_QMAP) +static ssize_t qmap_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct net_device *netdev = to_net_dev(dev); + struct usbnet * usbnetdev = netdev_priv( netdev ); + struct qmi_wwan_state *info = (void *)&usbnetdev->data; + sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused; + + return snprintf(buf, PAGE_SIZE, "%d\n", pQmapDev->qmap_mode); +} + +static DEVICE_ATTR(qmap_mode, S_IRUGO, qmap_mode_show, NULL); + +static ssize_t qmap_size_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct net_device *netdev = to_net_dev(dev); + struct usbnet * usbnetdev = netdev_priv( netdev ); + struct qmi_wwan_state *info = (void *)&usbnetdev->data; + sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused; + + return snprintf(buf, PAGE_SIZE, "%u\n", pQmapDev->qmap_size); +} + +static DEVICE_ATTR(qmap_size, S_IRUGO, qmap_size_show, NULL); + +static ssize_t link_state_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct net_device *netdev = to_net_dev(dev); + struct usbnet * usbnetdev = netdev_priv( netdev ); + struct qmi_wwan_state *info = (void *)&usbnetdev->data; + sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused; + + return snprintf(buf, PAGE_SIZE, "0x%x\n", pQmapDev->link_state); +} + +static ssize_t link_state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + struct net_device *netdev = to_net_dev(dev); + struct usbnet * usbnetdev = netdev_priv( netdev ); + struct qmi_wwan_state *info = (void *)&usbnetdev->data; + sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused; + unsigned link_state = 0; + unsigned old_link = pQmapDev->link_state; + uint offset_id = 0; + + link_state = simple_strtoul(buf, NULL, 0); + + if (pQmapDev->qmap_mode == 1) { + pQmapDev->link_state = !!link_state; + } + else if (pQmapDev->qmap_mode > 1) { + offset_id = ((link_state&0x7F) - 1); + + if (offset_id >= pQmapDev->qmap_mode) { + dev_info(dev, "%s offset_id is %d. 
but qmap_mode is %d\n", __func__, offset_id, pQmapDev->qmap_mode); + return count; + } + + if (link_state&0x80) + pQmapDev->link_state &= ~(1 << offset_id); + else + pQmapDev->link_state |= (1 << offset_id); + } + + if (old_link != pQmapDev->link_state) { + struct net_device *qmap_net = pQmapDev->mpQmapNetDev[offset_id]; + + if (usbnetdev->net->flags & IFF_UP) { + if (pQmapDev->link_state) { + netif_carrier_on(usbnetdev->net); + } + } + + if (qmap_net && qmap_net != netdev) { + struct qmap_priv *priv = netdev_priv(qmap_net); + + priv->link_state = !!(pQmapDev->link_state & (1 << offset_id)); + + if (qmap_net->flags & IFF_UP) { + if (priv->link_state) { + netif_carrier_on(qmap_net); + if (netif_queue_stopped(qmap_net) && !netif_queue_stopped(usbnetdev->net)) + netif_wake_queue(qmap_net); + } + else { + netif_carrier_off(qmap_net); + } + } + } + + if (usbnetdev->net->flags & IFF_UP) { + if (!pQmapDev->link_state) { + netif_carrier_off(usbnetdev->net); + } + } + + dev_info(dev, "link_state 0x%x -> 0x%x\n", old_link, pQmapDev->link_state); + } + + return count; +} + +#ifdef QUECTEL_BRIDGE_MODE +static ssize_t bridge_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + struct net_device *netdev = to_net_dev(dev); + uint old_mode = 0; + uint bridge_mode = simple_strtoul(buf, NULL, 0); + + if (netdev->type != ARPHRD_ETHER) { + return count; + } + + if (is_qmap_netdev(netdev)) { + struct qmap_priv *priv = netdev_priv(netdev); + old_mode = priv->bridge_mode; + priv->bridge_mode = bridge_mode; + } + else { + struct usbnet * usbnetdev = netdev_priv( netdev ); + struct qmi_wwan_state *info = (void *)&usbnetdev->data; + sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused; + old_mode = pQmapDev->bridge_mode; + pQmapDev->bridge_mode = bridge_mode; + } + + if (old_mode != bridge_mode) { + dev_info(dev, "bridge_mode change to 0x%x\n", bridge_mode); + } + + return count; +} + +static ssize_t bridge_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct net_device *netdev = to_net_dev(dev); + uint bridge_mode = 0; + + if (is_qmap_netdev(netdev)) { + struct qmap_priv *priv = netdev_priv(netdev); + bridge_mode = priv->bridge_mode; + } + else { + struct usbnet * usbnetdev = netdev_priv( netdev ); + struct qmi_wwan_state *info = (void *)&usbnetdev->data; + sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused; + bridge_mode = pQmapDev->bridge_mode; + } + + return snprintf(buf, PAGE_SIZE, "%u\n", bridge_mode); +} + +static ssize_t bridge_ipv4_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct net_device *netdev = to_net_dev(dev); + unsigned int bridge_ipv4 = 0; + unsigned char ipv4[4]; + + if (is_qmap_netdev(netdev)) { + struct qmap_priv *priv = netdev_priv(netdev); + bridge_ipv4 = priv->bridge_ipv4; + } + else { + struct usbnet * usbnetdev = netdev_priv( netdev ); + struct qmi_wwan_state *info = (void *)&usbnetdev->data; + sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused; + bridge_ipv4 = pQmapDev->bridge_ipv4; + } + + ipv4[0] = (bridge_ipv4 >> 24) & 0xFF; + ipv4[1] = (bridge_ipv4 >> 16) & 0xFF; + ipv4[2] = (bridge_ipv4 >> 8) & 0xFF; + ipv4[3] = (bridge_ipv4 >> 0) & 0xFF; + + return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n", ipv4[0], ipv4[1], ipv4[2], ipv4[3]); +} + +static ssize_t bridge_ipv4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + struct net_device *netdev = to_net_dev(dev); + + if (is_qmap_netdev(netdev)) { + struct qmap_priv *priv = netdev_priv(netdev); + 
priv->bridge_ipv4 = simple_strtoul(buf, NULL, 16); + } + else { + struct usbnet * usbnetdev = netdev_priv( netdev ); + struct qmi_wwan_state *info = (void *)&usbnetdev->data; + sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused; + pQmapDev->bridge_ipv4 = simple_strtoul(buf, NULL, 16); + } + + return count; +} +#endif + +static DEVICE_ATTR(link_state, S_IWUSR | S_IRUGO, link_state_show, link_state_store); +#ifdef QUECTEL_BRIDGE_MODE +static DEVICE_ATTR(bridge_mode, S_IWUSR | S_IRUGO, bridge_mode_show, bridge_mode_store); +static DEVICE_ATTR(bridge_ipv4, S_IWUSR | S_IRUGO, bridge_ipv4_show, bridge_ipv4_store); +#endif + +static struct attribute *qmi_wwan_sysfs_attrs[] = { + &dev_attr_link_state.attr, + &dev_attr_qmap_mode.attr, + &dev_attr_qmap_size.attr, +#ifdef QUECTEL_BRIDGE_MODE + &dev_attr_bridge_mode.attr, + &dev_attr_bridge_ipv4.attr, +#endif + NULL, +}; + +static struct attribute_group qmi_wwan_sysfs_attr_group = { + .attrs = qmi_wwan_sysfs_attrs, +}; + +#ifdef QUECTEL_BRIDGE_MODE +static struct attribute *qmi_qmap_sysfs_attrs[] = { + &dev_attr_bridge_mode.attr, + &dev_attr_bridge_ipv4.attr, + NULL, +}; + +static struct attribute_group qmi_qmap_sysfs_attr_group = { + .attrs = qmi_qmap_sysfs_attrs, +}; +#endif + +static int qmap_open(struct net_device *qmap_net) +{ + struct qmap_priv *priv = netdev_priv(qmap_net); + struct net_device *real_dev = priv->real_dev; + + //printk("%s %s real_dev %d %d %d %d+++\n", __func__, dev->name, + // netif_carrier_ok(real_dev), netif_queue_stopped(real_dev), netif_carrier_ok(dev), netif_queue_stopped(dev)); + + if (!(priv->real_dev->flags & IFF_UP)) + return -ENETDOWN; + + if (priv->link_state) { + netif_carrier_on(real_dev); + netif_carrier_on(qmap_net); + if (netif_queue_stopped(qmap_net) && !netif_queue_stopped(real_dev)) + netif_wake_queue(qmap_net); + } + //printk("%s %s real_dev %d %d %d %d---\n", __func__, dev->name, + // netif_carrier_ok(real_dev), netif_queue_stopped(real_dev), netif_carrier_ok(dev), netif_queue_stopped(dev)); + + return 0; +} + +static int qmap_stop(struct net_device *qmap_net) +{ + //printk("%s %s %d %d+++\n", __func__, dev->name, + // netif_carrier_ok(dev), netif_queue_stopped(dev)); + + netif_carrier_off(qmap_net); + return 0; +} + +static void qmap_wake_queue(sQmiWwanQmap *pQmapDev) +{ + uint i = 0; + + if (!pQmapDev || !pQmapDev->use_rmnet_usb) + return; + + for (i = 0; i < pQmapDev->qmap_mode; i++) { + struct net_device *qmap_net = pQmapDev->mpQmapNetDev[i]; + + if (qmap_net && netif_carrier_ok(qmap_net) && netif_queue_stopped(qmap_net)) { + netif_wake_queue(qmap_net); + } + } +} + +static struct sk_buff * add_qhdr(struct sk_buff *skb, u8 mux_id) { + struct qmap_hdr *qhdr; + int pad = 0; + + pad = skb->len%4; + if (pad) { + pad = 4 - pad; + if (skb_tailroom(skb) < pad) { + printk("skb_tailroom small!\n"); + pad = 0; + } + if (pad) + __skb_put(skb, pad); + } + + qhdr = (struct qmap_hdr *)skb_push(skb, sizeof(struct qmap_hdr)); + qhdr->cd_rsvd_pad = pad; + qhdr->mux_id = mux_id; + qhdr->pkt_len = cpu_to_be16(skb->len - sizeof(struct qmap_hdr)); + + return skb; +} + +static struct sk_buff * add_qhdr_v5(struct sk_buff *skb, u8 mux_id) { + struct rmnet_map_header *map_header; + struct rmnet_map_v5_csum_header *ul_header; + u32 padding, map_datalen; + + map_datalen = skb->len; + padding = map_datalen%4; + if (padding) { + padding = 4 - padding; + if (skb_tailroom(skb) < padding) { + printk("skb_tailroom small!\n"); + padding = 0; + } + if (padding) + __skb_put(skb, padding); + } + + map_header = (struct rmnet_map_header 
*)skb_push(skb, (sizeof(struct rmnet_map_header) + sizeof(struct rmnet_map_v5_csum_header))); + map_header->cd_bit = 0; + map_header->next_hdr = 1; + map_header->pad_len = padding; + map_header->mux_id = mux_id; + map_header->pkt_len = htons(map_datalen + padding); + + ul_header = (struct rmnet_map_v5_csum_header *)(map_header + 1); + memset(ul_header, 0, sizeof(*ul_header)); + ul_header->header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD; + if (skb->ip_summed == CHECKSUM_PARTIAL) { +#if 0 //TODO + skb->ip_summed = CHECKSUM_NONE; + /* Ask for checksum offloading */ + ul_header->csum_valid_required = 1; +#endif + } + + return skb; +} + +static void rmnet_vnd_update_rx_stats(struct net_device *net, + unsigned rx_packets, unsigned rx_bytes) { +#if defined(MHI_NETDEV_STATUS64) + struct qmap_priv *dev = netdev_priv(net); + struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64); + + u64_stats_update_begin(&stats64->syncp); + stats64->rx_packets += rx_packets; + stats64->rx_bytes += rx_bytes; + u64_stats_update_end(&stats64->syncp); +#else + net->stats.rx_packets += rx_packets; + net->stats.rx_bytes += rx_bytes; +#endif +} + +static void rmnet_vnd_update_tx_stats(struct net_device *net, + unsigned tx_packets, unsigned tx_bytes) { +#if defined(MHI_NETDEV_STATUS64) + struct qmap_priv *dev = netdev_priv(net); + struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64); + + u64_stats_update_begin(&stats64->syncp); + stats64->tx_packets += tx_packets; + stats64->tx_bytes += tx_bytes; + u64_stats_update_end(&stats64->syncp); +#else + net->stats.tx_packets += tx_packets; + net->stats.tx_bytes += tx_bytes; +#endif +} + +#if defined(MHI_NETDEV_STATUS64) +static struct rtnl_link_stats64 *_rmnet_vnd_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats) +{ + struct qmap_priv *dev = netdev_priv(net); + unsigned int start; + int cpu; + + netdev_stats_to_stats64(stats, &net->stats); + + if (nss_cb && dev->use_qca_nss) { // rmnet_nss.c:rmnet_nss_tx() will update rx stats + stats->rx_packets = 0; + stats->rx_bytes = 0; + } + + for_each_possible_cpu(cpu) { + struct pcpu_sw_netstats *stats64; + u64 rx_packets, rx_bytes; + u64 tx_packets, tx_bytes; + + stats64 = per_cpu_ptr(dev->stats64, cpu); + + do { + start = u64_stats_fetch_begin_irq(&stats64->syncp); + rx_packets = stats64->rx_packets; + rx_bytes = stats64->rx_bytes; + tx_packets = stats64->tx_packets; + tx_bytes = stats64->tx_bytes; + } while (u64_stats_fetch_retry_irq(&stats64->syncp, start)); + + stats->rx_packets += rx_packets; + stats->rx_bytes += rx_bytes; + stats->tx_packets += tx_packets; + stats->tx_bytes += tx_bytes; + } + + return stats; +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,10,0 )) //bc1f44709cf27fb2a5766cadafe7e2ad5e9cb221 +static void rmnet_vnd_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats) { + _rmnet_vnd_get_stats64(net, stats); +} +#else +static struct rtnl_link_stats64 *rmnet_vnd_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats) { + return _rmnet_vnd_get_stats64(net, stats); +} +#endif +#endif + +#if defined(QUECTEL_UL_DATA_AGG) +static void usbnet_bh(unsigned long data) { + sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)data; + struct tasklet_struct *t = &pQmapDev->usbnet_bh; + bool use_callback = false; + +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 5,8,0 )) //c955e329bb9d44fab75cf2116542fcc0de0473c5 + use_callback = t->use_callback; + if (use_callback) + t->callback(&pQmapDev->mpNetDev->bh); +#endif + + if (!use_callback) + t->func(t->data); + + if 
(!netif_queue_stopped(pQmapDev->mpNetDev->net)) { + qmap_wake_queue((sQmiWwanQmap *)data); + } +} + +static void rmnet_usb_tx_wake_queue(unsigned long data) { + qmap_wake_queue((sQmiWwanQmap *)data); +} + +#if 0 +static void rmnet_usb_tx_skb_destructor(struct sk_buff *skb) { + struct net_device *net = skb->dev; + struct usbnet * dev = netdev_priv( net ); + struct qmi_wwan_state *info = (void *)&dev->data; + sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused; + + if (pQmapDev && pQmapDev->use_rmnet_usb) { + int i; + + for (i = 0; i < pQmapDev->qmap_mode; i++) { + struct net_device *qmap_net = pQmapDev->mpQmapNetDev[i]; + + if (qmap_net && netif_carrier_ok(qmap_net) && netif_queue_stopped(qmap_net)) { + tasklet_schedule(&pQmapDev->txq); + break; + } + } + } +} +#endif + +static int rmnet_usb_tx_agg_skip(struct sk_buff *skb, int offset) +{ + u8 *packet_start = skb->data + offset; + int ready2send = 0; + + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *ip4h = (struct iphdr *)(packet_start); + + if (ip4h->protocol == IPPROTO_TCP) { + const struct tcphdr *th = (const struct tcphdr *)(packet_start + sizeof(struct iphdr)); + if (th->psh) { + ready2send = 1; + } + } + else if (ip4h->protocol == IPPROTO_ICMP) + ready2send = 1; + + } else if (skb->protocol == htons(ETH_P_IPV6)) { + struct ipv6hdr *ip6h = (struct ipv6hdr *)(packet_start); + + if (ip6h->nexthdr == NEXTHDR_TCP) { + const struct tcphdr *th = (const struct tcphdr *)(packet_start + sizeof(struct ipv6hdr)); + if (th->psh) { + ready2send = 1; + } + } else if (ip6h->nexthdr == NEXTHDR_ICMP) { + ready2send = 1; + } else if (ip6h->nexthdr == NEXTHDR_FRAGMENT) { + struct frag_hdr *frag; + + frag = (struct frag_hdr *)(packet_start + + sizeof(struct ipv6hdr)); + if (frag->nexthdr == IPPROTO_ICMPV6) + ready2send = 1; + } + } + + return ready2send; +} + +static void rmnet_usb_tx_agg_work(struct work_struct *work) +{ + struct qmap_priv *priv = + container_of(work, struct qmap_priv, agg_wq); + struct sk_buff *skb = NULL; + unsigned long flags; + + spin_lock_irqsave(&priv->agg_lock, flags); + if (likely(priv->agg_skb)) { + skb = priv->agg_skb; + priv->agg_skb = NULL; + priv->agg_count = 0; + skb->protocol = htons(ETH_P_MAP); + skb->dev = priv->real_dev; + ktime_get_ts64(&priv->agg_time); + } + spin_unlock_irqrestore(&priv->agg_lock, flags); + + if (skb) { + int err; +#if 0 + if (!skb->destructor) + skb->destructor = rmnet_usb_tx_skb_destructor; +#endif + err = dev_queue_xmit(skb); + if (err != NET_XMIT_SUCCESS) { + priv->self_dev->stats.tx_errors++; + } + } +} + +static enum hrtimer_restart rmnet_usb_tx_agg_timer_cb(struct hrtimer *timer) +{ + struct qmap_priv *priv = + container_of(timer, struct qmap_priv, agg_hrtimer); + + schedule_work(&priv->agg_wq); + return HRTIMER_NORESTART; +} + +static long agg_time_limit __read_mostly = 1000000L; //reduce this time, can get better TPUT performance, but will increase USB interrupts +module_param(agg_time_limit, long, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(agg_time_limit, "Maximum time packets sit in the agg buf"); + +static long agg_bypass_time __read_mostly = 10000000L; +module_param(agg_bypass_time, long, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(agg_bypass_time, "Skip agg when apart spaced more than this"); + +static int rmnet_usb_tx_agg(struct sk_buff *skb, struct qmap_priv *priv) { + struct qmi_wwan_state *info = (void *)&priv->dev->data; + sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused; + struct tx_agg_ctx *ctx = &pQmapDev->tx_ctx; + int ready2send = 0; + int xmit_more = 0; + struct 
timespec64 diff, now; + struct sk_buff *agg_skb = NULL; + unsigned long flags; + int err; + struct net_device *pNet = priv->self_dev; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0) //6b16f9ee89b8d5709f24bc3ac89ae8b5452c0d7c +#if LINUX_VERSION_CODE > KERNEL_VERSION(3,16,0) + xmit_more = skb->xmit_more; +#endif +#else + xmit_more = netdev_xmit_more(); +#endif + + rmnet_vnd_update_tx_stats(pNet, 1, skb->len); + + if (ctx->ul_data_aggregation_max_datagrams == 1) { + skb->protocol = htons(ETH_P_MAP); + skb->dev = priv->real_dev; +#if 0 + if (!skb->destructor) + skb->destructor = rmnet_usb_tx_skb_destructor; +#endif + err = dev_queue_xmit(skb); + if (err != NET_XMIT_SUCCESS) + pNet->stats.tx_errors++; + return NET_XMIT_SUCCESS; + } + +new_packet: + spin_lock_irqsave(&priv->agg_lock, flags); + agg_skb = NULL; + ready2send = 0; + ktime_get_ts64(&now); + diff = timespec64_sub(now, priv->agg_time); + + if (priv->agg_skb) { + if ((priv->agg_skb->len + skb->len) < ctx->ul_data_aggregation_max_size) { + memcpy(skb_put(priv->agg_skb, skb->len), skb->data, skb->len); + priv->agg_count++; + + if (diff.tv_sec > 0 || diff.tv_nsec > agg_time_limit) { + ready2send = 1; + } + else if (priv->agg_count == ctx->ul_data_aggregation_max_datagrams) { + ready2send = 1; + } + else if (xmit_more == 0) { + struct rmnet_map_header *map_header = (struct rmnet_map_header *)skb->data; + size_t offset = sizeof(struct rmnet_map_header); + if (map_header->next_hdr) + offset += sizeof(struct rmnet_map_v5_csum_header); + + ready2send = rmnet_usb_tx_agg_skip(skb, offset); + } + + dev_kfree_skb_any(skb); + skb = NULL; + } + else { + ready2send = 1; + } + + if (ready2send) { + agg_skb = priv->agg_skb; + priv->agg_skb = NULL; + priv->agg_count = 0; + } + } + else if (skb) { + if (diff.tv_sec > 0 || diff.tv_nsec > agg_bypass_time) { + ready2send = 1; + } + else if (xmit_more == 0) { + struct rmnet_map_header *map_header = (struct rmnet_map_header *)skb->data; + size_t offset = sizeof(struct rmnet_map_header); + if (map_header->next_hdr) + offset += sizeof(struct rmnet_map_v5_csum_header); + + ready2send = rmnet_usb_tx_agg_skip(skb, offset); + } + + if (ready2send == 0) { + priv->agg_skb = alloc_skb(ctx->ul_data_aggregation_max_size, GFP_ATOMIC); + if (priv->agg_skb) { + skb_reset_network_header(priv->agg_skb); //protocol da1a is buggy, dev wwan0 + memcpy(skb_put(priv->agg_skb, skb->len), skb->data, skb->len); + priv->agg_count++; + dev_kfree_skb_any(skb); + skb = NULL; + } + else { + ready2send = 1; + } + } + + if (ready2send) { + agg_skb = skb; + skb = NULL; + } + } + + if (ready2send) { + priv->agg_time = now; + } + spin_unlock_irqrestore(&priv->agg_lock, flags); + + if (agg_skb) { + agg_skb->protocol = htons(ETH_P_MAP); + agg_skb->dev = priv->real_dev; +#if 0 + if (!agg_skb->destructor) + agg_skb->destructor = rmnet_usb_tx_skb_destructor; +#endif + err = dev_queue_xmit(agg_skb); + if (err != NET_XMIT_SUCCESS) { + pNet->stats.tx_errors++; + } + } + + if (skb) { + goto new_packet; + } + + if (priv->agg_skb) { + if (!hrtimer_is_queued(&priv->agg_hrtimer)) + hrtimer_start(&priv->agg_hrtimer, ns_to_ktime(NSEC_PER_MSEC * 2), HRTIMER_MODE_REL); + } + + return NET_XMIT_SUCCESS; +} +#endif + +static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb, + struct net_device *pNet) +{ + int err; + struct qmap_priv *priv = netdev_priv(pNet); + + if (netif_queue_stopped(priv->real_dev)) { + netif_stop_queue(pNet); + return NETDEV_TX_BUSY; + } + + //printk("%s 1 skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, 
skb->protocol, skb->hdr_len); + if (pNet->type == ARPHRD_ETHER) { + skb_reset_mac_header(skb); + +#ifdef QUECTEL_BRIDGE_MODE + if (priv->bridge_mode && bridge_mode_tx_fixup(pNet, skb, priv->bridge_ipv4, priv->bridge_mac) == NULL) { + dev_kfree_skb_any (skb); + return NETDEV_TX_OK; + } +#endif + + if (skb_pull(skb, ETH_HLEN) == NULL) { + dev_kfree_skb_any (skb); + return NETDEV_TX_OK; + } + } + //printk("%s 2 skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + + if (priv->qmap_version == 5) { + add_qhdr(skb, priv->mux_id); + } + else if (priv->qmap_version == 9) { + add_qhdr_v5(skb, priv->mux_id); + } + else { + dev_kfree_skb_any (skb); + return NETDEV_TX_OK; + } + //printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + + err = rmnet_usb_tx_agg(skb, priv); + + return err; +} + +static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu) +{ + if (new_mtu < 0) + return -EINVAL; + + if (new_mtu > 1500) + printk("warning, set mtu greater than 1500, %d\n", new_mtu); + + rmnet_dev->mtu = new_mtu; + return 0; +} + +/* drivers may override default ethtool_ops in their bind() routine */ +static const struct ethtool_ops rmnet_vnd_ethtool_ops = { + .get_link = ethtool_op_get_link, +}; + +static const struct net_device_ops rmnet_vnd_ops = { + .ndo_open = qmap_open, + .ndo_stop = qmap_stop, + .ndo_start_xmit = rmnet_vnd_start_xmit, + .ndo_change_mtu = rmnet_vnd_change_mtu, +#if defined(MHI_NETDEV_STATUS64) + .ndo_get_stats64 = rmnet_vnd_get_stats64, +#endif +}; + +static void rmnet_usb_ether_setup(struct net_device *rmnet_dev) +{ + ether_setup(rmnet_dev); + + rmnet_dev->flags |= IFF_NOARP; + rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); + +#if LINUX_VERSION_CODE > KERNEL_VERSION(4,10,0) + rmnet_dev->max_mtu = 65535; +#endif + + rmnet_dev->ethtool_ops = &rmnet_vnd_ethtool_ops; + rmnet_dev->netdev_ops = &rmnet_vnd_ops; +} + +static void rmnet_usb_rawip_setup(struct net_device *rmnet_dev) +{ + rmnet_dev->needed_headroom = 16; + + /* Raw IP mode */ + rmnet_dev->header_ops = NULL; /* No header */ + rmnet_dev->type = ARPHRD_RAWIP; + rmnet_dev->hard_header_len = 0; + rmnet_dev->flags |= IFF_NOARP; + rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); + + rmnet_dev->ethtool_ops = &rmnet_vnd_ethtool_ops; + rmnet_dev->netdev_ops = &rmnet_vnd_ops; +} + +static rx_handler_result_t qca_nss_rx_handler(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + + if (!skb) + return RX_HANDLER_CONSUMED; + + //printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + + if (skb->pkt_type == PACKET_LOOPBACK) + return RX_HANDLER_PASS; + + /* Check this so that we dont loop around netif_receive_skb */ + if (skb->cb[0] == 1) { + skb->cb[0] = 0; + + return RX_HANDLER_PASS; + } + + if (nss_cb) { + nss_cb->nss_tx(skb); + return RX_HANDLER_CONSUMED; + } + + return RX_HANDLER_PASS; +} + +static int qmap_register_device(sQmiWwanQmap * pDev, u8 offset_id) +{ + struct net_device *real_dev = pDev->mpNetDev->net; + struct net_device *qmap_net; + struct qmap_priv *priv; + int err; + char name[IFNAMSIZ]; + int use_qca_nss = !!nss_cb; + + sprintf(name, "%s_%d", real_dev->name, offset_id + 1); +#ifdef NET_NAME_UNKNOWN + qmap_net = alloc_netdev(sizeof(struct qmap_priv), name, + NET_NAME_UNKNOWN, rmnet_usb_ether_setup); +#else + qmap_net = alloc_netdev(sizeof(struct qmap_priv), name, + rmnet_usb_ether_setup); +#endif + if (!qmap_net) + return -ENOBUFS; + + 
SET_NETDEV_DEV(qmap_net, &real_dev->dev); + priv = netdev_priv(qmap_net); + priv->offset_id = offset_id; + priv->real_dev = real_dev; + priv->self_dev = qmap_net; + priv->dev = pDev->mpNetDev; + priv->qmap_version = pDev->qmap_version; + priv->mux_id = QUECTEL_QMAP_MUX_ID + offset_id; + memcpy (qmap_net->dev_addr, real_dev->dev_addr, ETH_ALEN); + +#ifdef QUECTEL_BRIDGE_MODE + priv->bridge_mode = !!(pDev->bridge_mode & BIT(offset_id)); + qmap_net->sysfs_groups[0] = &qmi_qmap_sysfs_attr_group; + if (priv->bridge_mode) + use_qca_nss = 0; +#ifdef CONFIG_BRIDGE_LAN + memcpy(priv->bridge_self_mac, broadcast_mac_addr, ETH_ALEN); +#endif +#endif + + if (nss_cb && use_qca_nss) { + rmnet_usb_rawip_setup(qmap_net); + } + + priv->agg_skb = NULL; + priv->agg_count = 0; + hrtimer_init(&priv->agg_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + priv->agg_hrtimer.function = rmnet_usb_tx_agg_timer_cb; + INIT_WORK(&priv->agg_wq, rmnet_usb_tx_agg_work); + ktime_get_ts64(&priv->agg_time); + spin_lock_init(&priv->agg_lock); + priv->use_qca_nss = 0; + +#if defined(MHI_NETDEV_STATUS64) + priv->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); + if (!priv->stats64) { + err = -ENOBUFS; + goto out_free_newdev; + } +#endif + + err = register_netdev(qmap_net); + if (err) + dev_info(&real_dev->dev, "%s(%s)=%d\n", __func__, qmap_net->name, err); + if (err < 0) + goto out_free_newdev; + netif_device_attach (qmap_net); + netif_carrier_off(qmap_net); + + if (nss_cb && use_qca_nss) { + int rc = nss_cb->nss_create(qmap_net); + if (rc) { + /* Log, but don't fail the device creation */ + netdev_err(qmap_net, "Device will not use NSS path: %d\n", rc); + } else { + priv->use_qca_nss = 1; + netdev_info(qmap_net, "NSS context created\n"); + rtnl_lock(); + netdev_rx_handler_register(qmap_net, qca_nss_rx_handler, NULL); + rtnl_unlock(); + } + } + + strcpy(pDev->rmnet_info.ifname[offset_id], qmap_net->name); + pDev->rmnet_info.mux_id[offset_id] = priv->mux_id; + + pDev->mpQmapNetDev[offset_id] = qmap_net; + + dev_info(&real_dev->dev, "%s %s\n", __func__, qmap_net->name); + + return 0; + +out_free_newdev: + free_netdev(qmap_net); + return err; +} + +static void qmap_unregister_device(sQmiWwanQmap * pDev, u8 offset_id) { + struct net_device *qmap_net = pDev->mpQmapNetDev[offset_id]; + + if (qmap_net != NULL && qmap_net != pDev->mpNetDev->net) { + struct qmap_priv *priv = netdev_priv(qmap_net); + unsigned long flags; + + pr_info("qmap_unregister_device(%s)\n", qmap_net->name); + pDev->mpQmapNetDev[offset_id] = NULL; + netif_carrier_off( qmap_net ); + netif_stop_queue( qmap_net ); + + hrtimer_cancel(&priv->agg_hrtimer); + cancel_work_sync(&priv->agg_wq); + spin_lock_irqsave(&priv->agg_lock, flags); + if (priv->agg_skb) { + kfree_skb(priv->agg_skb); + } + spin_unlock_irqrestore(&priv->agg_lock, flags); + + if (nss_cb && priv->use_qca_nss) { + rtnl_lock(); + netdev_rx_handler_unregister(qmap_net); + rtnl_unlock(); + nss_cb->nss_free(qmap_net); + } + +#if defined(MHI_NETDEV_STATUS64) + free_percpu(priv->stats64); +#endif + unregister_netdev (qmap_net); + free_netdev(qmap_net); + } +} + +typedef struct { + unsigned int size; + unsigned int rx_urb_size; + unsigned int ep_type; + unsigned int iface_id; + unsigned int MuxId; + unsigned int ul_data_aggregation_max_datagrams; //0x17 + unsigned int ul_data_aggregation_max_size ;//0x18 + unsigned int dl_minimum_padding; //0x1A +} QMAP_SETTING; + +#ifdef CONFIG_BRIDGE_LAN +typedef struct { + u8 id; + u8 brmac[ETH_ALEN]; +} BRMAC_SETTING; +#endif + +int qma_setting_store(struct device *dev, 
QMAP_SETTING *qmap_settings, size_t size) { + struct net_device *netdev = to_net_dev(dev); + struct usbnet * usbnetdev = netdev_priv( netdev ); + struct qmi_wwan_state *info = (void *)&usbnetdev->data; + sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused; + + if (qmap_settings->size != size) { + dev_err(dev, "ERROR: qmap_settings.size donot match!\n"); + return -EOPNOTSUPP; + } + +#ifdef QUECTEL_UL_DATA_AGG + netif_tx_lock_bh(netdev); + if (pQmapDev->tx_ctx.ul_data_aggregation_max_datagrams == 1 && qmap_settings->ul_data_aggregation_max_datagrams > 1) { + pQmapDev->tx_ctx.ul_data_aggregation_max_datagrams = qmap_settings->ul_data_aggregation_max_datagrams; + pQmapDev->tx_ctx.ul_data_aggregation_max_size = qmap_settings->ul_data_aggregation_max_size; + pQmapDev->tx_ctx.dl_minimum_padding = qmap_settings->dl_minimum_padding; + dev_info(dev, "ul_data_aggregation_max_datagrams=%d, ul_data_aggregation_max_size=%d, dl_minimum_padding=%d\n", + pQmapDev->tx_ctx.ul_data_aggregation_max_datagrams, + pQmapDev->tx_ctx.ul_data_aggregation_max_size, + pQmapDev->tx_ctx.dl_minimum_padding); + } + netif_tx_unlock_bh(netdev); + return 0; +#endif + + return -EOPNOTSUPP; +} + +static int qmap_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { + struct usbnet * usbnetdev = netdev_priv( dev ); + struct qmi_wwan_state *info = (void *)&usbnetdev->data; + sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused; + int rc = -EOPNOTSUPP; + uint link_state = 0; + QMAP_SETTING qmap_settings = {0}; +#ifdef CONFIG_BRIDGE_LAN + BRMAC_SETTING brmac_settings = {0}; +#endif + + switch (cmd) { + case 0x89F1: //SIOCDEVPRIVATE + rc = copy_from_user(&link_state, ifr->ifr_ifru.ifru_data, sizeof(link_state)); + if (!rc) { + char buf[32]; + snprintf(buf, sizeof(buf), "%u", link_state); + link_state_store(&dev->dev, NULL, buf, strlen(buf)); + } + break; + + case 0x89F2: //SIOCDEVPRIVATE + rc = copy_from_user(&qmap_settings, ifr->ifr_ifru.ifru_data, sizeof(qmap_settings)); + if (!rc) { + rc = qma_setting_store(&dev->dev, &qmap_settings, sizeof(qmap_settings)); + } + break; + + case 0x89F3: //SIOCDEVPRIVATE + if (pQmapDev->use_rmnet_usb) { + uint i; + + for (i = 0; i < pQmapDev->qmap_mode; i++) { + struct net_device *qmap_net = pQmapDev->mpQmapNetDev[i]; + + if (!qmap_net) + break; + + strcpy(pQmapDev->rmnet_info.ifname[i], qmap_net->name); + } + rc = copy_to_user(ifr->ifr_ifru.ifru_data, &pQmapDev->rmnet_info, sizeof(pQmapDev->rmnet_info)); + } + break; + +#ifdef CONFIG_BRIDGE_LAN + case 0x89F4: //SIOCDEVPRIVATE + rc = copy_from_user(&brmac_settings, ifr->ifr_ifru.ifru_data, sizeof(brmac_settings)); + if (pQmapDev->use_rmnet_usb && brmac_settings.id < qmap_mode) { + struct net_device *qmap_net = pQmapDev->mpQmapNetDev[brmac_settings.id]; + struct qmap_priv *priv = netdev_priv(qmap_net); + memcpy(priv->bridge_self_mac, brmac_settings.brmac, ETH_ALEN); + pr_info("ioctl 0x89F4 change qmapnet bridge(%d) lan mac -> %02x:%02x:%02x:%02x:%02x:%02x\n", brmac_settings.id, priv->bridge_self_mac[0], + priv->bridge_self_mac[1], priv->bridge_self_mac[2], priv->bridge_self_mac[3], priv->bridge_self_mac[4], priv->bridge_self_mac[5]); + } + else if (!pQmapDev->use_rmnet_usb && brmac_settings.id == 0) { + memcpy(pQmapDev->bridge_self_mac, brmac_settings.brmac, ETH_ALEN); + pr_info("ioctl 0x89F4 change usbnet bridge(%d) lan mac -> %02x:%02x:%02x:%02x:%02x:%02x\n", brmac_settings.id, pQmapDev->bridge_self_mac[0], + pQmapDev->bridge_self_mac[1], pQmapDev->bridge_self_mac[2], pQmapDev->bridge_self_mac[3], pQmapDev->bridge_self_mac[4], 
pQmapDev->bridge_self_mac[5]);
+		}
+		else {
+			pr_info("ioctl 0x89F4 change bridge(%d) lan mac -> error id\n", brmac_settings.id);
+			rc = -1;
+		}
+		break;
+#endif
+
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+#ifdef QUECTEL_BRIDGE_MODE
+static int is_qmap_netdev(const struct net_device *netdev) {
+	return netdev->netdev_ops == &rmnet_vnd_ops;
+}
+#endif
+#endif
+
+static struct sk_buff *qmi_wwan_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) {
+	//MDM9x07,MDM9628,MDM9x40,SDX20,SDX24 only work on RAW IP mode
+	if ((dev->driver_info->flags & FLAG_NOARP) == 0)
+		return skb;
+
+	// Skip Ethernet header from message
+	if (dev->net->hard_header_len == 0)
+		return skb;
+	else
+		skb_reset_mac_header(skb);
+
+#ifdef QUECTEL_BRIDGE_MODE
+{
+	struct qmi_wwan_state *info = (void *)&dev->data;
+	sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused;
+
+	if (pQmapDev->bridge_mode && bridge_mode_tx_fixup(dev->net, skb, pQmapDev->bridge_ipv4, pQmapDev->bridge_mac) == NULL) {
+		dev_kfree_skb_any (skb);
+		return NULL;
+	}
+}
+#endif
+
+	if (skb_pull(skb, ETH_HLEN)) {
+		return skb;
+	} else {
+		dev_err(&dev->intf->dev, "Packet dropped\n");
+	}
+
+	// Filter the packet out, release it
+	dev_kfree_skb_any(skb);
+	return NULL;
+}
+#endif
+
+/* Make up an ethernet header if the packet doesn't have one.
+ *
+ * A firmware bug common among several devices causes them to send raw
+ * IP packets under some circumstances. There is no way for the
+ * driver/host to know when this will happen. And even when the bug
+ * hits, some packets will still arrive with an intact header.
+ *
+ * The supported devices are only capable of sending IPv4, IPv6 and
+ * ARP packets on a point-to-point link. Any packet with an ethernet
+ * header will have either our address or a broadcast/multicast
+ * address as destination. ARP packets will always have a header.
+ *
+ * This means that this function will reliably add the appropriate
+ * header iff necessary, provided our hardware address does not start
+ * with 4 or 6.
+ *
+ * Another common firmware bug results in all packets being addressed
+ * to 00:a0:c6:00:00:00 despite the host address being different.
+ * This function will also fixup such packets.
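+ *
+ * Hedged illustration of the "4 or 6" caveat: possibly_iphdr() below
+ * flags any address whose first byte satisfies (b & 0xd0) == 0x40,
+ * i.e. anything that looks like an IP version nibble of 4 or 6:
+ *
+ *   char mac[ETH_ALEN] = { 0x46, 0x11, 0x22, 0x33, 0x44, 0x55 };
+ *   possibly_iphdr(mac);   // true: frames from this address could be
+ *                          // misread as raw IPv4, so
+ *                          // qmi_wwan_mac_addr() rejects it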
+ */
+static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+	__be16 proto;
+
+	/* This check is no longer done by usbnet */
+	if (skb->len < dev->net->hard_header_len)
+		return 0;
+
+	switch (skb->data[0] & 0xf0) {
+	case 0x40:
+		proto = htons(ETH_P_IP);
+		break;
+	case 0x60:
+		proto = htons(ETH_P_IPV6);
+		break;
+	case 0x00:
+		if (is_multicast_ether_addr(skb->data))
+			return 1;
+		/* possibly bogus destination - rewrite just in case */
+		skb_reset_mac_header(skb);
+		goto fix_dest;
+	default:
+		/* pass along other packets without modifications */
+		return 1;
+	}
+	if (skb_headroom(skb) < ETH_HLEN)
+		return 0;
+	skb_push(skb, ETH_HLEN);
+	skb_reset_mac_header(skb);
+	eth_hdr(skb)->h_proto = proto;
+	memset(eth_hdr(skb)->h_source, 0, ETH_ALEN);
+#if 1 //Added by Quectel
+	//some kernels drop Ethernet packets whose source MAC is all zeros
+	memcpy(eth_hdr(skb)->h_source, default_modem_addr, ETH_ALEN);
+#endif
+
+fix_dest:
+#ifdef QUECTEL_BRIDGE_MODE
+{
+	struct qmi_wwan_state *info = (void *)&dev->data;
+	sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused;
+	bridge_mode_rx_fixup(pQmapDev, dev->net, skb);
+}
+#else
+	memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
+#endif
+
+	return 1;
+}
+
+#if defined(QUECTEL_WWAN_QMAP)
+static struct sk_buff *qmap_qmi_wwan_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) {
+	struct qmi_wwan_state *info = (void *)&dev->data;
+	sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused;
+
+	if (unlikely(pQmapDev == NULL)) {
+		goto drop_skb;
+	} else if (unlikely(pQmapDev->qmap_mode && !pQmapDev->link_state)) {
+		dev_dbg(&dev->net->dev, "link_state 0x%x, drop skb, len = %u\n", pQmapDev->link_state, skb->len);
+		goto drop_skb;
+	} else if (pQmapDev->qmap_mode == 0) {
+		skb = qmi_wwan_tx_fixup(dev, skb, flags);
+	}
+	else if (pQmapDev->qmap_mode > 1) {
+		WARN_ON(1); //never reach here.
+	}
+	else {
+		if (likely(skb)) {
+			skb = qmi_wwan_tx_fixup(dev, skb, flags);
+
+			if (skb) {
+				if(pQmapDev->qmap_version == 5)
+					add_qhdr(skb, QUECTEL_QMAP_MUX_ID);
+				else
+					add_qhdr_v5(skb, QUECTEL_QMAP_MUX_ID);
+			}
+			else {
+				return NULL;
+			}
+		}
+	}
+
+	return skb;
+drop_skb:
+	dev_kfree_skb_any (skb);
+	return NULL;
+}
+
+static void qmap_packet_decode(sQmiWwanQmap *pQmapDev,
+	struct sk_buff *skb_in, struct sk_buff_head *skb_chain)
+{
+	struct device *dev = &pQmapDev->mpNetDev->net->dev;
+	struct sk_buff *qmap_skb;
+	uint dl_minimum_padding = 0;
+
+	if (pQmapDev->qmap_version == 9)
+		dl_minimum_padding = pQmapDev->tx_ctx.dl_minimum_padding;
+
+	/* __skb_queue_head_init() does not call spin_lock_init(&list->lock),
+	   so the locked skb_queue_tail()/skb_dequeue() helpers must not be
+	   used on this list later.
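+	   A hedged example of the distinction:
+
+	     __skb_queue_tail(skb_chain, qmap_skb); // ok: caller-serialized
+	     skb_queue_tail(skb_chain, qmap_skb);   // NOT ok: takes the
+	                                            // uninitialized list->lock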
+	*/
+	__skb_queue_head_init(skb_chain);
+
+	while (skb_in->len > sizeof(struct qmap_hdr)) {
+		struct rmnet_map_header *map_header = (struct rmnet_map_header *)skb_in->data;
+		struct rmnet_map_v5_csum_header *ul_header = NULL;
+		size_t hdr_size = sizeof(struct rmnet_map_header);
+		struct net_device *qmap_net;
+		int pkt_len = ntohs(map_header->pkt_len);
+		int skb_len;
+		__be16 protocol;
+		int mux_id;
+		int skip_nss = 0;
+
+		if (map_header->next_hdr) {
+			ul_header = (struct rmnet_map_v5_csum_header *)(map_header + 1);
+			hdr_size += sizeof(struct rmnet_map_v5_csum_header);
+		}
+
+		skb_len = pkt_len - (map_header->pad_len&0x3F);
+		skb_len -= dl_minimum_padding;
+
+		mux_id = map_header->mux_id - QUECTEL_QMAP_MUX_ID;
+		if (mux_id >= pQmapDev->qmap_mode) {
+			dev_info(dev, "drop qmap unknown mux_id %x\n", map_header->mux_id);
+			goto error_pkt;
+		}
+
+		qmap_net = pQmapDev->mpQmapNetDev[mux_id];
+		if (qmap_net == NULL) {
+			dev_info(dev, "drop qmap unknown mux_id %x\n", map_header->mux_id);
+			goto skip_pkt;
+		}
+
+		if (skb_len > qmap_net->mtu) {
+			dev_info(dev, "drop skb_len=%x larger than mtu=%d\n", skb_len, qmap_net->mtu);
+			goto error_pkt;
+		}
+
+		if (skb_in->len < (pkt_len + hdr_size)) {
+			dev_info(dev, "drop qmap unknown pkt, len=%d, pkt_len=%d\n", skb_in->len, pkt_len);
+			goto error_pkt;
+		}
+
+		if (map_header->cd_bit) {
+			dev_info(dev, "skip qmap command packet\n");
+			goto skip_pkt;
+		}
+
+		switch (skb_in->data[hdr_size] & 0xf0) {
+		case 0x40:
+#ifdef CONFIG_QCA_NSS_PACKET_FILTER
+			{
+				struct iphdr *ip4h = (struct iphdr *)(&skb_in->data[hdr_size]);
+				if (ip4h->protocol == IPPROTO_ICMP) {
+					skip_nss = 1;
+				}
+			}
+#endif
+			protocol = htons(ETH_P_IP);
+			break;
+		case 0x60:
+#ifdef CONFIG_QCA_NSS_PACKET_FILTER
+			{
+				struct ipv6hdr *ip6h = (struct ipv6hdr *)(&skb_in->data[hdr_size]);
+				if (ip6h->nexthdr == NEXTHDR_ICMP) {
+					skip_nss = 1;
+				}
+			}
+#endif
+			protocol = htons(ETH_P_IPV6);
+			break;
+		default:
+			dev_info(dev, "unknown skb->protocol %02x\n", skb_in->data[hdr_size]);
+			goto error_pkt;
+		}
+
+		qmap_skb = netdev_alloc_skb(qmap_net, skb_len);
+		if (qmap_skb) {
+			skb_put(qmap_skb, skb_len);
+			memcpy(qmap_skb->data, skb_in->data + hdr_size, skb_len);
+		}
+
+		if (qmap_skb == NULL) {
+			dev_info(dev, "fail to alloc skb, pkt_len = %d\n", skb_len);
+			goto error_pkt;
+		}
+
+		skb_reset_transport_header(qmap_skb);
+		skb_reset_network_header(qmap_skb);
+		qmap_skb->pkt_type = PACKET_HOST;
+		skb_set_mac_header(qmap_skb, 0);
+		qmap_skb->protocol = protocol;
+
+		if(skip_nss)
+			qmap_skb->cb[0] = 1;
+
+		if (ul_header && ul_header->header_type == RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD
+			&& ul_header->csum_valid_required) {
+#if 0 //TODO
+			qmap_skb->ip_summed = CHECKSUM_UNNECESSARY;
+#endif
+		}
+
+		if (qmap_skb->dev->type == ARPHRD_ETHER) {
+			skb_push(qmap_skb, ETH_HLEN);
+			skb_reset_mac_header(qmap_skb);
+			memcpy(eth_hdr(qmap_skb)->h_source, default_modem_addr, ETH_ALEN);
+			memcpy(eth_hdr(qmap_skb)->h_dest, qmap_net->dev_addr, ETH_ALEN);
+			eth_hdr(qmap_skb)->h_proto = protocol;
+#ifdef QUECTEL_BRIDGE_MODE
+			bridge_mode_rx_fixup(pQmapDev, qmap_net, qmap_skb);
+#endif
+		}
+
+		__skb_queue_tail(skb_chain, qmap_skb);
+
+skip_pkt:
+		skb_pull(skb_in, pkt_len + hdr_size);
+	}
+
+error_pkt:
+	return;
+}
+
+static int qmap_qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
+{
+	struct qmi_wwan_state *info = (void *)&dev->data;
+	sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused;
+	struct sk_buff *qmap_skb;
+	struct sk_buff_head skb_chain;
+
+	if (pQmapDev->qmap_mode == 0)
+		return qmi_wwan_rx_fixup(dev, skb_in);
+
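+	/*
+	 * Hedged illustration of the QMAPv1 framing that
+	 * qmap_packet_decode() above walks: each datagram carries a 4-byte
+	 * MAP header (plus a v5 checksum header when next_hdr is set),
+	 * followed by the IP payload and padding. ip_len/pad are
+	 * illustrative names:
+	 *
+	 *   struct rmnet_map_header hdr = {0};
+	 *   hdr.cd_bit  = 0;                        // 1 = command, 0 = data
+	 *   hdr.mux_id  = QUECTEL_QMAP_MUX_ID + n;  // selects wwan0_<n+1>
+	 *   hdr.pkt_len = htons(ip_len + pad);      // payload + padding
+	 */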
+ qmap_packet_decode(pQmapDev, skb_in, &skb_chain); + + while ((qmap_skb = __skb_dequeue (&skb_chain))) { + if (qmap_skb->dev != dev->net) { + WARN_ON(1); //never reach here. + } + else { + qmap_skb->protocol = 0; + usbnet_skb_return(dev, qmap_skb); + } + } + + return 0; +} +#endif + +/* very simplistic detection of IPv4 or IPv6 headers */ +static bool possibly_iphdr(const char *data) +{ + return (data[0] & 0xd0) == 0x40; +} + +/* disallow addresses which may be confused with IP headers */ +static int qmi_wwan_mac_addr(struct net_device *dev, void *p) +{ + int ret; + struct sockaddr *addr = p; + + ret = eth_prepare_mac_addr_change(dev, p); + if (ret < 0) + return ret; + if (possibly_iphdr(addr->sa_data)) + return -EADDRNOTAVAIL; + eth_commit_mac_addr_change(dev, p); + return 0; +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,10,0 )) //bc1f44709cf27fb2a5766cadafe7e2ad5e9cb221 +static void (*_usbnet_get_stats64)(struct net_device *net, struct rtnl_link_stats64 *stats); + +static void qmi_wwan_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats) { + if (_usbnet_get_stats64) ////c8b5d129ee293bcf972e7279ac996bb8a138505c + return _usbnet_get_stats64(net, stats); + + netdev_stats_to_stats64(stats, &net->stats); +} +#else +static struct rtnl_link_stats64 * (*_usbnet_get_stats64)(struct net_device *net, struct rtnl_link_stats64 *stats); + +static struct rtnl_link_stats64 * qmi_wwan_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats) { + if (_usbnet_get_stats64) + return _usbnet_get_stats64(net, stats); + + netdev_stats_to_stats64(stats, &net->stats); + return stats; +} +#endif + +static int qmi_wwan_open (struct net_device *net) { + struct usbnet * usbnetdev = netdev_priv( net ); + struct qmi_wwan_state *info = (void *)&usbnetdev->data; + sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused; + int retval; + + retval = usbnet_open(net); + + if (!retval) { + if (pQmapDev && pQmapDev->qmap_mode == 1) { + if (pQmapDev->link_state) + netif_carrier_on(net); + } + } + + return retval; +} + +static netdev_tx_t qmi_wwan_start_xmit (struct sk_buff *skb, + struct net_device *net) +{ + struct usbnet * usbnetdev = netdev_priv( net ); + struct qmi_wwan_state *info = (void *)&usbnetdev->data; + sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused; + int retval; + + retval = usbnet_start_xmit(skb, net); + + if (netif_queue_stopped(net) && pQmapDev && pQmapDev->use_rmnet_usb) { + int i; + + for (i = 0; i < pQmapDev->qmap_mode; i++) { + struct net_device *qmap_net = pQmapDev->mpQmapNetDev[i]; + if (qmap_net) { + netif_stop_queue(qmap_net); + } + } + } + + return retval; +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 5,14,0 )) //b9067f5dc4a07c8e24e01a1b277c6722d91be39e +#define use_ndo_siocdevprivate +#endif +#ifdef use_ndo_siocdevprivate +static int qmap_ndo_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd) { + return qmap_ndo_do_ioctl(dev, ifr, cmd); +} +#endif + +static const struct net_device_ops qmi_wwan_netdev_ops = { + .ndo_open = qmi_wwan_open, + .ndo_stop = usbnet_stop, + .ndo_start_xmit = qmi_wwan_start_xmit, + .ndo_tx_timeout = usbnet_tx_timeout, + .ndo_change_mtu = usbnet_change_mtu, + .ndo_get_stats64 = qmi_wwan_get_stats64, + .ndo_set_mac_address = qmi_wwan_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#if defined(QUECTEL_WWAN_QMAP)// && defined(CONFIG_ANDROID) + .ndo_do_ioctl = qmap_ndo_do_ioctl, +#ifdef use_ndo_siocdevprivate + .ndo_siocdevprivate = qmap_ndo_siocdevprivate, +#endif +#endif +}; + +static void 
ql_net_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info) +{ + /* Inherit standard device info */ + usbnet_get_drvinfo(net, info); + strlcpy(info->driver, driver_name, sizeof(info->driver)); + strlcpy(info->version, VERSION_NUMBER, sizeof(info->version)); +} + +static struct ethtool_ops ql_net_ethtool_ops; + +/* using a counter to merge subdriver requests with our own into a + * combined state + */ +static int qmi_wwan_manage_power(struct usbnet *dev, int on) +{ + struct qmi_wwan_state *info = (void *)&dev->data; + int rv; + + dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__, + atomic_read(&info->pmcount), on); + + if ((on && atomic_add_return(1, &info->pmcount) == 1) || + (!on && atomic_dec_and_test(&info->pmcount))) { + /* need autopm_get/put here to ensure the usbcore sees + * the new value + */ + rv = usb_autopm_get_interface(dev->intf); + dev->intf->needs_remote_wakeup = on; + if (!rv) + usb_autopm_put_interface(dev->intf); + } + return 0; +} + +static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on) +{ + struct usbnet *dev = usb_get_intfdata(intf); + + /* can be called while disconnecting */ + if (!dev) + return 0; + return qmi_wwan_manage_power(dev, on); +} + +/* collect all three endpoints and register subdriver */ +static int qmi_wwan_register_subdriver(struct usbnet *dev) +{ + int rv; + struct usb_driver *subdriver = NULL; + struct qmi_wwan_state *info = (void *)&dev->data; + + /* collect bulk endpoints */ + rv = usbnet_get_endpoints(dev, info->data); + if (rv < 0) + goto err; + + /* update status endpoint if separate control interface */ + if (info->control != info->data) + dev->status = &info->control->cur_altsetting->endpoint[0]; + + /* require interrupt endpoint for subdriver */ + if (!dev->status) { + rv = -EINVAL; + goto err; + } + + /* for subdriver power management */ + atomic_set(&info->pmcount, 0); + + /* register subdriver */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 5,12,0 )) //cac6fb015f719104e60b1c68c15ca5b734f57b9c + subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, + 4096, WWAN_PORT_QMI, &qmi_wwan_cdc_wdm_manage_power); +#else + subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, + 4096, &qmi_wwan_cdc_wdm_manage_power); + +#endif + if (IS_ERR(subdriver)) { + dev_err(&info->control->dev, "subdriver registration failed\n"); + rv = PTR_ERR(subdriver); + goto err; + } + + /* prevent usbnet from using status endpoint */ + dev->status = NULL; + + /* save subdriver struct for suspend/resume wrappers */ + info->subdriver = subdriver; + +err: + return rv; +} + +static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf) +{ + int status = -1; + struct usb_driver *driver = driver_of(intf); + struct qmi_wwan_state *info = (void *)&dev->data; + + BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < + sizeof(struct qmi_wwan_state))); + + /* set up initial state */ + info->control = intf; + info->data = intf; + + status = qmi_wwan_register_subdriver(dev); + if (status < 0 && info->control != info->data) { + usb_set_intfdata(info->data, NULL); + usb_driver_release_interface(driver, info->data); + } + + /* Never use the same address on both ends of the link, even + * if the buggy firmware told us to. 
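+ *
+ * A hedged sketch of the two fixups below: the firmware-reported
+ * address is replaced if it equals default_modem_addr, and an
+ * IP-header-like address (first byte 0x4x/0x6x) is rewritten, e.g.
+ *
+ *   addr[0] = 0x45;   // would parse as IPv4 (version 4, IHL 5)
+ *   addr[0] |= 0x02;  // 0x47: locally administered
+ *   addr[0] &= 0xbf;  // 0x07: fails possibly_iphdr()'s
+ *                     // (b & 0xd0) == 0x40 test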
+ */
+	if (ether_addr_equal(dev->net->dev_addr, default_modem_addr))
+		eth_hw_addr_random(dev->net);
+
+	/* make MAC addr easily distinguishable from an IP header */
+	if (possibly_iphdr(dev->net->dev_addr)) {
+		dev->net->dev_addr[0] |= 0x02;	/* set local assignment bit */
+		dev->net->dev_addr[0] &= 0xbf;	/* clear "IP" bit */
+	}
+	if (!_usbnet_get_stats64)
+		_usbnet_get_stats64 = dev->net->netdev_ops->ndo_get_stats64;
+	dev->net->netdev_ops = &qmi_wwan_netdev_ops;
+
+	ql_net_ethtool_ops = *dev->net->ethtool_ops;
+	ql_net_ethtool_ops.get_drvinfo = ql_net_get_drvinfo;
+	dev->net->ethtool_ops = &ql_net_ethtool_ops;
+
+#if 1 //Added by Quectel
+	if (dev->driver_info->flags & FLAG_NOARP) {
+		int ret;
+		char buf[32] = "Module";
+
+		ret = usb_string(dev->udev, dev->udev->descriptor.iProduct, buf, sizeof(buf));
+		if (ret > 0) {
+			buf[ret] = '\0';
+		}
+
+		dev_info(&intf->dev, "Quectel %s works in RawIP mode\n", buf);
+		dev->net->flags |= IFF_NOARP;
+		dev->net->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+
+		usb_control_msg(
+			interface_to_usbdev(intf),
+			usb_sndctrlpipe(interface_to_usbdev(intf), 0),
+			0x22, //USB_CDC_REQ_SET_CONTROL_LINE_STATE
+			0x21, //USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE
+			1, //activate CDC DTR
+			intf->cur_altsetting->desc.bInterfaceNumber,
+			NULL, 0, 100);
+	}
+
+	//avoid the module reporting MTU 1460 while still receiving 1500-byte IP packets, which can crash the customer's system
+	//the next setting keeps usbnet.c:usbnet_change_mtu() from modifying rx_urb_size to follow the hard mtu
+	dev->rx_urb_size = ETH_DATA_LEN + ETH_HLEN + 6;
+
+#if defined(QUECTEL_WWAN_QMAP)
+	if (qmap_mode > QUECTEL_WWAN_QMAP)
+		qmap_mode = QUECTEL_WWAN_QMAP;
+
+	if (!status)
+	{
+		sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)kzalloc(sizeof(sQmiWwanQmap), GFP_KERNEL);
+
+		if (pQmapDev == NULL)
+			return -ENODEV;
+
+#ifdef QUECTEL_BRIDGE_MODE
+		pQmapDev->bridge_mode = bridge_mode;
+#ifdef CONFIG_BRIDGE_LAN
+		memcpy(pQmapDev->bridge_self_mac, broadcast_mac_addr, ETH_ALEN);
+#endif
+#endif
+		pQmapDev->mpNetDev = dev;
+		pQmapDev->link_state = 1;
+		//on OpenWrt, if rmnet_usb0.1 is set as WAN, '/sbin/netifd' will auto-create a VLAN on rmnet_usb0
+		dev->net->features |= (NETIF_F_VLAN_CHALLENGED);
+
+		if (dev->driver_info->flags & FLAG_NOARP)
+		{
+			int qmap_version = (dev->driver_info->data>>8)&0xFF;
+			int qmap_size = (dev->driver_info->data)&0xFF;
+			int idProduct = le16_to_cpu(dev->udev->descriptor.idProduct);
+			int lte_a = (idProduct == 0x0306 || idProduct == 0x030B || idProduct == 0x0512 || idProduct == 0x0620 || idProduct == 0x0800 || idProduct == 0x0801);
+
+			if (qmap_size > 4096 || dev->udev->speed >= USB_SPEED_SUPER) { //if these requirements are met, it must be LTE-A or 5G
+				lte_a = 1;
+			}
+
+			pQmapDev->qmap_mode = qmap_mode;
+			if (lte_a && pQmapDev->qmap_mode == 0) {
+				pQmapDev->qmap_mode = 1; //force use QMAP
+				if(qmap_mode == 0)
+					qmap_mode = 1; //old quectel-CM only checks sys/module/wwan0/parameters/qmap_mode
+			}
+
+			if (pQmapDev->qmap_mode) {
+				pQmapDev->qmap_version = qmap_version;
+				pQmapDev->qmap_size = qmap_size*1024;
+				dev->rx_urb_size = pQmapDev->qmap_size;
+				//for these modules, sending packets before qmi_start_network can crash the host PC or the module
+				pQmapDev->link_state = !lte_a;
+
+				if (pQmapDev->qmap_mode > 1)
+					pQmapDev->use_rmnet_usb = 1;
+				else if (idProduct == 0x0800 || idProduct == 0x0801)
+					pQmapDev->use_rmnet_usb = 1; //benefits UL data aggregation
+#ifdef QMI_NETDEV_ONE_CARD_MODE
+				if(pQmapDev->use_rmnet_usb == 1 && pQmapDev->qmap_mode == 1)
+					one_card_mode = 1;
+
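+				/*
+				 * Hedged note: in one-card mode the single
+				 * QMAP channel stays on the usbnet netdev
+				 * itself; no wwan0_1 child device is
+				 * registered and the rmnet rx_handler path is
+				 * skipped (see the "!one_card_mode" guards in
+				 * probe/disconnect below).
+				 */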
+				pQmapDev->rmnet_info.mux_id[0] = QUECTEL_QMAP_MUX_ID;
+#endif
+				pQmapDev->rmnet_info.size = sizeof(RMNET_INFO);
+				pQmapDev->rmnet_info.rx_urb_size = pQmapDev->qmap_size;
+				pQmapDev->rmnet_info.ep_type = 2; //DATA_EP_TYPE_HSUSB
+				pQmapDev->rmnet_info.iface_id = 4;
+				pQmapDev->rmnet_info.qmap_mode = pQmapDev->qmap_mode;
+				pQmapDev->rmnet_info.qmap_version = pQmapDev->qmap_version;
+				pQmapDev->rmnet_info.dl_minimum_padding = 0;
+
+#if defined(QUECTEL_UL_DATA_AGG)
+				pQmapDev->tx_ctx.ul_data_aggregation_max_datagrams = 1;
+				pQmapDev->tx_ctx.ul_data_aggregation_max_size = 1500;
+#endif
+
+				if (pQmapDev->use_rmnet_usb && !one_card_mode) {
+					pQmapDev->driver_info = rmnet_usb_info;
+					pQmapDev->driver_info.data = dev->driver_info->data;
+					dev->driver_info = &pQmapDev->driver_info;
+				}
+
+				if (pQmapDev->use_rmnet_usb && !one_card_mode) {
+					pQmapDev->usbnet_bh = dev->bh;
+					tasklet_init(&dev->bh, usbnet_bh, (unsigned long)pQmapDev);
+				}
+			}
+		}
+
+		info->unused = (unsigned long)pQmapDev;
+		dev->net->sysfs_groups[0] = &qmi_wwan_sysfs_attr_group;
+
+		dev_info(&intf->dev, "rx_urb_size = %zd\n", dev->rx_urb_size);
+	}
+#endif
+#endif
+
+	return status;
+}
+
+static void qmi_wwan_unbind(struct usbnet *dev, struct usb_interface *intf)
+{
+	struct qmi_wwan_state *info = (void *)&dev->data;
+	struct usb_driver *driver = driver_of(intf);
+	struct usb_interface *other;
+
+	if (dev->udev && dev->udev->state == USB_STATE_CONFIGURED) {
+		usb_control_msg(
+			interface_to_usbdev(intf),
+			usb_sndctrlpipe(interface_to_usbdev(intf), 0),
+			0x22, //USB_CDC_REQ_SET_CONTROL_LINE_STATE
+			0x21, //USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE
+			0, //deactivate CDC DTR
+			intf->cur_altsetting->desc.bInterfaceNumber,
+			NULL, 0, 100);
+	}
+
+	if (info->subdriver && info->subdriver->disconnect)
+		info->subdriver->disconnect(info->control);
+
+	/* allow user to unbind using either control or data */
+	if (intf == info->control)
+		other = info->data;
+	else
+		other = info->control;
+
+	/* only if not shared */
+	if (other && intf != other) {
+		usb_set_intfdata(other, NULL);
+		usb_driver_release_interface(driver, other);
+	}
+
+	info->subdriver = NULL;
+	info->data = NULL;
+	info->control = NULL;
+}
+
+/* suspend/resume wrappers calling both usbnet and the cdc-wdm
+ * subdriver if present.
+ *
+ * NOTE: cdc-wdm also supports pre/post_reset, but we cannot provide
+ * wrappers for those without adding usbnet reset support first.
+ */
+static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message)
+{
+	struct usbnet *dev = usb_get_intfdata(intf);
+	struct qmi_wwan_state *info = (void *)&dev->data;
+	int ret;
+
+	/* Both usbnet_suspend() and subdriver->suspend() MUST return 0
+	 * in system sleep context, otherwise, the resume callback has
+	 * to recover device from previous suspend failure.
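+	 * In sketch form, the contract implemented below (the failure
+	 * flag is hypothetical shorthand for the subdriver call):
+	 *
+	 *   ret = usbnet_suspend(intf, message);
+	 *   if (ret == 0 && subdriver_suspend_failed)
+	 *       usbnet_resume(intf);   // roll back to a consistent state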
+ */ + ret = usbnet_suspend(intf, message); + if (ret < 0) + goto err; + + if (intf == info->control && info->subdriver && + info->subdriver->suspend) + ret = info->subdriver->suspend(intf, message); + if (ret < 0) + usbnet_resume(intf); +err: + return ret; +} + +static int qmi_wwan_resume(struct usb_interface *intf) +{ + struct usbnet *dev = usb_get_intfdata(intf); + struct qmi_wwan_state *info = (void *)&dev->data; + int ret = 0; + bool callsub = (intf == info->control && info->subdriver && + info->subdriver->resume); + + if (callsub) + ret = info->subdriver->resume(intf); + if (ret < 0) + goto err; + ret = usbnet_resume(intf); + if (ret < 0 && callsub) + info->subdriver->suspend(intf, PMSG_SUSPEND); + +#if defined(QUECTEL_WWAN_QMAP) + if (!netif_queue_stopped(dev->net)) { + qmap_wake_queue((sQmiWwanQmap *)info->unused); + } +#endif + +err: + return ret; +} + +static int qmi_wwan_reset_resume(struct usb_interface *intf) +{ + dev_info(&intf->dev, "device do not support reset_resume\n"); + intf->needs_binding = 1; + return -EOPNOTSUPP; +} + +static struct sk_buff *rmnet_usb_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) +{ + //printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + if (skb->protocol != htons(ETH_P_MAP)) { + dev_kfree_skb_any(skb); + return NULL; + } + + return skb; +} + +static int rmnet_usb_rx_fixup(struct usbnet *dev, struct sk_buff *skb) +{ + struct net_device *net = dev->net; + unsigned headroom = skb_headroom(skb); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,3,1 )) //7bdd402706cf26bfef9050dfee3f229b7f33ee4f +//some customers port to v3.2 + if (net->type == ARPHRD_ETHER && headroom < ETH_HLEN) { + unsigned tailroom = skb_tailroom(skb); + + if ((tailroom + headroom) >= ETH_HLEN) { + unsigned moveroom = ETH_HLEN - headroom; + + memmove(skb->data + moveroom ,skb->data, skb->len); + skb->data += moveroom; + skb->tail += moveroom; + #ifdef WARN_ONCE + WARN_ONCE(1, "It is better reserve headroom in usbnet.c:rx_submit()!\n"); + #endif + } + } +#endif + + //printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + if (net->type == ARPHRD_ETHER && headroom >= ETH_HLEN) { + //usbnet.c rx_process() usbnet_skb_return() eth_type_trans() + skb_push(skb, ETH_HLEN); + skb_reset_mac_header(skb); + memcpy(eth_hdr(skb)->h_source, default_modem_addr, ETH_ALEN); + memcpy(eth_hdr(skb)->h_dest, net->dev_addr, ETH_ALEN); + eth_hdr(skb)->h_proto = htons(ETH_P_MAP); + + return 1; + } + + return 0; +} + +static rx_handler_result_t rmnet_usb_rx_handler(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + struct usbnet *dev; + struct qmi_wwan_state *info; + sQmiWwanQmap *pQmapDev; + struct sk_buff *qmap_skb; + struct sk_buff_head skb_chain; + + if (!skb) + goto done; + + //printk("%s skb=%p, protocol=%x, len=%d\n", __func__, skb, skb->protocol, skb->len); + + if (skb->pkt_type == PACKET_LOOPBACK) + return RX_HANDLER_PASS; + + if (skb->protocol != htons(ETH_P_MAP)) { + WARN_ON(1); + return RX_HANDLER_PASS; + } + /* when open hyfi function, run cm will make system crash */ + //dev = rcu_dereference(skb->dev->rx_handler_data); + dev = netdev_priv(skb->dev); + + if (dev == NULL) { + WARN_ON(1); + return RX_HANDLER_PASS; + } + + info = (struct qmi_wwan_state *)&dev->data; + pQmapDev = (sQmiWwanQmap *)info->unused; + + qmap_packet_decode(pQmapDev, skb, &skb_chain); + while ((qmap_skb = __skb_dequeue (&skb_chain))) { + struct net_device *qmap_net = qmap_skb->dev; + + 
rmnet_vnd_update_rx_stats(qmap_net, 1, qmap_skb->len); + if (qmap_net->type == ARPHRD_ETHER) + __skb_pull(qmap_skb, ETH_HLEN); + netif_receive_skb(qmap_skb); + } + consume_skb(skb); + +done: + return RX_HANDLER_CONSUMED; +} + +static const struct driver_info qmi_wwan_info = { + .description = "WWAN/QMI device", + .flags = FLAG_WWAN, + .bind = qmi_wwan_bind, + .unbind = qmi_wwan_unbind, + .manage_power = qmi_wwan_manage_power, +}; + +#define qmi_wwan_raw_ip_info \ + .description = "WWAN/QMI device", \ + .flags = FLAG_WWAN | FLAG_RX_ASSEMBLE | FLAG_NOARP | FLAG_SEND_ZLP, \ + .bind = qmi_wwan_bind, \ + .unbind = qmi_wwan_unbind, \ + .manage_power = qmi_wwan_manage_power, \ + .tx_fixup = qmap_qmi_wwan_tx_fixup, \ + .rx_fixup = qmap_qmi_wwan_rx_fixup, \ + +static const struct driver_info rmnet_usb_info = { + .description = "RMNET/USB device", + .flags = FLAG_WWAN | FLAG_NOARP | FLAG_SEND_ZLP, + .bind = qmi_wwan_bind, + .unbind = qmi_wwan_unbind, + .manage_power = qmi_wwan_manage_power, + .tx_fixup = rmnet_usb_tx_fixup, + .rx_fixup = rmnet_usb_rx_fixup, +}; + +static const struct driver_info qmi_wwan_raw_ip_info_mdm9x07 = { + qmi_wwan_raw_ip_info + .data = (5<<8)|4, //QMAPV1 and 4KB +}; + +// mdm9x40/sdx12/sdx20/sdx24 share the same config +static const struct driver_info qmi_wwan_raw_ip_info_mdm9x40 = { + qmi_wwan_raw_ip_info + .data = (5<<8)|16, //QMAPV1 and 16KB +}; + +static const struct driver_info qmi_wwan_raw_ip_info_sdx55 = { + qmi_wwan_raw_ip_info + .data = (9<<8)|31, //QMAPV5 and 31KB +}; + +/* map QMI/wwan function by a fixed interface number */ +#define QMI_FIXED_INTF(vend, prod, num) \ + USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \ + .driver_info = (unsigned long)&qmi_wwan_info + +#define QMI_FIXED_RAWIP_INTF(vend, prod, num, chip) \ + USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \ + .driver_info = (unsigned long)&qmi_wwan_raw_ip_info_##chip + +static const struct usb_device_id products[] = { + { QMI_FIXED_INTF(0x05C6, 0x9003, 4) }, /* Quectel UC20 */ + { QMI_FIXED_INTF(0x05C6, 0x9215, 4) }, /* Quectel EC20 (MDM9215) */ + { QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0125, 4, mdm9x07) }, /* Quectel EC20 (MDM9X07)/EC25/EG25 */ + { QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0121, 4, mdm9x07) }, /* Quectel EC21 */ + { QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0191, 4, mdm9x07) }, /* Quectel EG91 */ + { QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0195, 4, mdm9x07) }, /* Quectel EG95 */ + { QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0700, 3, mdm9x07) }, /* Quectel BG95 (at+qcfgext="usbnet","rmnet") */ + { QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0306, 4, mdm9x40) }, /* Quectel EG06/EP06/EM06 */ + { QMI_FIXED_RAWIP_INTF(0x2C7C, 0x030B, 4, mdm9x40) }, /* Quectel EG065k/EG060K */ + { QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0512, 4, mdm9x40) }, /* Quectel EG12/EP12/EM12/EG16/EG18 */ + { QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0296, 4, mdm9x07) }, /* Quectel BG96 */ + { QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0435, 4, mdm9x07) }, /* Quectel AG35 */ + { QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0620, 4, mdm9x40) }, /* Quectel EG20 */ + { QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0800, 4, sdx55) }, /* Quectel RG500 */ + { QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0801, 4, sdx55) }, /* Quectel RG520 */ + { } /* END */ +}; +MODULE_DEVICE_TABLE(usb, products); + +static int qmi_wwan_probe(struct usb_interface *intf, + const struct usb_device_id *prod) +{ + struct usb_device_id *id = (struct usb_device_id *)prod; + + /* Workaround to enable dynamic IDs. This disables usbnet + * blacklisting functionality. 
Which, if required, can be
+ * reimplemented here by using a magic "blacklist" value
+ * instead of 0 in the static device id table
+ */
+	if (!id->driver_info) {
+		dev_dbg(&intf->dev, "setting defaults for dynamic device id\n");
+		id->driver_info = (unsigned long)&qmi_wwan_info;
+	}
+
+	if (intf->cur_altsetting->desc.bInterfaceClass != 0xff) {
+		dev_info(&intf->dev, "Quectel module not in qmi_wwan mode! Please check 'at+qcfg=\"usbnet\"'\n");
+		return -ENODEV;
+	}
+
+	return usbnet_probe(intf, id);
+}
+
+#if defined(QUECTEL_WWAN_QMAP)
+static int qmap_qmi_wwan_probe(struct usb_interface *intf,
+	const struct usb_device_id *prod)
+{
+	int status = qmi_wwan_probe(intf, prod);
+
+	if (!status) {
+		struct usbnet *dev = usb_get_intfdata(intf);
+		struct qmi_wwan_state *info = (void *)&dev->data;
+		sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused;
+		unsigned i;
+
+		if (!pQmapDev)
+			return status;
+
+		tasklet_init(&pQmapDev->txq, rmnet_usb_tx_wake_queue, (unsigned long)pQmapDev);
+
+		if (pQmapDev->qmap_mode == 1) {
+			pQmapDev->mpQmapNetDev[0] = dev->net;
+			if (pQmapDev->use_rmnet_usb && !one_card_mode) {
+				pQmapDev->mpQmapNetDev[0] = NULL;
+				qmap_register_device(pQmapDev, 0);
+			}
+		}
+		else if (pQmapDev->qmap_mode > 1) {
+			for (i = 0; i < pQmapDev->qmap_mode; i++) {
+				qmap_register_device(pQmapDev, i);
+			}
+		}
+
+		if (pQmapDev->use_rmnet_usb && !one_card_mode) {
+			rtnl_lock();
+			/* when the hyfi function is enabled, running the CM would crash
+			   the system, so dev is not passed as rx_handler_data */
+			//netdev_rx_handler_register(dev->net, rmnet_usb_rx_handler, dev);
+			netdev_rx_handler_register(dev->net, rmnet_usb_rx_handler, NULL);
+			rtnl_unlock();
+		}
+
+		if (pQmapDev->link_state == 0) {
+			netif_carrier_off(dev->net);
+		}
+	}
+
+	return status;
+}
+
+static void qmap_qmi_wwan_disconnect(struct usb_interface *intf)
+{
+	struct usbnet *dev = usb_get_intfdata(intf);
+	struct qmi_wwan_state *info;
+	sQmiWwanQmap *pQmapDev;
+	uint i;
+
+	if (!dev)
+		return;
+
+	info = (void *)&dev->data;
+	pQmapDev = (sQmiWwanQmap *)info->unused;
+
+	if (!pQmapDev) {
+		return usbnet_disconnect(intf);
+	}
+
+	pQmapDev->link_state = 0;
+
+	if (pQmapDev->qmap_mode > 1) {
+		for (i = 0; i < pQmapDev->qmap_mode; i++) {
+			qmap_unregister_device(pQmapDev, i);
+		}
+	}
+
+	if (pQmapDev->use_rmnet_usb && !one_card_mode) {
+		qmap_unregister_device(pQmapDev, 0);
+		rtnl_lock();
+		netdev_rx_handler_unregister(dev->net);
+		rtnl_unlock();
+	}
+
+	tasklet_kill(&pQmapDev->txq);
+
+	usbnet_disconnect(intf);
+	/* struct usbnet *dev was freed by usbnet_disconnect()->free_netdev(),
+	   so info (which lives inside dev) must not be accessed afterwards.
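+	   (hence info->unused is left untouched below and pQmapDev is
+	   freed through the local pointer instead)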
+	*/
+	//info->unused = 0;
+	kfree(pQmapDev);
+}
+#endif
+
+static struct usb_driver qmi_wwan_driver = {
+	.name		      = "qmi_wwan_q",
+	.id_table	      = products,
+#if defined(QUECTEL_WWAN_QMAP)
+	.probe		      = qmap_qmi_wwan_probe,
+	.disconnect	      = qmap_qmi_wwan_disconnect,
+#else
+	.probe		      = qmi_wwan_probe,
+	.disconnect	      = usbnet_disconnect,
+#endif
+	.suspend	      = qmi_wwan_suspend,
+	.resume		      = qmi_wwan_resume,
+	.reset_resume         = qmi_wwan_reset_resume,
+	.supports_autosuspend = 1,
+	.disable_hub_initiated_lpm = 1,
+};
+
+static int __init qmi_wwan_driver_init(void)
+{
+#ifdef CONFIG_QCA_NSS_DRV
+	nss_cb = rcu_dereference(rmnet_nss_callbacks);
+	if (!nss_cb) {
+		printk(KERN_ERR "qmi_wwan_driver_init: this driver must be loaded after '/etc/modules.d/42-rmnet-nss'\n");
+	}
+#endif
+	return usb_register(&qmi_wwan_driver);
+}
+module_init(qmi_wwan_driver_init);
+static void __exit qmi_wwan_driver_exit(void)
+{
+	usb_deregister(&qmi_wwan_driver);
+}
+module_exit(qmi_wwan_driver_exit);
+
+MODULE_AUTHOR("Bjørn Mork <bjorn@mork.no>");
+MODULE_DESCRIPTION("Qualcomm MSM Interface (QMI) WWAN driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(QUECTEL_WWAN_VERSION);
diff --git a/package/wwan/driver/quectel_QMI_WWAN/src/rmnet_nss.c b/package/wwan/driver/quectel_QMI_WWAN/src/rmnet_nss.c
new file mode 100644
index 000000000..e6e841468
--- /dev/null
+++ b/package/wwan/driver/quectel_QMI_WWAN/src/rmnet_nss.c
@@ -0,0 +1,424 @@
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/hashtable.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/rcupdate.h>
+#include <linux/skbuff.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+
+#include <nss_api_if.h>
+
+#define RMNET_NSS_HASH_BITS 8
+#define hash_add_ptr(table, node, key) \
+	hlist_add_head(node, &table[hash_ptr(key, HASH_BITS(table))])
+
+static DEFINE_HASHTABLE(rmnet_nss_ctx_hashtable, RMNET_NSS_HASH_BITS);
+
+struct rmnet_nss_ctx {
+	struct hlist_node hnode;
+	struct net_device *rmnet_dev;
+	struct nss_rmnet_rx_handle *nss_ctx;
+};
+
+enum __rmnet_nss_stat {
+	RMNET_NSS_RX_ETH,
+	RMNET_NSS_RX_FAIL,
+	RMNET_NSS_RX_NON_ETH,
+	RMNET_NSS_RX_BUSY,
+	RMNET_NSS_TX_NO_CTX,
+	RMNET_NSS_TX_SUCCESS,
+	RMNET_NSS_TX_FAIL,
+	RMNET_NSS_TX_NONLINEAR,
+	RMNET_NSS_TX_BAD_IP,
+	RMNET_NSS_EXCEPTIONS,
+	RMNET_NSS_EX_BAD_HDR,
+	RMNET_NSS_EX_BAD_IP,
+	RMNET_NSS_EX_SUCCESS,
+	RMNET_NSS_TX_BAD_FRAGS,
+	RMNET_NSS_TX_LINEARIZE_FAILS,
+	RMNET_NSS_TX_NON_ZERO_HEADLEN_FRAGS,
+	RMNET_NSS_TX_BUSY_LOOP,
+	RMNET_NSS_NUM_STATS,
+};
+
+static unsigned long rmnet_nss_stats[RMNET_NSS_NUM_STATS];
+
+#define RMNET_NSS_STAT(name, counter, desc) \
+	module_param_named(name, rmnet_nss_stats[counter], ulong, 0444); \
+	MODULE_PARM_DESC(name, desc)
+
+RMNET_NSS_STAT(rmnet_nss_rx_ethernet, RMNET_NSS_RX_ETH,
+	       "Number of Ethernet headers successfully removed");
+RMNET_NSS_STAT(rmnet_nss_rx_fail, RMNET_NSS_RX_FAIL,
+	       "Number of Ethernet headers that could not be removed");
+RMNET_NSS_STAT(rmnet_nss_rx_non_ethernet, RMNET_NSS_RX_NON_ETH,
+	       "Number of non-Ethernet packets received");
+RMNET_NSS_STAT(rmnet_nss_rx_busy, RMNET_NSS_RX_BUSY,
+	       "Number of packets dropped because rmnet_data device was busy");
+RMNET_NSS_STAT(rmnet_nss_tx_slow, RMNET_NSS_TX_NO_CTX,
+	       "Number of packets sent over non-NSS-accelerated rmnet device");
+RMNET_NSS_STAT(rmnet_nss_tx_fast, RMNET_NSS_TX_SUCCESS,
+	       "Number of packets sent over NSS-accelerated rmnet device");
+RMNET_NSS_STAT(rmnet_nss_tx_fail, RMNET_NSS_TX_FAIL,
+	       "Number of packets that NSS could not transmit");
+RMNET_NSS_STAT(rmnet_nss_tx_nonlinear, RMNET_NSS_TX_NONLINEAR,
+	       "Number of non-linear packets sent over NSS-accelerated rmnet device");
+RMNET_NSS_STAT(rmnet_nss_tx_invalid_ip, RMNET_NSS_TX_BAD_IP,
+	       "Number of ingress packets with invalid IP headers");
+RMNET_NSS_STAT(rmnet_nss_tx_invalid_frags, RMNET_NSS_TX_BAD_FRAGS,
+	       "Number of ingress packets with invalid frag format");
+RMNET_NSS_STAT(rmnet_nss_tx_linearize_fail, RMNET_NSS_TX_LINEARIZE_FAILS,
+	       "Number of ingress packets where linearize in tx fails");
+RMNET_NSS_STAT(rmnet_nss_tx_exceptions, RMNET_NSS_EXCEPTIONS,
+	       "Number of times our DL exception handler was invoked");
+RMNET_NSS_STAT(rmnet_nss_exception_non_ethernet, RMNET_NSS_EX_BAD_HDR,
+	       "Number of non-Ethernet exception packets");
+RMNET_NSS_STAT(rmnet_nss_exception_invalid_ip, RMNET_NSS_EX_BAD_IP,
+	       "Number of exception packets with invalid IP headers");
+RMNET_NSS_STAT(rmnet_nss_exception_success, RMNET_NSS_EX_SUCCESS,
+	       "Number of exception packets handled successfully");
+RMNET_NSS_STAT(rmnet_nss_tx_non_zero_headlen_frags, RMNET_NSS_TX_NON_ZERO_HEADLEN_FRAGS,
+	       "Number of packets with non zero headlen");
+RMNET_NSS_STAT(rmnet_nss_tx_busy_loop, RMNET_NSS_TX_BUSY_LOOP,
+	       "Number of times tx packets busy looped");
+
+static void rmnet_nss_inc_stat(enum __rmnet_nss_stat stat)
+{
+	if (stat >= 0 && stat < RMNET_NSS_NUM_STATS)
+		rmnet_nss_stats[stat]++;
+}
+
+static struct rmnet_nss_ctx *rmnet_nss_find_ctx(struct net_device *dev)
+{
+	struct rmnet_nss_ctx *ctx;
+	struct hlist_head *bucket;
+	u32 hash;
+
+	hash = hash_ptr(dev, HASH_BITS(rmnet_nss_ctx_hashtable));
+	bucket = &rmnet_nss_ctx_hashtable[hash];
+	hlist_for_each_entry(ctx, bucket, hnode) {
+		if (ctx->rmnet_dev == dev)
+			return ctx;
+	}
+
+	return NULL;
+}
+
+static void rmnet_nss_free_ctx(struct rmnet_nss_ctx *ctx)
+{
+	if (ctx) {
+		hash_del(&ctx->hnode);
+		nss_rmnet_rx_xmit_callback_unregister(ctx->nss_ctx);
+		nss_rmnet_rx_destroy_sync(ctx->nss_ctx);
+		kfree(ctx);
+	}
+}
+
+/* Pull off an ethernet header, if possible */
+static int rmnet_nss_ethhdr_pull(struct sk_buff *skb)
+{
+	if (!skb->protocol || skb->protocol == htons(ETH_P_802_3)) {
+		void *ret = skb_pull(skb, sizeof(struct ethhdr));
+
+		rmnet_nss_inc_stat((ret) ? RMNET_NSS_RX_ETH :
+				   RMNET_NSS_RX_FAIL);
+		return !ret;
+	}
+
+	rmnet_nss_inc_stat(RMNET_NSS_RX_NON_ETH);
+	return -1;
+}
+
+/* Copy headers to linear section for non linear packets */
+static int rmnet_nss_adjust_header(struct sk_buff *skb)
+{
+	struct iphdr *iph;
+	skb_frag_t *frag;
+	int bytes = 0;
+	u8 transport;
+
+	if (skb_shinfo(skb)->nr_frags != 1) {
+		rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_FRAGS);
+		return -EINVAL;
+	}
+
+	if (skb_headlen(skb)) {
+		rmnet_nss_inc_stat(RMNET_NSS_TX_NON_ZERO_HEADLEN_FRAGS);
+		return 0;
+	}
+
+	frag = &skb_shinfo(skb)->frags[0];
+
+	iph = (struct iphdr *)(skb_frag_address(frag));
+
+	if (iph->version == 4) {
+		bytes = iph->ihl*4;
+		transport = iph->protocol;
+	} else if (iph->version == 6) {
+		struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;
+
+		bytes = sizeof(struct ipv6hdr);
+		/* Don't have to account for extension headers yet */
+		transport = ip6h->nexthdr;
+	} else {
+		rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_IP);
+		return -EINVAL;
+	}
+
+	if (transport == IPPROTO_TCP) {
+		struct tcphdr *th;
+
+		th = (struct tcphdr *)((u8 *)iph + bytes);
+		bytes += th->doff * 4;
+	} else if (transport == IPPROTO_UDP) {
+		bytes += sizeof(struct udphdr);
+	} else {
+		/* can't do anything else here unfortunately, so linearize */
+		if (skb_linearize(skb)) {
+			rmnet_nss_inc_stat(RMNET_NSS_TX_LINEARIZE_FAILS);
+			return -EINVAL;
+		} else {
+			return 0;
+		}
+	}
+
+	if (bytes > skb_frag_size(frag)) {
+		rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_FRAGS);
+		return -EINVAL;
+	}
+
+	skb_push(skb, bytes);
+	memcpy(skb->data, iph, bytes);
+
+	/* subtract to account for skb_push */
+	skb->len -= bytes;
+
+	frag->page_offset += bytes;
+	skb_frag_size_sub(frag, bytes);
+
+	/* subtract to account for skb_frag_size_sub */
+	skb->data_len -= bytes;
+
+	return 0;
+}
+
+/* Main downlink handler
+ * Looks up the NSS context associated with the device. If the context is
+ * found, we add a dummy ethernet header with the appropriate protocol
+ * field set, then pass the packet off to NSS for hardware acceleration.
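+ *
+ * Hedged sketch of the dummy frame handed to NSS (mirroring the code
+ * below, for an IPv4 payload):
+ *
+ *   struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
+ *   memset(eth, 0, 2 * ETH_ALEN);       // dst/src MACs are don't-care
+ *   eth->h_proto = htons(ETH_P_IP);     // from the IP version nibble
+ *   skb->protocol = htons(ETH_P_802_3); // mark as raw 802.3 for NSS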
+ */
+int rmnet_nss_tx(struct sk_buff *skb)
+{
+	struct ethhdr *eth;
+	struct rmnet_nss_ctx *ctx;
+	struct net_device *dev = skb->dev;
+	nss_tx_status_t rc;
+	unsigned int len;
+	u8 version;
+
+	if (skb_is_nonlinear(skb)) {
+		if (rmnet_nss_adjust_header(skb))
+			goto fail;
+		else
+			rmnet_nss_inc_stat(RMNET_NSS_TX_NONLINEAR);
+	}
+
+	version = ((struct iphdr *)skb->data)->version;
+
+	ctx = rmnet_nss_find_ctx(dev);
+	if (!ctx) {
+		rmnet_nss_inc_stat(RMNET_NSS_TX_NO_CTX);
+		return -EINVAL;
+	}
+
+	eth = (struct ethhdr *)skb_push(skb, sizeof(*eth));
+	memset(&eth->h_dest, 0, ETH_ALEN * 2);
+	if (version == 4) {
+		eth->h_proto = htons(ETH_P_IP);
+	} else if (version == 6) {
+		eth->h_proto = htons(ETH_P_IPV6);
+	} else {
+		rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_IP);
+		goto fail;
+	}
+
+	skb->protocol = htons(ETH_P_802_3);
+	/* Get length including ethhdr */
+	len = skb->len;
+
+transmit:
+	rc = nss_rmnet_rx_tx_buf(ctx->nss_ctx, skb);
+	if (rc == NSS_TX_SUCCESS) {
+		/* Increment rmnet_data device stats.
+		 * Don't call rmnet_data_vnd_rx_fixup() to do this, as
+		 * there's no guarantee the skb pointer is still valid.
+		 */
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += len;
+		rmnet_nss_inc_stat(RMNET_NSS_TX_SUCCESS);
+		return 0;
+	} else if (rc == NSS_TX_FAILURE_QUEUE) {
+		rmnet_nss_inc_stat(RMNET_NSS_TX_BUSY_LOOP);
+		goto transmit;
+	}
+
+fail:
+	rmnet_nss_inc_stat(RMNET_NSS_TX_FAIL);
+	kfree_skb(skb);
+	return 1;
+}
+
+/* Called by NSS in the DL exception case.
+ * Since the packet cannot be sent over the accelerated path, we need to
+ * handle it. Remove the ethernet header and pass it onward to the stack
+ * if possible.
+ */
+void rmnet_nss_receive(struct net_device *dev, struct sk_buff *skb,
+		       struct napi_struct *napi)
+{
+	rmnet_nss_inc_stat(RMNET_NSS_EXCEPTIONS);
+
+	if (!skb)
+		return;
+
+	if (rmnet_nss_ethhdr_pull(skb)) {
+		rmnet_nss_inc_stat(RMNET_NSS_EX_BAD_HDR);
+		goto drop;
+	}
+
+	/* reset header pointers */
+	skb_reset_transport_header(skb);
+	skb_reset_network_header(skb);
+	skb_reset_mac_header(skb);
+
+	/* reset packet type */
+	skb->pkt_type = PACKET_HOST;
+
+	skb->dev = dev;
+
+	/* reset protocol type */
+	switch (skb->data[0] & 0xF0) {
+	case 0x40:
+		skb->protocol = htons(ETH_P_IP);
+		break;
+	case 0x60:
+		skb->protocol = htons(ETH_P_IPV6);
+		break;
+	default:
+		rmnet_nss_inc_stat(RMNET_NSS_EX_BAD_IP);
+		goto drop;
+	}
+
+	rmnet_nss_inc_stat(RMNET_NSS_EX_SUCCESS);
+
+	/* Set this so that we don't loop around netif_receive_skb */
+	skb->cb[0] = 1;
+
+	netif_receive_skb(skb);
+	return;
+
+drop:
+	kfree_skb(skb);
+}
+
+/* Called by NSS in the UL acceleration case.
+ * We are guaranteed to have an ethernet packet here from the NSS
+ * hardware. We need to pull the header off and invoke our
+ * ndo_start_xmit function to handle transmitting the packet to the
+ * network stack.
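+ *
+ * Together with rmnet_nss_receive() above this completes the NSS
+ * plumbing. A hedged lifecycle sketch using the functions exported
+ * through the rmnet_nss_cb table below:
+ *
+ *   if (rmnet_nss_create_vnd(qmap_net) == 0) { // hash ctx + callbacks
+ *       // ... accelerated while the device is up ...
+ *       rmnet_nss_free_vnd(qmap_net);          // teardown on unplug
+ *   }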
+ */
+void rmnet_nss_xmit(struct net_device *dev, struct sk_buff *skb)
+{
+	netdev_tx_t ret;
+
+	skb_pull(skb, sizeof(struct ethhdr));
+	rmnet_nss_inc_stat(RMNET_NSS_RX_ETH);
+
+	/* NSS takes care of shaping, so bypassing Qdiscs like this is OK */
+	ret = dev->netdev_ops->ndo_start_xmit(skb, dev);
+	if (unlikely(ret == NETDEV_TX_BUSY)) {
+		dev_kfree_skb_any(skb);
+		rmnet_nss_inc_stat(RMNET_NSS_RX_BUSY);
+	}
+}
+
+/* Create and register an NSS context for an rmnet_data device */
+int rmnet_nss_create_vnd(struct net_device *dev)
+{
+	struct rmnet_nss_ctx *ctx;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->rmnet_dev = dev;
+	ctx->nss_ctx = nss_rmnet_rx_create_sync_nexthop(dev, NSS_N2H_INTERFACE,
+							NSS_C2C_TX_INTERFACE);
+	if (!ctx->nss_ctx) {
+		kfree(ctx);
+		return -1;
+	}
+
+	nss_rmnet_rx_register(ctx->nss_ctx, rmnet_nss_receive, dev);
+	nss_rmnet_rx_xmit_callback_register(ctx->nss_ctx, rmnet_nss_xmit);
+	hash_add_ptr(rmnet_nss_ctx_hashtable, &ctx->hnode, dev);
+	return 0;
+}
+
+/* Unregister and destroy the NSS context for an rmnet_data device */
+int rmnet_nss_free_vnd(struct net_device *dev)
+{
+	struct rmnet_nss_ctx *ctx;
+
+	ctx = rmnet_nss_find_ctx(dev);
+	rmnet_nss_free_ctx(ctx);
+
+	return 0;
+}
+
+static const struct rmnet_nss_cb rmnet_nss = {
+	.nss_create = rmnet_nss_create_vnd,
+	.nss_free = rmnet_nss_free_vnd,
+	.nss_tx = rmnet_nss_tx,
+};
+
+int __init rmnet_nss_init(void)
+{
+	pr_err("%s(): initializing rmnet_nss\n", __func__);
+	RCU_INIT_POINTER(rmnet_nss_callbacks, &rmnet_nss);
+	return 0;
+}
+
+void __exit rmnet_nss_exit(void)
+{
+	struct hlist_node *tmp;
+	struct rmnet_nss_ctx *ctx;
+	int bkt;
+
+	pr_err("%s(): exiting rmnet_nss\n", __func__);
+	RCU_INIT_POINTER(rmnet_nss_callbacks, NULL);
+
+	/* Tear down all NSS contexts */
+	hash_for_each_safe(rmnet_nss_ctx_hashtable, bkt, tmp, ctx, hnode)
+		rmnet_nss_free_ctx(ctx);
+}
+
+#if 0
+MODULE_LICENSE("GPL v2");
+module_init(rmnet_nss_init);
+module_exit(rmnet_nss_exit);
+#endif
diff --git a/package/wwan/driver/quectel_SRPD_PCIE/Makefile b/package/wwan/driver/quectel_SRPD_PCIE/Makefile
new file mode 100755
index 000000000..6397c2386
--- /dev/null
+++ b/package/wwan/driver/quectel_SRPD_PCIE/Makefile
@@ -0,0 +1,47 @@
+#
+# Copyright (C) 2015 OpenWrt.org
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information.
+#
+
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=sprd_pcie
+PKG_VERSION:=1.6
+PKG_RELEASE:=1
+
+include $(INCLUDE_DIR)/kernel.mk
+include $(INCLUDE_DIR)/package.mk
+
+define KernelPackage/sprd_pcie
+  SUBMENU:=PCIE Support
+  TITLE:=Kernel pcie driver for SPRD device
+  DEPENDS:=
+  FILES:=$(PKG_BUILD_DIR)/sprd_pcie.ko
+  AUTOLOAD:=$(call AutoLoad,41,sprd_pcie)
+endef
+
+define KernelPackage/sprd_pcie/description
+  Kernel module that registers a custom SPRD PCIe platform device.
+endef
+
+MAKE_OPTS:= \
+	ARCH="$(LINUX_KARCH)" \
+	CROSS_COMPILE="$(TARGET_CROSS)" \
+	CXXFLAGS="$(TARGET_CXXFLAGS)" \
+	M="$(PKG_BUILD_DIR)" \
+	$(EXTRA_KCONFIG)
+
+define Build/Prepare
+	mkdir -p $(PKG_BUILD_DIR)
+	$(CP) ./src/* $(PKG_BUILD_DIR)/
+endef
+
+define Build/Compile
+	$(MAKE) -C "$(LINUX_DIR)" \
+		$(MAKE_OPTS) \
+		modules
+endef
+
+$(eval $(call KernelPackage,sprd_pcie))
diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/Makefile b/package/wwan/driver/quectel_SRPD_PCIE/src/Makefile
new file mode 100644
index 000000000..8fc450fe6
--- /dev/null
+++ b/package/wwan/driver/quectel_SRPD_PCIE/src/Makefile
@@ -0,0 +1,33 @@
+#
+# Makefile for the sprd staging modem files
+#
+EXTRA_CFLAGS += -Wno-error -Wno-packed-bitfield-compat
+ccflags-y += -DCONFIG_SPRD_PCIE_EP_DEVICE -DCONFIG_SPRD_SIPA -DCONFIG_SPRD_ETHERNET
+obj-m += sprd_pcie.o
+sprd_pcie-objs := pcie/sprd_pcie_ep_device.o pcie/pcie_host_resource.o pcie/sprd_pcie_quirks.o sipc/sipc.o sipc/sblock.o sipc/sbuf.o \
+	sipc/sipc_debugfs.o sipc/smem.o sipc/smsg.o sipc/spipe.o sipc/spool.o power_manager/power_manager.o \
+	sipa/sipa_core.o sipa/sipa_eth.o sipa/sipa_nic.o sipa/sipa_skb_send.o sipa/sipa_skb_recv.o sipa/sipa_dummy.o sipa/sipa_debugfs.o sipa/sipa_dele_cmn.o \
+	sipa/sipa_phy_v0/sipa_fifo_irq_hal.o sipa/sipa_phy_v0/sipa_common_fifo_hal.o
+
+PWD := $(shell pwd)
+ifeq ($(ARCH),)
+ARCH := $(shell uname -m)
+endif
+ifeq ($(CROSS_COMPILE),)
+CROSS_COMPILE :=
+endif
+ifeq ($(KDIR),)
+KDIR := /lib/modules/$(shell uname -r)/build
+endif
+
+sprd_pcie: clean
+	$(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) modules
+	#cp sprd_pcie.ko /tftpboot/
+
+clean:
+	$(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) clean
+	find . -name "*.o.ur-safe" | xargs rm -f
+
+install: sprd_pcie
+	sudo cp sprd_pcie.ko /lib/modules/${shell uname -r}/kernel/drivers/pci/
+	sudo depmod
diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/include/mdm_ctrl.h b/package/wwan/driver/quectel_SRPD_PCIE/src/include/mdm_ctrl.h
new file mode 100644
index 000000000..677b8379d
--- /dev/null
+++ b/package/wwan/driver/quectel_SRPD_PCIE/src/include/mdm_ctrl.h
@@ -0,0 +1,31 @@
+#ifndef _MDM_CTRL_H
+#define _MDM_CTRL_H
+/*
+ * The mcd driver offers the modem_ctrl_send_abnormal_to_ap() function
+ * to other modules: use it to notify the AP that an error has been
+ * caught, so the AP can act on the error and attempt recovery.
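+ *
+ * A hedged usage sketch (the status codes come from the enum below;
+ * the error condition is hypothetical):
+ *
+ *   if (pcie_link_lost)
+ *       modem_ctrl_send_abnormal_to_ap(MDM_CTRL_PCIE_RECOVERY);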
+ */
+
+#include <linux/notifier.h>
+
+enum {
+	MDM_CTRL_POWER_OFF = 0,
+	MDM_CTRL_POWER_ON,
+	MDM_CTRL_WARM_RESET,
+	MDM_CTRL_COLD_RESET,
+	MDM_WATCHDOG_RESET,
+	MDM_ASSERT,
+	MDM_PANIC,
+	MDM_CTRL_PCIE_RECOVERY,
+	MDM_POWER_OFF,
+	MDM_CTRL_SET_CFG
+};
+
+void modem_ctrl_send_abnormal_to_ap(int status);
+void modem_ctrl_poweron_modem(int on);
+void modem_ctrl_enable_cp_event(void);
+int modem_ctrl_register_notifier(struct notifier_block *nb);
+void modem_ctrl_unregister_notifier(struct notifier_block *nb);
+
+#endif
diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/include/pcie-rc-sprd.h b/package/wwan/driver/quectel_SRPD_PCIE/src/include/pcie-rc-sprd.h
new file mode 100644
index 000000000..4a42963d3
--- /dev/null
+++ b/package/wwan/driver/quectel_SRPD_PCIE/src/include/pcie-rc-sprd.h
@@ -0,0 +1,49 @@
+#ifndef _PCIE_RC_SPRD_H
+#define _PCIE_RC_SPRD_H
+
+#include <linux/platform_device.h>
+
+enum sprd_pcie_event {
+	SPRD_PCIE_EVENT_INVALID = 0,
+	SPRD_PCIE_EVENT_LINKDOWN = 0x1,
+	SPRD_PCIE_EVENT_LINKUP = 0x2,
+	SPRD_PCIE_EVENT_WAKEUP = 0x4,
+};
+
+struct sprd_pcie_register_event {
+	u32 events;
+	struct platform_device *pdev;
+	void (*callback)(enum sprd_pcie_event event, void *data);
+	void *data;
+};
+
+/*
+ * SPRD PCIe root complex (e.g. UD710 SoC) can't support PCI hotplug
+ * capability. Therefore, the standard hotplug driver can't be used.
+ *
+ * Whenever one endpoint is plugged or powered on, the EP driver must
+ * call sprd_pcie_configure_device() in order to add EP device to system
+ * and probe EP driver. If one endpoint is unplugged or powered off,
+ * the EP driver must call sprd_pcie_unconfigure_device() in order to
+ * remove all PCI devices on PCI bus.
+ *
+ * return 0 on success, otherwise return a negative number.
+ */
+/* dummy sprd api */
+static inline int sprd_pcie_configure_device(struct platform_device *pdev) { return 0; }
+static inline int sprd_pcie_unconfigure_device(struct platform_device *pdev) { return 0; }
+static inline void sprd_pcie_teardown_msi_irq(unsigned int irq) { }
+static inline void sprd_pcie_dump_rc_regs(struct platform_device *pdev) { }
+static inline int sprd_pcie_register_event(struct sprd_pcie_register_event *reg) { return 0; }
+static inline int sprd_pcie_deregister_event(struct sprd_pcie_register_event *reg) { return 0; }
+
+#ifdef CONFIG_SPRD_PCIE_AER
+static inline void sprd_pcie_alloc_irq_vectors(struct pci_dev *dev, int *irqs, int services) { }
+#else
+static inline void sprd_pcie_alloc_irq_vectors(struct pci_dev *dev, int *irqs,
+					       int services)
+{
+}
+#endif
+
+#endif
diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/include/sipa.h b/package/wwan/driver/quectel_SRPD_PCIE/src/include/sipa.h
new file mode 100644
index 000000000..9e4c66413
--- /dev/null
+++ b/package/wwan/driver/quectel_SRPD_PCIE/src/include/sipa.h
@@ -0,0 +1,59 @@
+#ifndef _SIPA_H_
+#define _SIPA_H_
+
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+enum sipa_evt_type {
+	SIPA_RECEIVE,
+	SIPA_ENTER_FLOWCTRL,
+	SIPA_LEAVE_FLOWCTRL,
+	SIPA_ERROR,
+};
+
+typedef void (*sipa_notify_cb)(void *priv, enum sipa_evt_type evt,
+			       unsigned int data);
+
+enum sipa_term_type {
+	SIPA_TERM_PCIE0 = 0x10,
+	SIPA_TERM_PCIE1 = 0x11,
+	SIPA_TERM_PCIE2 = 0x12,
+	SIPA_TERM_CP0 = 0x4,
+	SIPA_TERM_CP1 = 0x5,
+	SIPA_TERM_VCP = 0x6,
+
+	SIPA_TERM_MAX = 0x20, /* max 5-bit register */
+};
+
+enum sipa_nic_id {
+	SIPA_NIC_BB0,
+	SIPA_NIC_BB1,
+	SIPA_NIC_BB2,
+	SIPA_NIC_BB3,
+	SIPA_NIC_BB4,
+	SIPA_NIC_BB5,
+	SIPA_NIC_BB6,
+	SIPA_NIC_BB7,
+	SIPA_NIC_BB8,
+	SIPA_NIC_BB9,
+	SIPA_NIC_BB10,
+	SIPA_NIC_BB11,
+	SIPA_NIC_MAX,
+};
+
+struct sk_buff *sipa_recv_skb(int *netid, int index);
int index); +bool sipa_check_recv_tx_fifo_empty(void); +int sipa_nic_open(enum sipa_term_type src, int netid, + sipa_notify_cb cb, void *priv); +void sipa_nic_close(enum sipa_nic_id nic_id); +int sipa_nic_tx(enum sipa_nic_id nic_id, enum sipa_term_type dst, + int netid, struct sk_buff *skb); +int sipa_nic_rx(int *netid, struct sk_buff **out_skb, int index); +int sipa_nic_rx_has_data(enum sipa_nic_id nic_id); +int sipa_nic_trigger_flow_ctrl_work(enum sipa_nic_id nic_id, int err); + +u32 sipa_nic_get_filled_num(void); +void sipa_nic_restore_irq(void); +void sipa_nic_set_tx_fifo_rp(u32 rptr); +#endif diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/include/sipc.h b/package/wwan/driver/quectel_SRPD_PCIE/src/include/sipc.h new file mode 100644 index 000000000..fe01b6463 --- /dev/null +++ b/package/wwan/driver/quectel_SRPD_PCIE/src/include/sipc.h @@ -0,0 +1,1154 @@ +/* + * Copyright (C) 2012-2019 Spreadtrum Communications Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __SIPC_H +#define __SIPC_H + +#include + +/* ****************************************************************** */ +/* SMSG interfaces */ + +/* sipc processor ID definition */ +enum { + SIPC_ID_AP = 0, /* Application Processor */ + SIPC_ID_MINIAP, /* mini AP processor */ + SIPC_ID_CPW, /* WCDMA processor */ + SIPC_ID_WCN, /* Wireless Connectivity */ + SIPC_ID_GNSS, /* Gps processor(gnss) */ + SIPC_ID_PSCP, /* Protocol stack processor */ + SIPC_ID_PM_SYS, /* Power management processor */ + SIPC_ID_NR_PHY, /* New Radio PHY processor */ + SIPC_ID_V3_PHY, /* MODEM v3 PHY processor */ + SIPC_ID_NR, /* Max processor number */ +}; + +#define SIPC_ID_LTE SIPC_ID_PSCP + +/* share-mem ring buffer short message */ +struct smsg { + u8 channel; /* channel index */ + u8 type; /* msg type */ + u16 flag; /* msg flag */ + u32 value; /* msg value */ +}; + +/* smsg channel definition */ +enum { + SMSG_CH_CTRL = 0, /* some emergency control */ + SMSG_CH_COMM, /* general communication channel */ + SMSG_CH_IMSBR_DATA, /* ims bridge data channel */ + SMSG_CH_IMSBR_CTRL, /* ims bridge control channel */ + SMSG_CH_PIPE, /* general pipe channel */ + SMSG_CH_PLOG, /* pipe for debug log/dump */ + SMSG_CH_TTY, /* virtual serial for telephony */ + SMSG_CH_DATA0, /* 2G/3G wirleless data */ + SMSG_CH_DATA1, /* 2G/3G wirleless data */ + SMSG_CH_DATA2, /* 2G/3G wirleless data */ + SMSG_CH_VBC, /* audio conrol channel */ + SMSG_CH_PLAYBACK, /* audio playback channel */ + SMSG_CH_CAPTURE, /* audio capture channel */ + SMSG_CH_MONITOR_AUDIO, /* audio monitor channel */ + SMSG_CH_CTRL_VOIP, /* audio voip conrol channel */ + SMSG_CH_PLAYBACK_VOIP, /* audio voip playback channel */ + SMSG_CH_CAPTURE_VOIP, /* audio voip capture channel */ + SMSG_CH_MONITOR_VOIP, /* audio voip monitor channel */ + SMSG_CH_DATA3, /* 2G/3G wirleless data */ + SMSG_CH_DATA4, /* 2G/3G wirleless data */ + SMSG_CH_DATA5, /* 2G/3G wirleless data */ + SMSG_CH_DIAG, /* pipe for debug log/dump */ + SMSG_CH_PM_CTRL, /* power management control */ + SMSG_CH_DUAL_SIM_PLUG, /* dual sim plug channel */ + SMSG_CH_END /* will not allow add channel in here */ +}; + +/* 
smsg channel definition */
+enum {
+	/* 2G/3G wireless data, channel 24~39 */
+	SMSG_CH_DATA_BASE = 24,
+	SMSG_CH_DATA6 = SMSG_CH_DATA_BASE,
+	SMSG_CH_DATA7,
+	SMSG_CH_DATA8,
+	SMSG_CH_DATA9,
+	SMSG_CH_DATA10,
+	SMSG_CH_DATA11,
+	SMSG_CH_DATA12,
+	SMSG_CH_DATA13,
+
+	/* general pipe channel, channel 40~59 */
+	SMSG_CH_PIPE_BASE = 40,
+	SMSG_CH_NV = SMSG_CH_PIPE_BASE,
+	SMSG_CH_DVFS,
+	SMSG_CH_PIPE2,
+	SMSG_CH_PIPE3,
+
+	/* pipe for debug log/dump, channel 60~79 */
+	SMSG_CH_PLOG_BASE = 60,
+	SMSG_CH_PLOG0 = SMSG_CH_PLOG_BASE,
+	SMSG_CH_PLOG1,
+	SMSG_CH_PLOG2,
+	SMSG_CH_PLOG3,
+
+	/* virtual serial for telephony, channel 80~99 */
+	SMSG_CH_TTY_BASE = 80,
+	SMSG_CH_TTY0 = SMSG_CH_TTY_BASE,
+	SMSG_CH_TTY1,
+	SMSG_CH_TTY2,
+	SMSG_CH_TTY3,
+
+	/* some emergency control, channel 100~119 */
+	SMSG_CH_CTRL_BASE = 100,
+	SMSG_CH_PMSYS_DBG = SMSG_CH_CTRL_BASE,
+	SMSG_CH_CTRL1,
+	SMSG_CH_CTRL2,
+	SMSG_CH_CTRL3,
+
+	/* general communication, channel 120~129 */
+	SMSG_CH_COMM_BASE = 120,
+	SMSG_CH_COMM_SIPA = SMSG_CH_COMM_BASE,
+	SMSG_CH_COMM1,
+	SMSG_CH_COMM2,
+	SMSG_CH_COMM3,
+
+	/* audio channel, channel 130~149 */
+	SMSG_CH_AUDIO_BASE = 130,
+	SMSG_CH_AGDSP_ACCESS = SMSG_CH_AUDIO_BASE, /* audio control channel */
+	SMSG_CH_PLAYBACK_DEEP,
+	SMSG_CH_AUDIO2,
+	SMSG_CH_AUDIO3,
+
+	/* VOIP channel, channel 150~169 */
+	SMSG_CH_VOIP_BASE = 150,
+	SMSG_CH_VOIP0 = SMSG_CH_VOIP_BASE, /* audio voip control channel */
+	SMSG_CH_VOIP_DEEP,	/* audio voip playback channel */
+	SMSG_CH_VOIP2,		/* audio voip capture channel */
+	SMSG_CH_VOIP3,		/* audio voip monitor channel */
+
+	/* RPC server channel, channel 170~189 */
+	SMSG_CH_RPC_BASE = 170,
+	SMSG_CH_RPC0 = SMSG_CH_RPC_BASE,
+	SMSG_CH_RPC1,
+	SMSG_CH_RPC2,
+	SMSG_CH_RPC3,
+
+	/* RESERVE group 1, channel 190~209 */
+	SMSG_CH_RESERVE1_BASE = 190,
+
+	/* RESERVE group 2, channel 210~229 */
+	SMSG_CH_RESERVE2_BASE = 210,
+
+	/* RESERVE group 3, channel 230~244 */
+	SMSG_CH_RESERVE3_BASE = 230,
+
+	/* RESERVE group 4, channel 245~254 */
+	SMSG_CH_RESERVE4_BASE = 245,
+
+	/* the total channel number is 255; the max channel index is 254 */
+	SMSG_CH_NR = 255
+};
+#define INVALID_CHANEL_INDEX SMSG_CH_NR
+
+/* modem type */
+enum {
+	SOC_MODEM = 0,
+	PCIE_MODEM,
+};
+
+/* only channels configured in sipc_cfg are valid channels */
+struct sipc_config {
+	u8 channel;
+	char *name;
+};
+
+static const struct sipc_config sipc_cfg[] = {
+	{SMSG_CH_CTRL, "com control"},		/* channel 0 */
+	{SMSG_CH_COMM, "com communication"},	/* channel 1 */
+	{SMSG_CH_PM_CTRL, "pm control"},	/* channel 22 */
+	{SMSG_CH_PMSYS_DBG, "pm debug control"},	/* channel 100 */
+	{SMSG_CH_DUAL_SIM_PLUG, "dual sim plug"},	/* channel 23 */
+	{SMSG_CH_PIPE, "pipe0"},		/* channel 4 */
+	{SMSG_CH_PLOG, "plog"},			/* channel 5 */
+	{SMSG_CH_DIAG, "diag"},			/* channel 21 */
+	{SMSG_CH_TTY, "stty channel"},		/* channel 6 */
+	{SMSG_CH_DATA0, "seth0"},		/* channel 7 */
+	{SMSG_CH_DATA1, "seth1"},		/* channel 8 */
+	{SMSG_CH_DATA2, "seth2"},		/* channel 9 */
+	{SMSG_CH_DATA3, "seth3"},		/* channel 18 */
+	{SMSG_CH_DATA4, "seth4"},		/* channel 19 */
+	{SMSG_CH_DATA5, "seth5"},		/* channel 20 */
+	{SMSG_CH_DATA6, "seth6"},		/* channel 24 */
+	{SMSG_CH_DATA7, "seth7"},		/* channel 25 */
+	{SMSG_CH_DATA8, "seth8"},		/* channel 26 */
+	{SMSG_CH_DATA9, "seth9"},		/* channel 27 */
+	{SMSG_CH_DATA10, "seth10"},		/* channel 28 */
+	{SMSG_CH_DATA11, "seth11"},		/* channel 29 */
+	{SMSG_CH_DATA12, "seth12"},		/* channel 30 */
+	{SMSG_CH_DATA13, "seth13"},		/* channel 31 */
+	{SMSG_CH_VBC, "audio control"},		/* channel 10 */
+	{SMSG_CH_PLAYBACK, "audio playback"},	/* channel 11 */
{SMSG_CH_CAPTURE, "audio capture"},	/* channel 12 */
+	{SMSG_CH_MONITOR_AUDIO, "audio monitor"},	/* channel 13 */
+	{SMSG_CH_AGDSP_ACCESS, "agdsp access"},	/* channel 130 */
+	{SMSG_CH_CTRL_VOIP, "VOIP control"},	/* channel 14 */
+	{SMSG_CH_PLAYBACK_VOIP, "VOIP playback"},	/* channel 15 */
+	{SMSG_CH_CAPTURE_VOIP, "VOIP capture"},	/* channel 16 */
+	{SMSG_CH_MONITOR_VOIP, "VOIP monitor"},	/* channel 17 */
+	{SMSG_CH_PLAYBACK_DEEP, "audio playback deep"},	/* channel 131 */
+	{SMSG_CH_IMSBR_DATA, "imsbr data"},	/* channel 2 */
+	{SMSG_CH_IMSBR_CTRL, "imsbr control"},	/* channel 3 */
+	{SMSG_CH_VOIP_DEEP, "audio voip deep"},	/* channel 151 */
+	{SMSG_CH_DVFS, "dvfs"},			/* channel 41 */
+	{SMSG_CH_COMM_SIPA, "sipa"},		/* channel 120 */
+	{SMSG_CH_NV, "nvsync"},			/* channel 40 */
+};
+
+#define SMSG_VALID_CH_NR (sizeof(sipc_cfg)/sizeof(struct sipc_config))
+
+
+/* smsg type definition */
+enum {
+	SMSG_TYPE_NONE = 0,
+	SMSG_TYPE_OPEN,		/* first msg to open a channel */
+	SMSG_TYPE_CLOSE,	/* last msg to close a channel */
+	SMSG_TYPE_DATA,		/* data, value=addr, no ack */
+	SMSG_TYPE_EVENT,	/* event with value, no ack */
+	SMSG_TYPE_CMD,		/* command, value=cmd */
+	SMSG_TYPE_DONE,		/* return of command */
+	SMSG_TYPE_SMEM_ALLOC,	/* allocate smem, flag=order */
+	SMSG_TYPE_SMEM_FREE,	/* free smem, flag=order, value=addr */
+	SMSG_TYPE_SMEM_DONE,	/* return of alloc/free smem */
+	SMSG_TYPE_FUNC_CALL,	/* RPC func, value=addr */
+	SMSG_TYPE_FUNC_RETURN,	/* return of RPC func */
+	SMSG_TYPE_DIE,
+	SMSG_TYPE_DFS,
+	SMSG_TYPE_DFS_RSP,
+	SMSG_TYPE_ASS_TRG,
+	SMSG_TYPE_HIGH_OFFSET,	/* client sipc get high offset from host */
+	SMSG_TYPE_NR,		/* total type number */
+};
+
+/* flag for OPEN/CLOSE msg type */
+#define SMSG_OPEN_MAGIC		0xBEEE
+#define SMSG_CLOSE_MAGIC	0xEDDD
+
+/**
+* sipc_get_wakeup_flag
+* after the wakeup flag is set, the first smsg will be printed
+* @parameters: void
+* @return: int
+*/
+int sipc_get_wakeup_flag(void);
+
+/**
+* sipc_set_wakeup_flag
+* after the wakeup flag is set, the first smsg will be printed
+* @parameters: void
+* @return: no return value
+*/
+void sipc_set_wakeup_flag(void);
+
+/**
+* sipc_clear_wakeup_flag
+* clear the wakeup flag
+* @parameters: void
+* @return: no return value
+*/
+void sipc_clear_wakeup_flag(void);
+
+/**
+ * smsg_ch_open -- open a channel for smsg
+ *
+ * @dst: dest processor ID
+ * @channel: channel ID
+ * @timeout: milliseconds, 0 means no wait, -1 means unlimited
+ * @return: 0 on success, <0 on failure
+ */
+int smsg_ch_open(u8 dst, u8 channel, int timeout);
+
+/**
+ * smsg_ch_close -- close a channel for smsg
+ *
+ * @dst: dest processor ID
+ * @channel: channel ID
+ * @timeout: milliseconds, 0 means no wait, -1 means unlimited
+ * @return: 0 on success, <0 on failure
+ */
+int smsg_ch_close(u8 dst, u8 channel, int timeout);
+
+ /**
+  * smsg_senddie -- send a MSG_TYPE_DIE message to pubcp
+  *
+  * @dst: dest processor ID
+  * @return: 0 on success, <0 on failure
+  */
+int smsg_senddie(u8 dst);
+
+/**
+ * smsg_send -- send smsg
+ *
+ * @dst: dest processor ID
+ * @msg: smsg body to be sent
+ * @timeout: milliseconds, 0 means no wait, -1 means unlimited
+ * @return: 0 on success, <0 on failure
+ */
+int smsg_send(u8 dst, struct smsg *msg, int timeout);
+
+/**
+ * smsg_recv -- poll and recv smsg
+ *
+ * @dst: dest processor ID
+ * @msg: smsg body to be received, channel should be filled as input
+ * @timeout: milliseconds, 0 means no wait, -1 means unlimited
+ * @return: 0 on success, <0 on failure
+ */
+int smsg_recv(u8 dst, struct smsg *msg, int timeout);
+
+/**
+ * sipc_channel2index
+ *
+ * only channels configured in sipc_cfg are valid channels
+ * @ch: channel number
+ * @return: the channel index; INVALID_CHANEL_INDEX indicates
+ * an invalid channel
+ */
+u8 sipc_channel2index(u8 ch);
+
+int smsg_ch_wake_unlock(u8 dst, u8 channel);
+
+#if defined(CONFIG_DEBUG_FS)
+void sipc_debug_putline(struct seq_file *m, char c, int n);
+#endif
+
+/* quickly fill a smsg body */
+static inline void smsg_set(struct smsg *msg, u8 channel,
+			    u8 type, u16 flag, u32 value)
+{
+	msg->channel = channel;
+	msg->type = type;
+	msg->flag = flag;
+	msg->value = value;
+}
+
+/* ack an open msg for modem recovery */
+static inline void smsg_open_ack(u8 dst, u16 channel)
+{
+	struct smsg mopen;
+
+	pr_info("%s: channel %d-%d!\n", __func__, dst, channel);
+	smsg_set(&mopen, channel, SMSG_TYPE_OPEN, SMSG_OPEN_MAGIC, 0);
+	smsg_send(dst, &mopen, -1);
+}
+
+/* ack a close msg for modem recovery */
+static inline void smsg_close_ack(u8 dst, u16 channel)
+{
+	struct smsg mclose;
+
+	pr_info("%s: channel %d-%d!\n", __func__, dst, channel);
+	smsg_set(&mclose, channel, SMSG_TYPE_CLOSE, SMSG_CLOSE_MAGIC, 0);
+	smsg_send(dst, &mclose, -1);
+}
+
+/* ****************************************************************** */
+/* SMEM interfaces */
+/**
+ * smem_get_area -- get sipc smem
+ *
+ * @dst: dest processor ID
+ * @base: base pointer
+ * @size: size pointer
+ * @return: 0 success, < 0 failed
+ */
+int smem_get_area(u8 dst, u32 *base, u32 *size);
+
+/**
+ * smem_alloc -- allocate shared memory block
+ *
+ * @dst: dest processor ID
+ * @size: size to be allocated, page-aligned
+ * @return: phys addr or 0 if failed
+ */
+u32 smem_alloc(u8 dst, u32 size);
+
+/**
+ * smem_free -- free shared memory block
+ *
+ * @dst: dest processor ID
+ * @addr: smem phys addr to be freed
+ * @size: size to be freed
+ */
+void smem_free(u8 dst, u32 addr, u32 size);
+
+/**
+ * shmem_ram_unmap -- for sipc unmap ram address
+ *
+ * @mem: vir mem
+ */
+void shmem_ram_unmap(u8 dst, const void *mem);
+
+/**
+ * shmem_ram_vmap_nocache -- for sipc map ram address
+ *
+ * @start: start address
+ * @size: size to be mapped, page-aligned
+ * @return: virtual address, or NULL if failed
+ */
+void *shmem_ram_vmap_nocache(u8 dst, phys_addr_t start, size_t size);
+
+/**
+ * shmem_ram_vmap_cache -- for sipc map ram address
+ *
+ * @start: start address
+ * @size: size to be mapped, page-aligned
+ * @return: virtual address, or NULL if failed
+ */
+void *shmem_ram_vmap_cache(u8 dst, phys_addr_t start, size_t size);
+
+/**
+ * modem_ram_unmap -- for modem unmap ram address
+ *
+ * @mem: vir mem
+ * @modem_type: soc modem, pcie modem
+ */
+void modem_ram_unmap(u32 modem_type, const void *mem);
+
+/**
+ * modem_ram_vmap_nocache -- for modem map ram address
+ *
+ * @modem_type: soc modem, pcie modem
+ * @start: start address
+ * @size: size to be mapped, page-aligned
+ * @return: virtual address, or NULL if failed
+ */
+void *modem_ram_vmap_nocache(u32 modem_type, phys_addr_t start, size_t size);
+
+/**
+ * modem_ram_vmap_cache -- for modem map ram address
+ *
+ * @modem_type: soc modem, pcie modem
+ * @start: start address
+ * @size: size to be mapped, page-aligned
+ * @return: virtual address, or NULL if failed
+ */
+void *modem_ram_vmap_cache(u32 modem_type, phys_addr_t start, size_t size);
+
+/**
+ * sbuf_set_no_need_wake_lock
+ *
+ * @dst: dest processor ID
+ * @channel: channel ID
+ * @bufnum: which buffer to be set
+ * @return: none
+ */
+void sbuf_set_no_need_wake_lock(u8 dst, u8 channel, u32 bufnum);
+
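To make the SMSG flow above concrete, here is a minimal, illustrative sketch of how a client could open a channel and send one event message. It is not part of the patch; the destination (SIPC_ID_PSCP), channel (SMSG_CH_COMM), timeouts, and the event value are assumptions chosen for the example.

    /* Illustrative only -- not part of the patch. */
    static int demo_send_event(void)
    {
    	struct smsg msg;
    	int ret;

    	/* Wait up to 1000 ms for the remote side to open the channel too. */
    	ret = smsg_ch_open(SIPC_ID_PSCP, SMSG_CH_COMM, 1000);
    	if (ret < 0)
    		return ret;

    	/* SMSG_TYPE_EVENT carries its payload in 'value' and needs no ack. */
    	smsg_set(&msg, SMSG_CH_COMM, SMSG_TYPE_EVENT, 0, 0x1234);
    	ret = smsg_send(SIPC_ID_PSCP, &msg, 500);

    	smsg_ch_close(SIPC_ID_PSCP, SMSG_CH_COMM, 500);
    	return ret;
    }

The same open/notify/read-write/close pattern applies to the sbuf ring-buffer interfaces declared next.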
+/**
+ * sbuf_create -- create pipe ring buffers on a channel
+ *
+ * @dst: dest processor ID
+ * @channel: channel ID
+ * @bufnum: how many buffers to be created
+ * @txbufsize: tx buffer size
+ * @rxbufsize: rx buffer size
+ * @return: 0 on success, <0 on failure
+ */
+int sbuf_create(u8 dst, u8 channel, u32 bufnum,
+		u32 txbufsize, u32 rxbufsize);
+
+/**
+ * sbuf_destroy -- destroy the pipe ring buffers on a channel
+ *
+ * @dst: dest processor ID
+ * @channel: channel ID
+ */
+void sbuf_destroy(u8 dst, u8 channel);
+void sbuf_down(u8 dst, u8 channel);
+
+/**
+ * sbuf_write -- write data to a sbuf
+ *
+ * @dst: dest processor ID
+ * @channel: channel ID
+ * @bufid: buffer ID
+ * @buf: data to be written
+ * @len: data length
+ * @timeout: milliseconds, 0 means no wait, -1 means unlimited
+ * @return: written bytes on success, <0 on failure
+ */
+int sbuf_write(u8 dst, u8 channel, u32 bufid,
+	       void *buf, u32 len, int timeout);
+
+/**
+ * sbuf_read -- read data from a sbuf
+ *
+ * @dst: dest processor ID
+ * @channel: channel ID
+ * @bufid: buffer ID
+ * @buf: buffer to receive the data
+ * @len: data length
+ * @timeout: milliseconds, 0 means no wait, -1 means unlimited
+ * @return: read bytes on success, <0 on failure
+ */
+int sbuf_read(u8 dst, u8 channel, u32 bufid,
+	      void *buf, u32 len, int timeout);
+
+/**
+ * sbuf_poll_wait -- poll sbuf read/write, used in spipe driver
+ *
+ * @dst: dest processor ID
+ * @channel: channel ID
+ * @bufid: buffer ID
+ * @file: struct file handler
+ * @wait: poll table
+ * @return: POLLIN or POLLOUT
+ */
+int sbuf_poll_wait(u8 dst, u8 channel, u32 bufid,
+		   struct file *file, poll_table *wait);
+
+/**
+ * sbuf_status -- get sbuf status
+ *
+ * @dst: dest processor ID
+ * @channel: channel ID
+ * @return: 0 when ready, <0 when broken
+ */
+int sbuf_status(u8 dst, u8 channel);
+
+#define SBUF_NOTIFY_READY	0x00
+#define SBUF_NOTIFY_READ	0x01
+#define SBUF_NOTIFY_WRITE	0x02
+/**
+ * sbuf_register_notifier -- register a callback that's called
+ * when a tx sbuf is available or a rx sbuf is received.
+ * non-blocked sbuf_read can be called.
+ * + * @dst: dest processor ID + * @channel: channel ID + * @bufid: buf ID + * @handler: a callback handler + * @event: NOTIFY_READ, NOTIFY_WRITE, or both + * @data: opaque data passed to the receiver + * @return: 0 on success, <0 on failure + */ +int sbuf_register_notifier(u8 dst, u8 channel, u32 bufid, + void (*handler)(int event, void *data), void *data); + + +int sipc_init(void); +void sipc_exit(void); + +int spipe_init(void); +void spipe_exit(void); +void spipe_device_down(void); +void spool_device_down(void); + +int spool_init(void); +void spool_exit(void); + +int modem_power_manager_init(void); +void modem_power_manager_exit(void); + +#if 0 +int modem_ctrl_init(void); +void modem_ctrl_exit(void); +#endif + +int sipc_init_debugfs(void); + +/* ****************************************************************** */ +/* SBLOCK interfaces */ + +/* sblock structure: addr is the uncached virtual address */ +struct sblock { + void *addr; + u32 length; +#ifdef CONFIG_SPRD_SIPC_ZERO_COPY_SIPX + u16 index; + u16 offset; +#endif +}; + +/** + * sblock_create -- create sblock manager on a channel + * + * @dst: dest processor ID + * @channel: channel ID + * @txblocknum: tx block number + * @txblocksize: tx block size + * @rxblocknum: rx block number + * @rxblocksize: rx block size + * @return: 0 on success, <0 on failure + */ +int sblock_create(u8 dst, u8 channel, + u32 txblocknum, u32 txblocksize, + u32 rxblocknum, u32 rxblocksize); + +/** + * sblock_create_ex -- merge sblock_create and block_register_notifier + * in one function + * + * @dst: dest processor ID + * @channel: channel ID + * @txblocknum: tx block number + * @txblocksize: tx block size + * @rxblocknum: rx block number + * @rxblocksize: rx block size + * @event: SBLOCK_NOTIFY_GET, SBLOCK_NOTIFY_RECV, or both + * @data: opaque data passed to the receiver + * @return: 0 on success, <0 on failure + */ +int sblock_create_ex(u8 dst, u8 channel, + u32 txblocknum, u32 txblocksize, + u32 rxblocknum, u32 rxblocksize, + void (*handler)(int event, void *data), void *data); + +/* sblock_pcfg_create -- create preconfigured SBLOCK channel. + * + * @dst: dest processor ID + * @channel: channel ID + * @tx_blk_num: tx block number + * @tx_blk_sz: tx block size + * @rx_blk_num: rx block number + * @rx_blk_sz: rx block size + * @return: 0 on success, <0 on failure + * + * The function only allocates the memory for the channel, and will not + * open the channel. The client shall open the channel using + * sblock_pcfg_open and close the channel using sblock_close. + */ +int sblock_pcfg_create(u8 dst, u8 channel, + u32 tx_blk_num, u32 tx_blk_sz, + u32 rx_blk_num, u32 rx_blk_sz); + +/* sblock_pcfg_open -- request to open preconfigured SBLOCK channel. + * + * @dst: dest processor ID + * @channel: channel ID + * @notifier: the event notification callback function. This function can + * not sleep. If this parameter is NULL, no event will be + * reported. + * @event: SBLOCK_NOTIFY_GET, SBLOCK_NOTIFY_RECV, or both + * @client: opaque data passed to the receiver + * @return: if the channel is established, return 0; if the open procedure + * is started and not finished, return SIPC_ERR_IN_PROGRESS; + * otherwise return a negative error code. + * + * The function starts the open procedure. If the open procedure is not + * finished when the function returns, the SBLOCK system will report + * the open result later through the notifier callback. 
+ */ +int sblock_pcfg_open(uint8_t dest, uint8_t channel, + void (*notifier)(int event, void *client), + void *client); + +/* sblock_close -- request to close SBLOCK channel. + * + * @dst: dest processor ID + * @channel: channel ID + * @return: if the channel is closed, return 0; if the close procedure + * is started and not finished, return SIPC_ERR_IN_PROGRESS; + * otherwise return a negative error code. + * + * The function starts the close procedure. If the close procedure is not + * finished when the function returns, the SBLOCK system will report + * the close result later through the notification callback that the + * client set by sblock_pcfg_open. + */ +int sblock_close(uint8_t dest, uint8_t channel); + +/* sblock_get_smem_cp_addr - get the shared memory CP address. + * @dest: destination ID + * @channel: channel number + * @paddr: pointer to the variable to receive the address. + */ +int sblock_get_smem_cp_addr(uint8_t dest, uint8_t channel, + uint32_t *paddr); +/** + * sblock_destroy -- destroy sblock manager on a channel + * + * @dst: dest processor ID + * @channel: channel ID + */ +void sblock_destroy(u8 dst, u8 channel); +void sblock_down(u8 dst, u8 channel); + +#define SBLOCK_NOTIFY_GET 0x01 +#define SBLOCK_NOTIFY_RECV 0x02 +#define SBLOCK_NOTIFY_STATUS 0x04 +#define SBLOCK_NOTIFY_OPEN 0x08 +#define SBLOCK_NOTIFY_CLOSE 0x10 +#define SBLOCK_NOTIFY_OPEN_FAILED 0x20 + +/** + * sblock_register_notifier -- register a callback that's called + * when a tx sblock is available or a rx block is received. + * non-blocked sblock_get or sblock_receive can be called. + * + * @dst: dest processor ID + * @channel: channel ID + * @handler: a callback handler + * @event: SBLOCK_NOTIFY_GET, SBLOCK_NOTIFY_RECV, or both + * @data: opaque data passed to the receiver + * @return: 0 on success, <0 on failure + */ +int sblock_register_notifier(u8 dst, u8 channel, + void (*handler)(int event, void *data), void *data); + +/** + * sblock_get -- get a free sblock for sender + * + * @dst: dest processor ID + * @channel: channel ID + * @blk: return a gotten sblock pointer + * @timeout: milliseconds, 0 means no wait, -1 means unlimited + * @return: 0 on success, <0 on failure + */ +int sblock_get(u8 dst, u8 channel, struct sblock *blk, int timeout); + +/** + * sblock_send -- send a sblock with smsg, it should be from sblock_get + * + * @dst: dest processor ID + * @channel: channel ID + * @blk: the sblock to be sent + * @return: 0 on success, <0 on failure + */ +int sblock_send(u8 dst, u8 channel, struct sblock *blk); + +/** + * sblock_send_prepare -- send a sblock without smsg, + * it should be from sblock_get + * + * @dst: dest processor ID + * @channel: channel ID + * @blk: the sblock to be sent + * @return: 0 on success, <0 on failure + */ +int sblock_send_prepare(u8 dst, u8 channel, struct sblock *blk); + +/** + * sblock_send_finish -- trigger an smsg to notify that sblock has been sent + * + * @dst: dest processor ID + * @channel: channel ID + * @return: 0 on success, <0 on failure + */ +int sblock_send_finish(u8 dst, u8 channel); + +/** + * sblock_receive -- receive a sblock, it should be released after it's handled + * + * @dst: dest processor ID + * @channel: channel ID + * @blk: return a received sblock pointer + * @timeout: milliseconds, 0 means no wait, -1 means unlimited + * @return: 0 on success, <0 on failure + */ +int sblock_receive(u8 dst, u8 channel, + struct sblock *blk, int timeout); + +/** + * sblock_release -- release a sblock from reveiver + * + * @dst: dest processor ID + * @channel: 
channel ID + * @return: 0 on success, <0 on failure + */ +int sblock_release(u8 dst, u8 channel, struct sblock *blk); + +/** + * sblock_get_arrived_count -- get the count of sblock(s) arrived at + * AP (sblock_send on CP) but not received (sblock_receive on AP). + * + * @dst: dest processor ID + * @channel: channel ID + * @return: >=0 the count of blocks + */ +int sblock_get_arrived_count(u8 dst, u8 channel); + + + +/** + * sblock_get_free_count -- get the count of available sblock(s) resident in + * sblock pool on AP. + * + * @dst: dest processor ID + * @channel: channel ID + * @return: >=0 the count of blocks + */ +int sblock_get_free_count(u8 dst, u8 channel); + + +/** + * sblock_put -- put a free sblock for sender + * + * @dst: dest processor ID + * @channel: channel ID + * @blk: sblock pointer + * @return: void + */ +void sblock_put(u8 dst, u8 channel, struct sblock *blk); + +/** + * sblock_poll_wait -- poll sblock read/write + * + * @dst: dest processor ID + * @channel: channel ID + * @filp: strcut file handle + * @wait: poll table + * @return: POLLIN or POLLOUT + */ +unsigned int sblock_poll_wait(u8 dst, u8 channel, struct file *filp, poll_table *wait); + +/** + * sblock_query -- sblock query status + * + * @dst: dest processor ID + * @channel: channel ID + * @return: 0 on success, <0 on failure + */ +int sblock_query(u8 dst, u8 channel); + + +/* ****************************************************************** */ + +#define SIPX_ACK_BLK_LEN (100) + +/** + * sipx_chan_create -- create a sipx channel + * + * @dst: dest processor ID + * @channel: channel ID + * @return: 0 on success, <0 on failure + */ +int sipx_chan_create(u8 dst, u8 channel); + +/** + * sipx_chan_destroy -- destroy seblock manager on a channel + * + * @dst: dest processor ID + * @channel: channel ID + */ +int sipx_chan_destroy(u8 dst, u8 channel); + +/** + * sipx_get_ack_blk_len -- get sipx ack block max length + * + * @dst: dest processor ID + * @return: length + */ +u32 sipx_get_ack_blk_len(u8 dst); + +/** + * sipx_get -- get a free sblock for sender + * + * @dst: dest processor ID + * @channel: channel ID + * @blk: return a gotten sblock pointer + * @is_ack: if want to get block for ack packet + * @return: 0 on success, <0 on failure + */ +int sipx_get(u8 dst, u8 channel, struct sblock *blk, int is_ack); + +/** + * sipx_chan_register_notifier -- register a callback that's called + * when a tx sblock is available or a rx block is received. + * on-blocked sblock_get or sblock_receive can be called. 
+ * + * @dst: dest processor ID + * @channel: channel ID + * @handler: a callback handler + * @event: SBLOCK_NOTIFY_GET, SBLOCK_NOTIFY_RECV, or both + * @data: opaque data passed to the receiver + * @return: 0 on success, <0 on failure + */ +int sipx_chan_register_notifier(u8 dst, u8 channel, + void (*handler)(int event, void *data), void *data); + +/** + * sipx_send -- send a sblock with smsg, it should be from seblock_get + * + * @dst: dest processor ID + * @channel: channel ID + * @blk: the sblock to be sent + * @return: 0 on success, <0 on failure + */ +int sipx_send(u8 dst, u8 channel, struct sblock *blk); + +/** + * sipx_flush -- trigger an smsg to notify that sblock has been sent + * + * @dst: dest processor ID + * @channel: channel ID + * @return: 0 on success, <0 on failure + */ +int sipx_flush(u8 dst, u8 channel); + +/** + * sipx_receive -- receive a sblock, it should be released after it's handled + * + * @dst: dest processor ID + * @channel: channel ID + * @blk: return a received sblock pointer + * @return: 0 on success, <0 on failure + */ +int sipx_receive(u8 dst, u8 channel, struct sblock *blk); + +/** + * sipx_release -- release a sblock from reveiver + * + * @dst: dest processor ID + * @channel: channel ID + * @return: 0 on success, <0 on failure + */ +int sipx_release(u8 dst, u8 channel, struct sblock *blk); + +/** + * sipx_get_arrived_count -- get the count of sblock(s) arrived at + * AP (sblock_send on CP) but not received (sblock_receive on AP). + * + * @dst: dest processor ID + * @channel: channel ID + * @return: >=0 the count of blocks + */ +int sipx_get_arrived_count(u8 dst, u8 channel); + +/** + * sipx_get_free_count -- get the count of available sblock(s) resident in + * normal pool on AP. + * + * @dst: dest processor ID + * @channel: channel ID + * @return: >=0 the count of blocks + */ +int sipx_get_free_count(u8 dst, u8 channel); + +/** + * sipx_put -- put a free sblock for sender + * + * @dst: dest processor ID + * @channel: channel ID + * @blk: sblock pointer + * @return: void + */ +int sipx_put(u8 dst, u8 channel, struct sblock *blk); + +/* ****************************************************************** */ + +#ifdef CONFIG_SPRD_SIPC_ZERO_COPY_SIPX + +#define SBLOCK_CREATE(dst, channel,\ + txblocknum, txblocksize, txpoolsize, \ + rxblocknum, rxblocksize, rxpoolsize) \ +sipx_chan_create(dst, channel) + + +#define SBLOCK_DESTROY(dst, channel) \ + sipx_chan_destroy(dst, channel) + +#define SBLOCK_GET(dst, channel, blk, ack, timeout) \ + sipx_get(dst, channel, blk, ack) + +#define SBLOCK_REGISTER_NOTIFIER(dst, channel, handler, data) \ + sipx_chan_register_notifier(dst, channel, handler, data) + +#define SBLOCK_SEND(dst, channel, blk) \ + sipx_send(dst, channel, blk) + +#define SBLOCK_SEND_PREPARE(dst, channel, blk) \ + sipx_send(dst, channel, blk) + +#define SBLOCK_SEND_FINISH(dst, channel)\ + sipx_flush(dst, channel) + +#define SBLOCK_RECEIVE(dst, channel, blk, timeout) \ + sipx_receive(dst, channel, blk) + +#define SBLOCK_RELEASE(dst, channel, blk) \ + sipx_release(dst, channel, blk) + +#define SBLOCK_GET_ARRIVED_COUNT(dst, channel) \ + sipx_get_arrived_count(dst, channel) + +#define SBLOCK_GET_FREE_COUNT(dst, channel) \ + sipx_get_free_count(dst, channel) + +#define SBLOCK_PUT(dst, channel, blk) \ + sipx_put(dst, channel, blk) + + +#else /* CONFIG_SPRD_SIPC_ZERO_COPY_SIPX */ + +#define SBLOCK_CREATE(dst, channel,\ + txblocknum, txblocksize, txpoolsize, \ + rxblocknum, rxblocksize, rxpoolsize) \ +sblock_create(dst, channel,\ + txblocknum, txblocksize,\ + 
rxblocknum, rxblocksize) + +#define SBLOCK_DESTROY(dst, channel) \ + sblock_destroy(dst, channel) + +#define SBLOCK_GET(dst, channel, blk, ack, timeout) \ + sblock_get(dst, channel, blk, timeout) + +#define SBLOCK_REGISTER_NOTIFIER(dst, channel, handler, data) \ + sblock_register_notifier(dst, channel, handler, data) + +#define SBLOCK_SEND(dst, channel, blk) \ + sblock_send(dst, channel, blk) + +#define SBLOCK_SEND_PREPARE(dst, channel, blk) \ + sblock_send_prepare(dst, channel, blk) + +#define SBLOCK_SEND_FINISH(dst, channel)\ + sblock_send_finish(dst, channel) + +#define SBLOCK_RECEIVE(dst, channel, blk, timeout) \ + sblock_receive(dst, channel, blk, timeout) + +#define SBLOCK_RELEASE(dst, channel, blk) \ + sblock_release(dst, channel, blk) + +#define SBLOCK_GET_ARRIVED_COUNT(dst, channel) \ + sblock_get_arrived_count(dst, channel) + +#define SBLOCK_GET_FREE_COUNT(dst, channel) \ + sblock_get_free_count(dst, channel) + +#define SBLOCK_PUT(dst, channel, blk) \ + sblock_put(dst, channel, blk) + +#endif /* CONFIG_SPRD_SIPC_ZERO_COPY_SIPX */ + +#ifdef CONFIG_ARM64 +/** + * unalign_copy_from_user -- unaligned data accesses to addresses + * marked as device will always trigger an exception, this fuction + * can avoid this exception + * + * @to: dest, normal memory + * @from: src, device memory and alignment access must be considered + * @n: bytes + * @return: bytes not copied + */ +static inline unsigned long unalign_copy_to_user(void __user *to, + const void *from, + unsigned long n) +{ + /* from is not 8 byte aligned and n is less than 16 bytes */ + if (((unsigned long)from & 7) && (n < 16)) { + while (n) { + if (copy_to_user(to++, from++, 1)) + break; + n--; + } + return n; + } + + return copy_to_user(to, from, n); +} + +/** + * unalign_copy_from_user -- unaligned data accesses to addresses + * marked as device will always trigger an exception, this fuction + * can avoid this exception + * + * @to: dest, device memory and alignment access must be considered + * @from: src, normal memory + * @n: bytes + * @return: bytes not copied + */ +static inline unsigned long unalign_copy_from_user(void *to, + const void __user *from, + unsigned long n) +{ + unsigned c1, c2, c3; + + /* to is 8 byte aligned and n is less than 16 bytes */ + c1 = !((unsigned long)to & 0x7) && (n < 16); + if (c1) + return copy_from_user(to, from, n); + + /* to and from are 8 byte aligned */ + c2 = !((unsigned long)to & 0x7) && !((unsigned long)from & 0x7); + if (c2) + return copy_from_user(to, from, n); + + /* to and from are the same offset and n is more than 15 bytes */ + c3 = !(((unsigned long)to ^ (unsigned long)from) & 0x7) && (n > 15); + if (c3) + return copy_from_user(to, from, n); + + while (n) { + if (copy_from_user(to++, from++, 1)) + break; + n--; + } + + return n; +} + +static inline void unalign_memcpy(void *to, const void *from, size_t n) +{ + if (((unsigned long)to & 7) == ((unsigned long)from & 7)) { + while (((unsigned long)from & 7) && n) { + *(char *)(to++) = *(char *)(from++); + n--; + } + memcpy(to, from, n); + } else if (((unsigned long)to & 3) == ((unsigned long)from & 3)) { + while (((unsigned long)from & 3) && n) { + *(char *)(to++) = *(char *)(from++); + n--; + } + while (n >= 4) { + *(u32 *)(to) = *(u32 *)(from); + to += 4; + from += 4; + n -= 4; + } + while (n) { + *(char *)(to++) = *(char *)(from++); + n--; + } + } else { + while (n) { + *(char *)(to++) = *(char *)(from++); + n--; + } + } +} +#else +static inline unsigned long unalign_copy_to_user(void __user *to, + const void *from, + unsigned 
long n) +{ + return copy_to_user(to, from, n); +} +static inline unsigned long unalign_copy_from_user(void *to, + const void __user *from, + unsigned long n) +{ + return copy_from_user(to, from, n); +} +static inline void *unalign_memcpy(void *to, const void *from, size_t n) +{ + return memcpy(to, from, n); +} +#endif + +#endif diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/include/sipc_big_to_little.h b/package/wwan/driver/quectel_SRPD_PCIE/src/include/sipc_big_to_little.h new file mode 100644 index 000000000..d70e61b0b --- /dev/null +++ b/package/wwan/driver/quectel_SRPD_PCIE/src/include/sipc_big_to_little.h @@ -0,0 +1,85 @@ +/* + * Copyright (C) 2019 Spreadtrum Communications Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __SIPC_BIG_TO_LITTLE_H +#define __SIPC_BIG_TO_LITTLE_H +//#define CONFIG_SIPC_BIG_TO_LITTLE /* sipc little */ + +#define BL_READB(addr) \ + ({ unsigned char __v = (*(volatile unsigned char *) (addr)); __v; }) +#define BL_WRITEB(b,addr) (void)((*(volatile unsigned int *) (addr)) = (b)) + +#define BL_GETB(v) ((v)) +#define BL_SETB(v, b) ((v) = (b)) + + +#ifdef CONFIG_SIPC_BIG_TO_LITTLE +/* little 0x78563412 + 0x12 + 0x34 + 0x56 + 0x78 + read: + big: 0x12345678==>0x78563412 + write: 0x78563412 ===> 0x12345678*/ +#define BL_READW(addr) \ + ({ unsigned short __t = (*(volatile unsigned short *) (addr)); \ + unsigned short __v = ((__t & 0x00ff) << 8) + ((__t & 0xff00) >> 8); \ + __v; }) +#define BL_READL(addr) \ + ({ unsigned int __t = (*(volatile unsigned int *) (addr)); \ + unsigned int __v = ((__t & 0x000000ff) << 24) + ((__t & 0x0000ff00) << 8) + \ + ((__t & 0x00ff0000) >> 8) + ((__t & 0xff000000) >> 24); \ + __v; }) + +#define BL_WRITEW(b,addr) \ + ({ unsigned short __v = (((b) & 0x00ff) << 8) + (((b) & 0xff00) >> 8); \ + (*(volatile unsigned short *) (addr)) = __v; }) + +#define BL_WRITEL(b,addr) \ + ({ unsigned int __v = (((b) & 0x000000ff) << 24) + (((b) & 0xff00) >> 8) + \ + (((b) & 0x00ff0000) >> 8) + (((b) & 0xff000000) >> 24); \ + (*(volatile unsigned int *) (addr)) = __v; }) + +#define BL_GETL(v) \ +({unsigned int __v = (((v) & 0x000000ff) << 24) + (((v) & 0x0000ff00) << 8) + \ + (((v) & 0x00ff0000) >> 8) + (((v) & 0xff000000) >> 24); \ + __v; }) +#define BL_SETL(v, b) \ + ((v) = (((b) & 0x000000ff) << 24) + (((b) & 0x0000ff00) << 8) + \ + (((b) & 0x00ff0000) >> 8) + (((b) & 0xff000000) >> 24)) +#define BL_GETW(v) \ + ({unsigned int __v = (((v) & 0x00ff) << 8) + (((v) & 0xff00) >> 8); \ + __v; }) +#define BL_SETW(v, b) \ + ((v) = (((b) & 0x00ff) << 8) + (((b) & 0xff00) >> 8)) + +#else +#define BL_GETW(v) v +#define BL_GETL(v) v + +#define BL_SETW(v, b) ((v) = (b)) +#define BL_SETL(v, b) ((v) = (b)) + +#define BL_READW(addr) \ + ({ unsigned short __v = (*(volatile unsigned short *) (addr)); __v; }) +#define BL_READL(addr) \ + ({ unsigned int __v = (*(volatile unsigned int *) (addr)); __v; }) + +#define BL_WRITEW(b,addr) (void)((*(volatile unsigned short *) (addr)) = (b)) +#define BL_WRITEL(b,addr) (void)((*(volatile unsigned int *) (addr)) = (b)) + +#endif + +#endif + diff --git 
a/package/wwan/driver/quectel_SRPD_PCIE/src/include/sprd_mpm.h b/package/wwan/driver/quectel_SRPD_PCIE/src/include/sprd_mpm.h
new file mode 100644
index 000000000..f9ffe7457
--- /dev/null
+++ b/package/wwan/driver/quectel_SRPD_PCIE/src/include/sprd_mpm.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2019 Spreadtrum Communications Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+ /* MPM: modem power manager
+  * PMS: power management source, which is used to request
+  * a modem power management resource.
+  */
+#ifndef _SPRD_MPM_H
+#define _SPRD_MPM_H
+/*
+ * MPM modem power manager source state definitions:
+ * if in the idle state, we can release
+ * the related resources (such as pcie) of the modem.
+ */
+enum {
+	SPRD_MPM_IDLE = 0,
+	SPRD_MPM_BUSY
+};
+
+/*
+ * @sprd_pms: the power manager source data struct,
+ * used to request a wake lock or a modem resource.
+ *
+ * @name: the name of a pms.
+ * @data: the pointer to the MPM.
+ * @multitask: whether to support multitasking, default is false.
+ *             false, the source can only be used in single task context.
+ *             true, the source can be used in multitask context.
+ * @awake: whether to stay awake.
+ * @awake_cnt: total awake times.
+ * @pre_awake_cnt: previous awake count.
+ * @active_cnt: the active counter of the pms.
+ * @expires: the timer expires value.
+ * @active_lock: used to protect the active_cnt member.
+ * @expires_lock: used to protect the expires member.
+ * @entry: an entry in the list of all pms.
+ * @wake_timer: used to delay releasing the wakelock.
+ */
+struct sprd_pms {
+	const char *name;
+	void *data;
+	bool multitask;
+	bool awake;
+	unsigned int awake_cnt;
+	unsigned int pre_awake_cnt;
+	unsigned int active_cnt;
+	unsigned long expires;
+	spinlock_t active_lock;
+	spinlock_t expires_lock;
+	struct list_head entry;
+	struct timer_list wake_timer;
+};
+
+/**
+ * sprd_mpm_create - create a modem power manager source instance.
+ *
+ * @dst, which mpm (PSCP, SP, WCN, etc.) will be created.
+ * @later_idle, will release resource later (in ms).
+ */
+int sprd_mpm_create(unsigned int dst,
+		    const char *name,
+		    unsigned int later_idle);
+
+/**
+ * sprd_mpm_init_resource_ops - init resource ops for mpm.
+ *
+ * @wait_resource, used to wait until the requested resource is ready.
+ * @request_resource, used to request a resource.
+ * @release_resource, used to release a resource.
+ */
+int sprd_mpm_init_resource_ops(unsigned int dst,
+			       int (*wait_resource)(unsigned int dst,
+						    int timeout),
+			       int (*request_resource)(unsigned int dst),
+			       int (*release_resource)(unsigned int dst));
+
+/**
+ * sprd_mpm_destroy - destroy a modem power manager source instance.
+ *
+ * @dst, which mpm (PSCP, SP, WCN, etc.) will be destroyed.
+ */
+int sprd_mpm_destroy(unsigned int dst);
+
+/**
+ * sprd_pms_create - init a pms,
+ * used by a module to request a modem power management resource.
+ * The pms interfaces are not safe in multi-thread or multi-cpu contexts;
+ * if you want to use them from multiple threads, please use the pms_ext
+ * interface.
+ *
+ * @dst, the mpm this pms belongs to.
+ * @name, the name of this pms.
+ * @pms, the pointer of this pms.
+ * @multitask: support multitask.
+ *
+ * Returns: NULL on failure, a valid pointer on success.
+ */
+struct sprd_pms *sprd_pms_create(unsigned int dst,
+				 const char *name, bool multitask);
+
+/**
+ * sprd_pms_destroy - destroy a pms.
+ *
+ * @pms, the pointer of this pms.
+ */
+void sprd_pms_destroy(struct sprd_pms *pms);
+
+/**
+ * sprd_pms_request_resource - request mpm resource
+ *
+ * @pms, the pointer of this pms.
+ * @timeout, in ms.
+ *
+ * Returns:
+ *  0 resource ready,
+ *  < 0 resource not ready,
+ *  -%ERESTARTSYS if it was interrupted by a signal.
+ */
+int sprd_pms_request_resource(struct sprd_pms *pms, int timeout);
+
+/**
+ * sprd_pms_release_resource - release mpm resource.
+ *
+ * @pms, the pointer of this pms.
+ */
+void sprd_pms_release_resource(struct sprd_pms *pms);
+
+/**
+ * sprd_pms_request_wakelock - request wakelock
+ *
+ * @pms, the pointer of this pms.
+ */
+void sprd_pms_request_wakelock(struct sprd_pms *pms);
+
+/**
+ * sprd_pms_release_wakelock - release wakelock
+ *
+ * @pms, the pointer of this pms.
+ */
+void sprd_pms_release_wakelock(struct sprd_pms *pms);
+
+/**
+ * sprd_pms_request_wakelock_period -
+ * request a wakelock that will be released automatically after msec ms.
+ *
+ * @pms, the pointer of this pms.
+ * @msec, automatic release delay, in ms.
+ */
+void sprd_pms_request_wakelock_period(struct sprd_pms *pms, unsigned int msec);
+
+/**
+ * sprd_pms_release_wakelock_later - release wakelock later.
+ *
+ * @pms, the pointer of this pms.
+ * @msec, later time (in ms).
+ */
+void sprd_pms_release_wakelock_later(struct sprd_pms *pms,
+				     unsigned int msec);
+
+/**
+ * sprd_pms_power_up - just power up, does not wait for the result.
+ *
+ * @pms, the pointer of this pms.
+ */
+void sprd_pms_power_up(struct sprd_pms *pms);
+
+/**
+ * sprd_pms_power_down - just power down.
+ *
+ * @pms, the pointer of this pms.
+ * @immediately, whether to power down immediately.
+ */
+void sprd_pms_power_down(struct sprd_pms *pms, bool immediately);
+
+#endif
diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/include/sprd_pcie_ep_device.h b/package/wwan/driver/quectel_SRPD_PCIE/src/include/sprd_pcie_ep_device.h
new file mode 100644
index 000000000..7b6245610
--- /dev/null
+++ b/package/wwan/driver/quectel_SRPD_PCIE/src/include/sprd_pcie_ep_device.h
@@ -0,0 +1,99 @@
+/**
+ * SPRD ep device driver in host side for Spreadtrum SoCs
+ *
+ * Copyright (C) 2019 Spreadtrum Co., Ltd.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is used to control the ep device driver on the host side
+ * for Spreadtrum SoCs.
+ */ + +#ifndef __SPRD_PCIE_EP_DEVICE_H +#define __SPRD_PCIE_EP_DEVICE_H + +#include + +/* host receive msi irq */ +enum { + PCIE_MSI_SIPC_IRQ = 0, + PCIE_MSI_REQUEST_RES, + PCIE_MSI_EP_READY_FOR_RESCAN, + PCIE_MSI_RELEASE_RES, + PCIE_MSI_SCANNED_RESPOND, + PCIE_MSI_REMOVE_RESPOND, + PCIE_MSI_IPA, + PCIE_MSI_MAX_IRQ +}; + +/* host send doorbell irq */ +enum { + PCIE_DBELL_SIPC_IRQ = 0, + PCIE_DBEL_EP_SCANNED, + PCIE_DBEL_EP_REMOVING, + PCIE_DBEL_IRQ_MAX +}; + +enum { + PCIE_EP_MODEM = 0, + /* PCIE_EP_WCN, */ + PCIE_EP_NR +}; + +enum { + PCIE_EP_PROBE = 0, + PCIE_EP_REMOVE, + PCIE_EP_PROBE_BEFORE_SPLIT_BAR +}; + +#ifdef CONFIG_SPRD_SIPA +enum { + PCIE_IPA_TYPE_MEM = 0, + PCIE_IPA_TYPE_REG +}; +#endif + +#define MINI_REGION_SIZE 0x10000 /*64 K default */ + +int sprd_ep_dev_register_notify(int ep, + void (*notify)(int event, void *data), + void *data); +int sprd_ep_dev_unregister_notify(int ep); +int sprd_ep_dev_register_irq_handler(int ep, + int irq, + irq_handler_t handler, + void *data); +int sprd_ep_dev_unregister_irq_handler(int ep, int irq); +int sprd_ep_dev_register_irq_handler_ex(int ep, + int from_irq, + int to_irq, + irq_handler_t handler, + void *data); +int sprd_ep_dev_unregister_irq_handler_ex(int ep, + int from_irq, + int to_irq); +int sprd_ep_dev_set_irq_addr(int ep, void __iomem *irq_addr); +int sprd_ep_dev_raise_irq(int ep, int irq); +int sprd_ep_dev_clear_doolbell_irq(int ep, int irq); +int sprd_ep_dev_set_backup(int ep); +int sprd_ep_dev_clear_backup(int ep); + +void __iomem *sprd_ep_map_memory(int ep, + phys_addr_t cpu_addr, + size_t size); +void sprd_ep_unmap_memory(int ep, const void __iomem *bar_addr); +int sprd_ep_dev_pass_smem(int ep, u32 base, u32 size); +int sipa_module_init(struct device *dev); +void sipa_module_exit(void); +int sipa_eth_init(void); +void sipa_eth_exit(void); +int sipa_dummy_init(void); +void sipa_dummy_exit(void); + +#ifdef CONFIG_SPRD_SIPA +phys_addr_t sprd_ep_ipa_map(int type, phys_addr_t target_addr, size_t size); +int sprd_ep_ipa_unmap(int type, phys_addr_t cpu_addr); +#endif +#endif diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/include/sprd_pcie_resource.h b/package/wwan/driver/quectel_SRPD_PCIE/src/include/sprd_pcie_resource.h new file mode 100644 index 000000000..902de45cf --- /dev/null +++ b/package/wwan/driver/quectel_SRPD_PCIE/src/include/sprd_pcie_resource.h @@ -0,0 +1,107 @@ +/* + * Copyright (C) 2019 Spreadtrum Communications Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */
+
+ /* mpms: modem power manager source */
+#ifndef _SPRD_PCIE_RESOURCE_H
+#define _SPRD_PCIE_RESOURCE_H
+
+#ifdef CONFIG_SPRD_PCIE_EP_DEVICE
+#include
+#endif
+
+#if 0
+//#undef pr_debug
+//#define pr_debug pr_emerg
+
+#undef pr_info
+#define pr_info pr_emerg
+
+#undef pr_err
+#define pr_err pr_emerg
+
+#undef dev_dbg
+#define dev_dbg dev_emerg
+
+#undef dev_info
+#define dev_info dev_emerg
+
+#undef dev_err
+#define dev_err dev_emerg
+#endif
+
+#if defined(CONFIG_SPRD_PCIE_EP_DEVICE) || defined(CONFIG_PCIE_EPF_SPRD)
+/*
+ * sprd_pcie_wait_resource
+ * Returns:
+ * 0 resource ready,
+ * < 0 resource not ready,
+ * -%ERESTARTSYS if it was interrupted by a signal.
+ */
+int sprd_pcie_wait_resource(u32 dst, int timeout);
+
+int sprd_pcie_request_resource(u32 dst);
+int sprd_pcie_release_resource(u32 dst);
+int sprd_pcie_resource_trash(u32 dst);
+bool sprd_pcie_is_defective_chip(void);
+#else
+/* dummy functions */
+static inline int sprd_pcie_wait_resource(u32 dst, int timeout) {return 0; }
+
+static inline int sprd_pcie_request_resource(u32 dst) {return 0; }
+static inline int sprd_pcie_release_resource(u32 dst) {return 0; }
+static inline int sprd_pcie_resource_trash(u32 dst) {return 0; }
+static inline bool sprd_pcie_is_defective_chip(void) {return false; }
+#endif
+
+#ifdef CONFIG_PCIE_EPF_SPRD
+int sprd_pcie_resource_client_init(u32 dst, u32 ep_fun);
+int sprd_register_pcie_resource_first_ready(u32 dst,
+					    void (*notify)(void *p),
+					    void *data);
+#endif
+
+#ifdef CONFIG_SPRD_PCIE_EP_DEVICE
+int sprd_pcie_resource_host_init(u32 dst, u32 ep_dev,
+				 struct platform_device *pcie_dev);
+
+/*
+ * sprd_pcie_resource_reboot_ep
+ * rebooting the ep includes rescanning the ep device.
+ */
+void sprd_pcie_resource_reboot_ep(u32 dst);
+
+/*
+ * sprd_pcie_wait_load_resource
+ * When the CONFIG_PCIE_SPRD_SPLIT_BAR feature is enabled, the host-side
+ * boot process scans PCIe twice. After the first scan the ep has only
+ * 2 BARs available for memory mapping, so the pcie resource is not
+ * completely ready, but the host can already load images for the ep.
+ * The special api sprd_pcie_wait_load_resource therefore returns after
+ * the first scan action.
+ * Returns:
+ * 0 resource ready,
+ * < 0 resource not ready,
+ * -%ERESTARTSYS if it was interrupted by a signal.
+ */
+int sprd_pcie_wait_load_resource(u32 dst);
+
+
+/* Because the ep BARs can only be split by the ep itself, notify the
+ * pcie resource layer that it can rescan the ep once all modem images
+ * have been loaded.
+ */
+void sprd_pcie_resource_notify_load_done(u32 dst);
+#endif /* CONFIG_SPRD_PCIE_EP_DEVICE */
+
+#endif /* _SPRD_PCIE_RESOURCE_H */
diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/mcd/Kconfig b/package/wwan/driver/quectel_SRPD_PCIE/src/mcd/Kconfig
new file mode 100644
index 000000000..a7f3db524
--- /dev/null
+++ b/package/wwan/driver/quectel_SRPD_PCIE/src/mcd/Kconfig
@@ -0,0 +1,7 @@
+config SPRD_MCD
+	tristate "SPRD modem power control module"
+	default n
+	help
+	  mcd is a module for the spreadtrum AP/CP communication control
+	  driver; it can control modem power on/off and trigger the modem
+	  events assert, watchdog reset, and panic.
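Before the mcd Makefile and implementation, a short illustrative sketch of how another kernel module could consume the notifier chain exported through mdm_ctrl.h (modem_ctrl_register_notifier/modem_ctrl_unregister_notifier). This is not part of the patch; the handler and variable names are hypothetical.

    /* Illustrative only -- not part of the patch. */
    static int demo_mdm_notify(struct notifier_block *nb,
    			   unsigned long action, void *data)
    {
    	switch (action) {
    	case MDM_WATCHDOG_RESET:
    	case MDM_ASSERT:
    	case MDM_PANIC:
    		/* The modem reported an abnormal state; stop CP traffic. */
    		pr_warn("demo: modem abnormal event %lu\n", action);
    		break;
    	default:
    		break;
    	}
    	return NOTIFY_DONE;
    }

    static struct notifier_block demo_mdm_nb = {
    	.notifier_call = demo_mdm_notify,
    };

    static int __init demo_init(void)
    {
    	return modem_ctrl_register_notifier(&demo_mdm_nb);
    }

    static void __exit demo_exit(void)
    {
    	modem_ctrl_unregister_notifier(&demo_mdm_nb);
    }

The chain is atomic (see the ATOMIC_NOTIFIER_HEAD in modem_ctrl.c below), so the callback must not sleep.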
diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/mcd/Makefile b/package/wwan/driver/quectel_SRPD_PCIE/src/mcd/Makefile new file mode 100644 index 000000000..78cb07515 --- /dev/null +++ b/package/wwan/driver/quectel_SRPD_PCIE/src/mcd/Makefile @@ -0,0 +1 @@ +obj-y += modem_ctrl.o diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/mcd/modem_ctrl.c b/package/wwan/driver/quectel_SRPD_PCIE/src/mcd/modem_ctrl.c new file mode 100644 index 000000000..720b92340 --- /dev/null +++ b/package/wwan/driver/quectel_SRPD_PCIE/src/mcd/modem_ctrl.c @@ -0,0 +1,814 @@ +/* + * Copyright (C) 2019 Spreadtrum Communications Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_PCIE_PM_NOTIFY +#include +#endif + +#include "../include/sprd_pcie_resource.h" +#include "../include/sipc.h" +#include "../include/mdm_ctrl.h" + +enum { + ROC1_SOC = 0, + ORCA_SOC +}; + +static char *const mdm_stat[] = { + "mdm_power_off", "mdm_power_on", "mdm_warm_reset", "mdm_cold_reset", + "mdm_watchdog_reset", "mdm_assert", "mdm_panic" +}; + +#define REBOOT_MODEM_DELAY 1000 +#define POWERREST_MODEM_DELAY 2000 +#define RESET_MODEM_DELAY 50 + +char cdev_name[] = "mdm_ctrl"; + +struct modem_ctrl_init_data { + char *name; + struct gpio_desc *gpio_poweron; /* Poweron */ + struct gpio_desc *gpio_reset; /* Reset modem */ + struct gpio_desc *gpio_preset; /* Pcie reset */ + struct gpio_desc *gpio_cpwatchdog; + struct gpio_desc *gpio_cpassert; + struct gpio_desc *gpio_cppanic; + struct gpio_desc *gpio_cppoweroff; + u32 irq_cpwatchdog; + u32 irq_cpassert; + u32 irq_cppanic; + u32 irq_cppoweroff; + u32 modem_status; + bool enable_cp_event; +}; + +struct modem_ctrl_device { + struct modem_ctrl_init_data *init; + int major; + int minor; + struct cdev cdev; + struct device *dev; + int soc_type; +}; + +static struct class *modem_ctrl_class; +static struct modem_ctrl_device *mcd_dev; + +/* modem control evnet notify */ +static ATOMIC_NOTIFIER_HEAD(modem_ctrl_chain); + +int modem_ctrl_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&modem_ctrl_chain, nb); +} +EXPORT_SYMBOL(modem_ctrl_register_notifier); + +void modem_ctrl_unregister_notifier(struct notifier_block *nb) +{ + atomic_notifier_chain_unregister(&modem_ctrl_chain, nb); +} +EXPORT_SYMBOL(modem_ctrl_unregister_notifier); + +static void send_event_msg(struct kobject *kobj) +{ + char *msg[3]; + char buff[100]; + char mbuff[100]; + + memset(mbuff, 0, sizeof(mbuff)); + if (!mcd_dev || !mcd_dev->init || !kobj) + return; + + snprintf(buff, sizeof(buff), "MODEM_STAT=%d", + mcd_dev->init->modem_status); + snprintf(mbuff, sizeof(mbuff), "MODEM_EVENT=%s", + mdm_stat[mcd_dev->init->modem_status]); + msg[0] = buff; + msg[1] = mbuff; + msg[2] = NULL; + kobject_uevent_env(kobj, KOBJ_CHANGE, msg); + dev_dbg(mcd_dev->dev, "send uevent to userspace\n"); +} + +static irqreturn_t cpwatchdogtriger_handler(int irq, void *dev_id) +{ + if (!mcd_dev || !mcd_dev->init || 
!mcd_dev->init->enable_cp_event) + return IRQ_NONE; + + mcd_dev->init->modem_status = MDM_WATCHDOG_RESET; + atomic_notifier_call_chain(&modem_ctrl_chain, MDM_WATCHDOG_RESET, NULL); + send_event_msg(&mcd_dev->dev->kobj); + return IRQ_HANDLED; +} + +static irqreturn_t cpasserttriger_handler(int irq, void *dev_id) +{ + if (!mcd_dev || !mcd_dev->init || !mcd_dev->init->enable_cp_event) + return IRQ_NONE; + + mcd_dev->init->modem_status = MDM_ASSERT; + atomic_notifier_call_chain(&modem_ctrl_chain, MDM_ASSERT, NULL); + send_event_msg(&mcd_dev->dev->kobj); + return IRQ_HANDLED; +} + +static irqreturn_t cppanictriger_handler(int irq, void *dev_id) +{ + if (!mcd_dev || !mcd_dev->init || !mcd_dev->init->enable_cp_event) + return IRQ_NONE; + + mcd_dev->init->modem_status = MDM_PANIC; + atomic_notifier_call_chain(&modem_ctrl_chain, MDM_PANIC, NULL); + send_event_msg(&mcd_dev->dev->kobj); + return IRQ_HANDLED; +} + +static irqreturn_t cppoweroff_handler(int irq, void *dev_id) +{ + if (!mcd_dev || !mcd_dev->init) + return IRQ_NONE; + /* To this reserve here for receve power off event from AP*/ + atomic_notifier_call_chain(&modem_ctrl_chain, + MDM_POWER_OFF, NULL); + kernel_power_off(); + return IRQ_HANDLED; +} + +static int request_gpio_to_irq(struct gpio_desc *cp_gpio, + struct modem_ctrl_device *mcd_dev) +{ + int ret = 0; + + if (!mcd_dev || !mcd_dev->init) + return -EINVAL; + + ret = gpiod_to_irq(cp_gpio); + if (ret < 0) { + dev_err(mcd_dev->dev, "requset irq %d failed\n", ret); + return ret; + } + dev_dbg(mcd_dev->dev, "gpio to irq %d\n", ret); + if (cp_gpio == mcd_dev->init->gpio_cpwatchdog) { + mcd_dev->init->irq_cpwatchdog = ret; + ret = devm_request_threaded_irq(mcd_dev->dev, + mcd_dev->init->irq_cpwatchdog, + NULL, cpwatchdogtriger_handler, + IRQF_ONESHOT | IRQF_TRIGGER_FALLING, + "cpwatchdog_irq", mcd_dev); + if (ret < 0) { + dev_err(mcd_dev->dev, "can not request irq for cp watchdog\n"); + return ret; + } + enable_irq_wake(mcd_dev->init->irq_cpwatchdog); + } else if (cp_gpio == mcd_dev->init->gpio_cpassert) { + mcd_dev->init->irq_cpassert = ret; + ret = devm_request_threaded_irq(mcd_dev->dev, + mcd_dev->init->irq_cpassert, + NULL, cpasserttriger_handler, + IRQF_ONESHOT | IRQF_TRIGGER_FALLING, + "cpassert_irq", mcd_dev); + if (ret < 0) { + dev_err(mcd_dev->dev, "can not request irq for cp assert\n"); + return ret; + } + enable_irq_wake(mcd_dev->init->irq_cpassert); + } else if (cp_gpio == mcd_dev->init->gpio_cppanic) { + mcd_dev->init->irq_cppanic = ret; + ret = devm_request_threaded_irq(mcd_dev->dev, + mcd_dev->init->irq_cppanic, + NULL, cppanictriger_handler, + IRQF_ONESHOT | IRQF_TRIGGER_FALLING, + "cppanic_irq", mcd_dev); + if (ret < 0) { + dev_err(mcd_dev->dev, + "can not request irq for panic\n"); + return ret; + } + enable_irq_wake(mcd_dev->init->irq_cppanic); + } else if (cp_gpio == mcd_dev->init->gpio_cppoweroff) { + mcd_dev->init->irq_cppoweroff = ret; + ret = devm_request_threaded_irq(mcd_dev->dev, + mcd_dev->init->irq_cppoweroff, + NULL, cppoweroff_handler, + IRQF_ONESHOT | IRQF_TRIGGER_LOW, + "cppoweroff_irq", mcd_dev); + if (ret < 0) { + dev_err(mcd_dev->dev, + "can not request irq for cppoweroff\n"); + return ret; + } + enable_irq_wake(mcd_dev->init->irq_cppoweroff); + } + return 0; +} + +static int modem_gpios_init(struct modem_ctrl_device *mcd_dev, int soc_type) +{ + int ret; + + if (!mcd_dev || !mcd_dev->init) + return -EINVAL; + if (soc_type == ROC1_SOC) { + gpiod_direction_input(mcd_dev->init->gpio_cpwatchdog); + gpiod_direction_input(mcd_dev->init->gpio_cpassert); + 
gpiod_direction_input(mcd_dev->init->gpio_cppanic);
+
+		ret = request_gpio_to_irq(mcd_dev->init->gpio_cpwatchdog,
+					  mcd_dev);
+		if (ret)
+			return ret;
+		ret = request_gpio_to_irq(mcd_dev->init->gpio_cpassert,
+					  mcd_dev);
+		if (ret)
+			return ret;
+		ret = request_gpio_to_irq(mcd_dev->init->gpio_cppanic,
+					  mcd_dev);
+		if (ret)
+			return ret;
+
+		/* IRQF_TRIGGER_LOW, default must be set to high */
+		gpiod_set_value_cansleep(mcd_dev->init->gpio_cppoweroff, 1);
+	} else {
+		gpiod_direction_input(mcd_dev->init->gpio_cppoweroff);
+		ret = request_gpio_to_irq(mcd_dev->init->gpio_cppoweroff,
+					  mcd_dev);
+		if (ret)
+			return ret;
+
+		/* TRIGGER_FALLING, default must be set to high */
+		gpiod_set_value_cansleep(mcd_dev->init->gpio_cpwatchdog, 1);
+		gpiod_set_value_cansleep(mcd_dev->init->gpio_cpassert, 1);
+		gpiod_set_value_cansleep(mcd_dev->init->gpio_cppanic, 1);
+	}
+	return 0;
+}
+
+void modem_ctrl_enable_cp_event(void)
+{
+	if (mcd_dev && mcd_dev->init)
+		mcd_dev->init->enable_cp_event = true;
+}
+EXPORT_SYMBOL_GPL(modem_ctrl_enable_cp_event);
+
+void modem_ctrl_send_abnormal_to_ap(int status)
+{
+	struct gpio_desc *gpiodesc;
+
+	if (!mcd_dev || !mcd_dev->init)
+		return;
+	if (mcd_dev->soc_type != ORCA_SOC) {
+		dev_err(mcd_dev->dev, "operation not allowed for soc type %d\n",
+			mcd_dev->soc_type);
+		return;
+	}
+	switch (status) {
+	case MDM_WATCHDOG_RESET:
+		gpiodesc = mcd_dev->init->gpio_cpwatchdog;
+		break;
+	case MDM_ASSERT:
+		gpiodesc = mcd_dev->init->gpio_cpassert;
+		break;
+	case MDM_PANIC:
+		gpiodesc = mcd_dev->init->gpio_cppanic;
+		break;
+	default:
+		dev_info(mcd_dev->dev,
+			 "status %d is not valid for this operation\n", status);
+		return;
+	}
+	mcd_dev->init->modem_status = status;
+	dev_info(mcd_dev->dev,
+		 "abnormal status %d sent to ap\n",
+		 status);
+	if (!IS_ERR(gpiodesc))
+		gpiod_set_value_cansleep(gpiodesc, 0);
+}
+
+static void modem_ctrl_send_cmd_to_cp(int status)
+{
+	struct gpio_desc *gpiodesc = NULL;
+
+	if (!mcd_dev || !mcd_dev->init)
+		return;
+	if (mcd_dev->soc_type != ROC1_SOC) {
+		dev_err(mcd_dev->dev, "operation not allowed for soc type %d\n",
+			mcd_dev->soc_type);
+		return;
+	}
+	if (status == MDM_POWER_OFF)
+		gpiodesc = mcd_dev->init->gpio_cppoweroff;
+
+	mcd_dev->init->modem_status = status;
+	dev_info(mcd_dev->dev,
+		 "cmd %d sent to cp\n",
+		 status);
+	if (!IS_ERR(gpiodesc)) {
+		gpiod_set_value_cansleep(gpiodesc, 0);
+		msleep(20);
+		/* restore the line to its inactive (high) level */
+		gpiod_set_value_cansleep(gpiodesc, 1);
+	}
+}
+
+static void modem_ctrl_notify_abnormal_status(int status)
+{
+	if (!mcd_dev || !mcd_dev->init)
+		return;
+	if (mcd_dev->soc_type != ORCA_SOC) {
+		dev_err(mcd_dev->dev, "operation not allowed for soc type %d\n",
+			mcd_dev->soc_type);
+		return;
+	}
+	if (status < MDM_WATCHDOG_RESET || status > MDM_PANIC) {
+		dev_err(mcd_dev->dev,
+			"operation not allowed for status %d\n", status);
+		return;
+	}
+	modem_ctrl_send_abnormal_to_ap(status);
+}
+
+void modem_ctrl_poweron_modem(int on)
+{
+	if (!mcd_dev || !mcd_dev->init)
+		return;
+	switch (on) {
+	case MDM_CTRL_POWER_ON:
+		if (!IS_ERR(mcd_dev->init->gpio_poweron)) {
+			atomic_notifier_call_chain(&modem_ctrl_chain,
+						   MDM_CTRL_POWER_ON, NULL);
+			dev_info(mcd_dev->dev, "set modem_poweron: %d\n", on);
+			gpiod_set_value_cansleep(mcd_dev->init->gpio_poweron,
+						 1);
+			/* per the modem spec, the boot flow needs a 1 s wait */
+			msleep(REBOOT_MODEM_DELAY);
+			mcd_dev->init->modem_status = MDM_CTRL_POWER_ON;
+			gpiod_set_value_cansleep(mcd_dev->init->gpio_poweron,
+						 0);
+		}
+		break;
+	case MDM_CTRL_POWER_OFF:
+		/*
+		 * To do
+		 */
+		break;
+	case MDM_CTRL_SET_CFG:
+		/* To do */
+		break;
+	case MDM_CTRL_WARM_RESET:
+		if (!IS_ERR(mcd_dev->init->gpio_reset)) {
+			atomic_notifier_call_chain(&modem_ctrl_chain,
+						   MDM_CTRL_WARM_RESET, NULL);
+			dev_dbg(mcd_dev->dev, "set warm reset: %d\n", on);
+			gpiod_set_value_cansleep(mcd_dev->init->gpio_reset, 1);
+			/* per the modem spec, wait 50 ms here */
+			msleep(RESET_MODEM_DELAY);
+			mcd_dev->init->modem_status = MDM_CTRL_WARM_RESET;
+			gpiod_set_value_cansleep(mcd_dev->init->gpio_reset, 0);
+		}
+		break;
+	case MDM_CTRL_COLD_RESET:
+		if (!IS_ERR(mcd_dev->init->gpio_poweron)) {
+			mcd_dev->init->enable_cp_event = false;
+			atomic_notifier_call_chain(&modem_ctrl_chain,
+						   MDM_CTRL_COLD_RESET, NULL);
+			dev_info(mcd_dev->dev, "modem_power reset: %d\n", on);
+			gpiod_set_value_cansleep(mcd_dev->init->gpio_poweron,
+						 1);
+			/* per the modem boot-flow spec, wait 2 s here */
+			msleep(POWERREST_MODEM_DELAY);
+			mcd_dev->init->modem_status = MDM_CTRL_COLD_RESET;
+			gpiod_set_value_cansleep(mcd_dev->init->gpio_poweron,
+						 0);
+		}
+		break;
+	case MDM_CTRL_PCIE_RECOVERY:
+#ifdef CONFIG_PCIE_PM_NOTIFY
+		pcie_ep_pm_notify(PCIE_EP_POWER_OFF);
+		/* PCIe power-off to power-on needs 100 ms */
+		msleep(100);
+		pcie_ep_pm_notify(PCIE_EP_POWER_ON);
+#endif
+		break;
+	case MDM_POWER_OFF:
+		atomic_notifier_call_chain(&modem_ctrl_chain,
+					   MDM_POWER_OFF, NULL);
+		modem_ctrl_send_cmd_to_cp(MDM_POWER_OFF);
+		break;
+	default:
+		dev_err(mcd_dev->dev, "cmd not supported: %d\n", on);
+	}
+}
+EXPORT_SYMBOL_GPL(modem_ctrl_poweron_modem);
+
+#if defined(CONFIG_DEBUG_FS)
+static int modem_ctrl_debug_show(struct seq_file *m, void *private)
+{
+	dev_dbg(mcd_dev->dev, "%s\n", __func__);
+	return 0;
+}
+
+static int modem_ctrl_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, modem_ctrl_debug_show, inode->i_private);
+}
+
+static const struct file_operations modem_ctrl_debug_fops = {
+	.open = modem_ctrl_debug_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+#endif /* CONFIG_DEBUG_FS */
+
+static int modem_ctrl_open(struct inode *inode, struct file *filp)
+{
+	struct modem_ctrl_device *modem_ctrl;
+
+	modem_ctrl = container_of(inode->i_cdev,
+				  struct modem_ctrl_device, cdev);
+	filp->private_data = modem_ctrl;
+	dev_dbg(modem_ctrl->dev, "modem_ctrl: %s\n", __func__);
+	return 0;
+}
+
+static int modem_ctrl_release(struct inode *inode, struct file *filp)
+{
+	struct modem_ctrl_device *modem_ctrl;
+
+	modem_ctrl = container_of(inode->i_cdev,
+				  struct modem_ctrl_device, cdev);
+	dev_dbg(modem_ctrl->dev, "modem_ctrl: %s\n", __func__);
+
+	return 0;
+}
+
+static ssize_t modem_ctrl_read(struct file *filp,
+			       char __user *buf,
+			       size_t count,
+			       loff_t *ppos)
+{
+	char tmpbuf[30];
+	int r;
+	struct modem_ctrl_device *mcd_dev = filp->private_data;
+
+	if (!mcd_dev || !mcd_dev->init)
+		return -EINVAL;
+
+	r = snprintf(tmpbuf, sizeof(tmpbuf), "%s\n",
+		     mdm_stat[mcd_dev->init->modem_status]);
+
+	return simple_read_from_buffer(buf, count, ppos, tmpbuf, r);
+}
+
+static ssize_t modem_ctrl_write(struct file *filp,
+				const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	char sbuf[100];
+	int ret;
+	u32 mcd_cmd;
+	struct modem_ctrl_device *mcd_dev = filp->private_data;
+
+	if (!mcd_dev)
+		return -EINVAL;
+
+	/* reject empty writes and sizes the stack buffer cannot hold */
+	if (count == 0 || count >= sizeof(sbuf))
+		return -EINVAL;
+
+	if (unalign_copy_from_user((void *)sbuf, buf, count)) {
+		dev_err(mcd_dev->dev, "copy from user error\n");
+		return -EFAULT;
+	}
+	sbuf[count - 1] = '\0';
+	dev_dbg(mcd_dev->dev, "get info:%s", sbuf);
+	ret = kstrtouint(sbuf, 10, &mcd_cmd);
+	if (ret) {
+		dev_err(mcd_dev->dev, "Invalid input!\n");
input!\n"); + return ret; + } + if (mcd_dev->soc_type == ROC1_SOC) { + if (mcd_cmd >= MDM_CTRL_POWER_OFF && + mcd_cmd <= MDM_CTRL_SET_CFG) + modem_ctrl_poweron_modem(mcd_cmd); + else + dev_info(mcd_dev->dev, "cmd not support!\n"); + } else { + modem_ctrl_notify_abnormal_status(mcd_cmd); + } + return count; +} + +static long modem_ctrl_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + + if (!mcd_dev || mcd_dev->soc_type == ORCA_SOC) + return -EINVAL; + switch (cmd) { + case MDM_CTRL_POWER_OFF: + modem_ctrl_poweron_modem(MDM_CTRL_POWER_OFF); + break; + case MDM_CTRL_POWER_ON: + modem_ctrl_poweron_modem(MDM_CTRL_POWER_ON); + break; + case MDM_CTRL_WARM_RESET: + modem_ctrl_poweron_modem(MDM_CTRL_WARM_RESET); + break; + case MDM_CTRL_COLD_RESET: + modem_ctrl_poweron_modem(MDM_CTRL_COLD_RESET); + break; + case MDM_CTRL_PCIE_RECOVERY: + modem_ctrl_poweron_modem(MDM_CTRL_PCIE_RECOVERY); + break; + case MDM_CTRL_SET_CFG: + break; + default: + return -EINVAL; + } + return 0; +} + +static const struct file_operations modem_ctrl_fops = { + .open = modem_ctrl_open, + .release = modem_ctrl_release, + .read = modem_ctrl_read, + .write = modem_ctrl_write, + .unlocked_ioctl = modem_ctrl_ioctl, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static int modem_ctrl_parse_modem_dt(struct modem_ctrl_init_data **init, + struct device *dev) +{ + struct modem_ctrl_init_data *pdata = NULL; + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + pdata->name = cdev_name; + + /* Triger watchdog,assert,panic of orca */ + pdata->gpio_cpwatchdog = devm_gpiod_get(dev, + "cpwatchdog", + GPIOD_OUT_HIGH); + if (IS_ERR(pdata->gpio_cpwatchdog)) + return PTR_ERR(pdata->gpio_cpwatchdog); + + pdata->gpio_cpassert = devm_gpiod_get(dev, "cpassert", GPIOD_OUT_HIGH); + if (IS_ERR(pdata->gpio_cpassert)) + return PTR_ERR(pdata->gpio_cpassert); + + pdata->gpio_cppanic = devm_gpiod_get(dev, "cppanic", GPIOD_OUT_HIGH); + if (IS_ERR(pdata->gpio_cppanic)) + return PTR_ERR(pdata->gpio_cppanic); + + pdata->gpio_cppoweroff = devm_gpiod_get(dev, "cppoweroff", GPIOD_IN); + if (IS_ERR(pdata->gpio_cpassert)) + return PTR_ERR(pdata->gpio_cppoweroff); + + *init = pdata; + return 0; +} + +static int modem_ctrl_parse_dt(struct modem_ctrl_init_data **init, + struct device *dev) +{ + struct modem_ctrl_init_data *pdata; + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + pdata->name = cdev_name; + pdata->gpio_poweron = devm_gpiod_get(dev, "poweron", GPIOD_OUT_LOW); + if (IS_ERR(pdata->gpio_poweron)) + return PTR_ERR(pdata->gpio_poweron); + + pdata->gpio_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(pdata->gpio_reset)) + return PTR_ERR(pdata->gpio_reset); + + /* Triger watchdog,assert,panic of orca */ + pdata->gpio_cpwatchdog = devm_gpiod_get(dev, "cpwatchdog", GPIOD_IN); + if (IS_ERR(pdata->gpio_cpwatchdog)) + return PTR_ERR(pdata->gpio_cpwatchdog); + + pdata->gpio_cpassert = devm_gpiod_get(dev, "cpassert", GPIOD_IN); + if (IS_ERR(pdata->gpio_cpassert)) + return PTR_ERR(pdata->gpio_cpassert); + + pdata->gpio_cppanic = devm_gpiod_get(dev, "cppanic", GPIOD_IN); + if (IS_ERR(pdata->gpio_cppanic)) + return PTR_ERR(pdata->gpio_cppanic); + + pdata->gpio_cppoweroff = devm_gpiod_get(dev, + "cppoweroff", GPIOD_OUT_HIGH); + if (IS_ERR(pdata->gpio_cpassert)) + return PTR_ERR(pdata->gpio_cppoweroff); + + pdata->modem_status = MDM_CTRL_POWER_OFF; + *init = pdata; + return 0; +} + +static inline void +modem_ctrl_destroy_pdata(struct 
+{
+	/* the pdata is devm-allocated, so nothing to free; just drop it */
+	*init = NULL;
+}
+
+static int modem_ctrl_restart_handle(struct notifier_block *this,
+				     unsigned long mode, void *cmd)
+{
+	if (!mcd_dev || mcd_dev->soc_type == ROC1_SOC)
+		return NOTIFY_DONE;
+	modem_ctrl_notify_abnormal_status(MDM_PANIC);
+	/* tell the AP we are going down, then spin until it resets us */
+	while (1)
+		;
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block modem_ctrl_restart_handler = {
+	.notifier_call = modem_ctrl_restart_handle,
+	.priority = 150,
+};
+
+static int modem_ctrl_probe(struct platform_device *pdev)
+{
+	struct modem_ctrl_init_data *init = pdev->dev.platform_data;
+	struct modem_ctrl_device *modem_ctrl_dev;
+	dev_t devid;
+	int rval;
+	struct device *dev = &pdev->dev;
+
+	modem_ctrl_dev = devm_kzalloc(dev, sizeof(*modem_ctrl_dev), GFP_KERNEL);
+	if (!modem_ctrl_dev)
+		return -ENOMEM;
+	mcd_dev = modem_ctrl_dev;
+	if (of_device_is_compatible(pdev->dev.of_node, "sprd,roc1-modem-ctrl"))
+		modem_ctrl_dev->soc_type = ROC1_SOC;
+	else
+		modem_ctrl_dev->soc_type = ORCA_SOC;
+
+	if (modem_ctrl_dev->soc_type == ROC1_SOC) {
+		rval = modem_ctrl_parse_dt(&init, &pdev->dev);
+		if (rval) {
+			dev_err(dev,
+				"Failed to parse modem_ctrl device tree, ret=%d\n",
+				rval);
+			return rval;
+		}
+	} else {
+		rval = modem_ctrl_parse_modem_dt(&init, &pdev->dev);
+		if (rval) {
+			dev_err(dev,
+				"Failed to parse modem_ctrl device tree, ret=%d\n",
+				rval);
+			return rval;
+		}
+	}
+
+	dev_dbg(dev, "after parse device tree, name=%s soctype=%d\n",
+		init->name, modem_ctrl_dev->soc_type);
+
+	rval = alloc_chrdev_region(&devid, 0, 1, init->name);
+	if (rval != 0) {
+		dev_err(dev, "Failed to alloc modem_ctrl chrdev\n");
+		goto error3;
+	}
+	cdev_init(&modem_ctrl_dev->cdev, &modem_ctrl_fops);
+	rval = cdev_add(&modem_ctrl_dev->cdev, devid, 1);
+	if (rval != 0) {
+		dev_err(dev, "Failed to add modem_ctrl cdev\n");
+		goto error2;
+	}
+
+	modem_ctrl_dev->major = MAJOR(devid);
+	modem_ctrl_dev->minor = MINOR(devid);
+	modem_ctrl_dev->dev = device_create(modem_ctrl_class, NULL,
+					    MKDEV(modem_ctrl_dev->major,
+						  modem_ctrl_dev->minor),
+					    NULL, "%s", init->name);
+	/* device_create() returns ERR_PTR() on failure, never NULL */
+	if (IS_ERR(modem_ctrl_dev->dev)) {
+		dev_err(dev, "create dev failed\n");
+		rval = PTR_ERR(modem_ctrl_dev->dev);
+		goto error1;
+	}
+	modem_ctrl_dev->init = init;
+	platform_set_drvdata(pdev, modem_ctrl_dev);
+	rval = modem_gpios_init(modem_ctrl_dev, modem_ctrl_dev->soc_type);
+	if (rval) {
+		dev_err(dev, "request gpios error\n");
+		goto error0;
+	}
+
+	rval = register_restart_handler(&modem_ctrl_restart_handler);
+	if (rval) {
+		dev_err(dev, "cannot register restart handler err=%d\n", rval);
+		goto error0;
+	}
+	return 0;
+error0:
+	device_destroy(modem_ctrl_class,
+		       MKDEV(modem_ctrl_dev->major,
+			     modem_ctrl_dev->minor));
+error1:
+	cdev_del(&modem_ctrl_dev->cdev);
+error2:
+	unregister_chrdev_region(devid, 1);
+error3:
+	modem_ctrl_destroy_pdata(&init);
+	return rval;
+}
+
+static int modem_ctrl_remove(struct platform_device *pdev)
+{
+	struct modem_ctrl_device *modem_ctrl_dev = platform_get_drvdata(pdev);
+
+	/* pairs with register_restart_handler() in probe */
+	unregister_restart_handler(&modem_ctrl_restart_handler);
+	device_destroy(modem_ctrl_class,
+		       MKDEV(modem_ctrl_dev->major,
+			     modem_ctrl_dev->minor));
+	cdev_del(&modem_ctrl_dev->cdev);
+	unregister_chrdev_region(MKDEV(modem_ctrl_dev->major,
+				       modem_ctrl_dev->minor), 1);
+	modem_ctrl_destroy_pdata(&modem_ctrl_dev->init);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static void modem_ctrl_shutdown(struct platform_device *pdev)
+{
+	if (mcd_dev->soc_type == ROC1_SOC) {
+		atomic_notifier_call_chain(&modem_ctrl_chain,
+					   MDM_POWER_OFF, NULL);
+		/*
+		 * Sleep 50 ms to give other modules time to do their work
+		 * before orca powers down.
+		 */
+		msleep(50);
+		modem_ctrl_send_cmd_to_cp(MDM_POWER_OFF);
+		/*
+		 * Sleep 500 ms so the CP can finish its power-down flow;
+		 * otherwise the CP will not power down cleanly.
+		 */
+		msleep(500);
+	}
+}
+
+static const struct of_device_id modem_ctrl_match_table[] = {
+	{ .compatible = "sprd,roc1-modem-ctrl", },
+	{ .compatible = "sprd,orca-modem-ctrl", },
+	{ /* sentinel */ },
+};
+
+static struct platform_driver modem_ctrl_driver = {
+	.driver = {
+		.name = "modem_ctrl",
+		.of_match_table = modem_ctrl_match_table,
+	},
+	.probe = modem_ctrl_probe,
+	.remove = modem_ctrl_remove,
+	.shutdown = modem_ctrl_shutdown,
+};
+
+int modem_ctrl_init(void)
+{
+	modem_ctrl_class = class_create(THIS_MODULE, "modem_ctrl");
+	if (IS_ERR(modem_ctrl_class))
+		return PTR_ERR(modem_ctrl_class);
+	return platform_driver_register(&modem_ctrl_driver);
+}
+EXPORT_SYMBOL_GPL(modem_ctrl_init);
+
+void modem_ctrl_exit(void)
+{
+	platform_driver_unregister(&modem_ctrl_driver);
+	class_destroy(modem_ctrl_class);
+}
+EXPORT_SYMBOL_GPL(modem_ctrl_exit);
diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/pcie/Kconfig b/package/wwan/driver/quectel_SRPD_PCIE/src/pcie/Kconfig
new file mode 100644
index 000000000..ac88510e7
--- /dev/null
+++ b/package/wwan/driver/quectel_SRPD_PCIE/src/pcie/Kconfig
@@ -0,0 +1,7 @@
+
+config SPRD_PCIE_EP_DEVICE
+	tristate "SPRD PCIE EP device"
+	default n
+	depends on PCI
+	help
+	  SPRD PCIe EP device driver on the host side for Spreadtrum SoCs.
\ No newline at end of file
diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/pcie/Makefile b/package/wwan/driver/quectel_SRPD_PCIE/src/pcie/Makefile
new file mode 100644
index 000000000..0f62c8a12
--- /dev/null
+++ b/package/wwan/driver/quectel_SRPD_PCIE/src/pcie/Makefile
@@ -0,0 +1,6 @@
+ccflags-y += -DCONFIG_SPRD_PCIE_EP_DEVICE -DCONFIG_SPRD_SIPA
+obj-y += sprd_pcie_ep_device.o
+obj-y += pcie_host_resource.o
+obj-y += sprd_pcie_quirks.o
+obj-$(CONFIG_PCIE_EPF_SPRD) += pcie_client_resource.o
+obj-$(CONFIG_SPRD_SIPA_RES) += pcie_sipa_res.o
diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/pcie/pcie_client_resource.c b/package/wwan/driver/quectel_SRPD_PCIE/src/pcie/pcie_client_resource.c
new file mode 100644
index 000000000..74e5d72ff
--- /dev/null
+++ b/package/wwan/driver/quectel_SRPD_PCIE/src/pcie/pcie_client_resource.c
@@ -0,0 +1,528 @@
+/*
+ * Copyright (C) 2019 Spreadtrum Communications Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../include/sprd_pcie_resource.h" +#ifdef CONFIG_SPRD_SIPA_RES +#include "pcie_sipa_res.h" +#endif + +enum ep_msg { + RC_SCANNED_MSG = 0, + RC_REMOVING_MSG, + EPC_UNLINK_MSG, + EPC_LINKUP_MSG +}; + +enum pcie_ep_state { + SPRD_PCIE_WAIT_FIRST_READY = 0, + SPRD_PCIE_WAIT_SCANNED, + SPRD_PCIE_SCANNED, + SPRD_PCIE_WAIT_REMOVED, + SPRD_PCIE_REMOVED, + SPRD_PCIE_WAIT_POWER_OFF +}; + +struct sprd_pci_res_notify { + void (*notify)(void *p); + void *data; +}; + +struct sprd_pcie_res { + u32 dst; + u32 ep_fun; + enum pcie_ep_state state; + bool msi_later; + bool wakeup_later; + +#ifdef CONFIG_SPRD_SIPA_RES + void *sipa_res; +#endif + + /* + * in client(Orca), The PCIE module wll blocks the chip Deep, + * so we must get a wake lock when pcie work to avoid this situation: + * the system is deep, but the PCIE is still working. + */ + struct wakeup_source ws; + wait_queue_head_t wait_pcie_ready; + struct sprd_pci_res_notify first_ready_notify; +}; + +static struct sprd_pcie_res *g_pcie_res[SIPC_ID_NR]; + +/* the state machine of ep, init SPRD_PCIE_WAIT_FIRST_READY. + * SPRD_PCIE_WAIT_FIRST_READY (receive RC scanned) ==> SPRD_PCIE_SCANNED + * SPRD_PCIE_SCANNED (receive RC removing)==> SPRD_PCIE_WAIT_REMOVED + * SPRD_PCIE_WAIT_REMOVED(receive epc unlink)==>SPRD_PCIE_REMOVED + * SPRD_PCIE_REMOVED(receive epc linkup)==>SPRD_PCIE_WAIT_SCANNED + * SPRD_PCIE_WAIT_SCANNED(receive RC scanned)==>SPRD_PCIE_SCANNED + * SPRD_PCIE_WAIT_POWER_OFF can do nothing, just wait shutdown. + */ +static const char *change_msg[EPC_LINKUP_MSG + 1] = { + "rc scanned", + "rc removing", + "epc unlink", + "epc linkup" +}; + +static const char *state_msg[SPRD_PCIE_REMOVED + 1] = { + "wait first ready", + "wait sacanned", + "scanned", + "wait remove", + "removed" +}; + +static void pcie_resource_client_change_state(struct sprd_pcie_res *res, + enum ep_msg msg) +{ + u32 old_state = res->state; + + if (old_state == SPRD_PCIE_WAIT_POWER_OFF) + return; + + pr_debug("pcie res: change state msg=%s, old_state=%s.\n", + change_msg[msg], state_msg[old_state]); + + switch (msg) { + case RC_SCANNED_MSG: + if (old_state != SPRD_PCIE_WAIT_FIRST_READY + && old_state != SPRD_PCIE_WAIT_SCANNED) { + pr_err("pcie res: %s msg err, old state=%s", + change_msg[msg], state_msg[old_state]); + return; + } + res->state = SPRD_PCIE_SCANNED; + break; + + case RC_REMOVING_MSG: + if (old_state != SPRD_PCIE_SCANNED) { + pr_err("pcie res: %s msg err, old state=%s", + change_msg[msg], state_msg[old_state]); + return; + } + res->state = SPRD_PCIE_WAIT_REMOVED; + break; + + case EPC_UNLINK_MSG: + if (old_state != SPRD_PCIE_WAIT_REMOVED) { + if (old_state != SPRD_PCIE_WAIT_FIRST_READY) + pr_err("pcie res: %s msg err, old state=%s", + change_msg[msg], state_msg[old_state]); + return; + } + res->state = SPRD_PCIE_REMOVED; + break; + + case EPC_LINKUP_MSG: + if (old_state != SPRD_PCIE_REMOVED) { + if (old_state != SPRD_PCIE_WAIT_FIRST_READY) + pr_err("pcie res: %s msg err, old state=%s", + change_msg[msg], state_msg[old_state]); + return; + } + res->state = SPRD_PCIE_WAIT_SCANNED; + break; + } + + pr_info("pcie res: change state from %s to %s.\n", + state_msg[old_state], state_msg[res->state]); +} + +static void sprd_pcie_resource_first_ready_notify(struct sprd_pcie_res *res) +{ + void (*notify)(void *p); + + pr_info("pcie res: first ready.\n"); + +#ifdef CONFIG_SPRD_SIPA_RES + /* + * in client side, producer res id is 
SIPA_RM_RES_PROD_PCIE_EP, + * consumer res id is SIPA_RM_RES_CONS_WWAN_DL. + */ + res->sipa_res = pcie_sipa_res_create(res->dst, + SIPA_RM_RES_PROD_PCIE_EP, + SIPA_RM_RES_CONS_WWAN_DL); + if (!res->sipa_res) + pr_err("pcie res:create ipa res failed.\n"); +#endif + + notify = res->first_ready_notify.notify; + if (notify) + notify(res->first_ready_notify.data); +} + +static void pcie_resource_client_epf_notify(int event, void *private) +{ + struct sprd_pcie_res *res = (struct sprd_pcie_res *)private; + + if (res->state == SPRD_PCIE_WAIT_POWER_OFF) + return; + + switch (event) { + case SPRD_EPF_BIND: + pr_info("pcie res: epf be binded.\n"); + if (sprd_pcie_is_defective_chip()) + sprd_pci_epf_raise_irq(res->ep_fun, + PCIE_MSI_EP_READY_FOR_RESCAN); + break; + + case SPRD_EPF_UNBIND: + pr_info("pcie res: epf be unbinded.\n"); + break; + + case SPRD_EPF_REMOVE: + pr_info("pcie res: epf be removed.\n"); + break; + + case SPRD_EPF_LINK_UP: + /* get a wakelock */ + __pm_stay_awake(&res->ws); + + pr_info("pcie res: epf linkup.\n"); + pcie_resource_client_change_state(res, EPC_LINKUP_MSG); + + /* first ready notify */ + if (res->state == SPRD_PCIE_WAIT_FIRST_READY) + sprd_pcie_resource_first_ready_notify(res); + + break; + + case SPRD_EPF_UNLINK: + /* Here need this log to debug pcie scan and remove */ + pr_info("pcie res: epf unlink.\n"); + pcie_resource_client_change_state(res, EPC_UNLINK_MSG); + + /* if has wakeup pending, send wakeup to rc */ + if (res->wakeup_later) { + res->wakeup_later = false; + pr_info("pcie res: send wakeup to rc.\n"); + if (sprd_pci_epf_start(res->ep_fun)) + pr_err("pcie res: send wakeup to rc failed.\n"); + } + + /* relax a wakelock */ + __pm_relax(&res->ws); + break; + + default: + break; + } +} + +static irqreturn_t pcie_resource_client_irq_handler(int irq, void *private) +{ + struct sprd_pcie_res *res = (struct sprd_pcie_res *)private; + + if (res->state == SPRD_PCIE_WAIT_POWER_OFF) + return IRQ_HANDLED; + + if (irq == PCIE_DBEL_EP_SCANNED) { + pcie_resource_client_change_state(res, RC_SCANNED_MSG); + + /* wakeup all blocked thread */ + pr_info("pcie res: scanned, wakup all.\n"); + wake_up_interruptible_all(&res->wait_pcie_ready); + + /* if has msi pending, send msi to rc */ + if (res->msi_later) { + res->msi_later = false; + pr_info("pcie res: request msi to rc.\n"); + sprd_pci_epf_raise_irq(res->ep_fun, + PCIE_MSI_REQUEST_RES); + } + } else if (irq == PCIE_DBEL_EP_REMOVING) { + pr_info("pcie res: removing.\n"); + pcie_resource_client_change_state(res, RC_REMOVING_MSG); + } + + return IRQ_HANDLED; +} + +static int sprd_pcie_resource_client_mcd(struct notifier_block *nb, + unsigned long mode, void *cmd) +{ + struct sprd_pcie_res *res; + int i; + + pr_info("pcie res: mcd event mode=%ld.\n", mode); + + if (mode != MDM_POWER_OFF) + return NOTIFY_DONE; + + for (i = 0; i < SIPC_ID_NR; i++) { + res = g_pcie_res[i]; + if (res) + res->state = SPRD_PCIE_WAIT_POWER_OFF; + } + + return NOTIFY_DONE; +} + +static struct notifier_block mcd_notify = { + .notifier_call = sprd_pcie_resource_client_mcd, + .priority = 149, +}; + +int sprd_pcie_resource_client_init(u32 dst, u32 ep_fun) +{ + struct sprd_pcie_res *res; + + if (dst >= SIPC_ID_NR) + return -EINVAL; + + res = kzalloc(sizeof(*res), GFP_KERNEL); + if (!res) + return -ENOMEM; + + res->dst = dst; + res->state = SPRD_PCIE_WAIT_FIRST_READY; + res->ep_fun = ep_fun; + + wakeup_source_init(&res->ws, "pcie_res"); + + init_waitqueue_head(&res->wait_pcie_ready); + sprd_pci_epf_register_irq_handler_ex(res->ep_fun, + PCIE_DBEL_EP_SCANNED, + 
PCIE_DBEL_EP_REMOVING, + pcie_resource_client_irq_handler, + res); + sprd_pci_epf_register_notify(res->ep_fun, + pcie_resource_client_epf_notify, + res); + + modem_ctrl_register_notifier(&mcd_notify); + + g_pcie_res[dst] = res; + + return 0; +} + +int sprd_pcie_resource_trash(u32 dst) +{ + struct sprd_pcie_res *res; + + if (dst >= SIPC_ID_NR || !g_pcie_res[dst]) + return -EINVAL; + + res = g_pcie_res[dst]; + +#ifdef CONFIG_SPRD_SIPA_RES + if (res->sipa_res) + pcie_sipa_res_destroy(res->sipa_res); +#endif + + sprd_pci_epf_unregister_irq_handler_ex(res->ep_fun, + PCIE_DBEL_EP_SCANNED, + PCIE_DBEL_EP_REMOVING); + sprd_pci_epf_unregister_notify(res->ep_fun); + modem_ctrl_unregister_notifier(&mcd_notify); + + kfree(res); + g_pcie_res[dst] = NULL; + + return 0; +} + +int sprd_pcie_wait_resource(u32 dst, int timeout) +{ + struct sprd_pcie_res *res; + int ret, wait; + unsigned long delay; + + if (dst >= SIPC_ID_NR || !g_pcie_res[dst]) + return -EINVAL; + + res = g_pcie_res[dst]; + + /* pcie ready, return succ immediately */ + if (res->state == SPRD_PCIE_SCANNED) + return 0; + + if (timeout == 0) + return -ETIME; + + if (timeout < 0) { + wait = wait_event_interruptible( + res->wait_pcie_ready, + res->state == SPRD_PCIE_SCANNED + ); + ret = wait; + } else { + /* + * timeout must add 1s, + * because the pcie rescan may took some time. + */ + delay = msecs_to_jiffies(timeout + 1000); + wait = wait_event_interruptible_timeout(res->wait_pcie_ready, + res->state == + SPRD_PCIE_SCANNED, + delay); + if (wait == 0) + ret = -ETIME; + else if (wait > 0) + ret = 0; + else + ret = wait; + } + + if (ret < 0 && ret != -ERESTARTSYS) + pr_err("pcie res: wait resource, val=%d.\n", ret); + + return ret; +} + +int sprd_pcie_request_resource(u32 dst) +{ + struct sprd_pcie_res *res; + int ret = 0; + + if (dst >= SIPC_ID_NR || !g_pcie_res[dst]) + return -EINVAL; + + res = g_pcie_res[dst]; + + if (res->state == SPRD_PCIE_WAIT_POWER_OFF) + return -EINVAL; + + pr_debug("pcie res: request res, state=%d.\n", res->state); + + switch (res->state) { + case SPRD_PCIE_WAIT_FIRST_READY: + case SPRD_PCIE_WAIT_SCANNED: + pr_info("pcie res: later send request msi to rc.\n"); + res->msi_later = true; + break; + + case SPRD_PCIE_WAIT_REMOVED: + pr_info("pcie res: later send wakeup to rc.\n"); + res->wakeup_later = true; + break; + + case SPRD_PCIE_SCANNED: + /* + * if pcie state is SCANNED, just send + * PCIE_MSI_REQUEST_RES to the host. + * After host receive res msi interrupt, + * it will increase one vote in modem power manger. + */ + pr_info("pcie res: send request msi to rc.\n"); + ret = sprd_pci_epf_raise_irq(res->ep_fun, + PCIE_MSI_REQUEST_RES); + break; + + case SPRD_PCIE_REMOVED: + /* + * if pcie state is removed, poll wake_up singnal + * to host, and he host will rescan the pcie. 
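+		 * (Sketch of the flow below: sprd_pci_epf_start() rings the
+		 * wakeup; if that fails, msi_later is set instead, so the
+		 * request MSI is raised once the link has been scanned again.)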
+ */ + pr_info("pcie res: send wakeup to rc.\n"); + if (sprd_pci_epf_start(res->ep_fun) == 0) + break; + + /* may receive ep reset, wait linkup and scanned */ + pr_info("pcie res: later send request msi to rc.\n"); + res->msi_later = true; + break; + + default: + pr_err("pcie res: request res err, state=%d.\n", + res->state); + ret = -EPERM; + break; + } + + return ret; +} + +int sprd_pcie_release_resource(u32 dst) +{ + struct sprd_pcie_res *res; + int ret = 0; + + if (dst >= SIPC_ID_NR || !g_pcie_res[dst]) + return -EINVAL; + + res = g_pcie_res[dst]; + + if (res->state == SPRD_PCIE_WAIT_POWER_OFF) + return -EINVAL; + + switch (res->state) { + case SPRD_PCIE_SCANNED: + /* + * if pcie state is SCANNED, send PCIE_MSI_RELEASE_RES + * to the host, else, do nothing. After host receive res msi + * interrupt, it will decrease one vote in modem power manger, + * and if modem power manger is idle, the host will remove + * the pcie. + */ + pr_info("pcie res: send release msi to rc.\n"); + ret = sprd_pci_epf_raise_irq(res->ep_fun, + PCIE_MSI_RELEASE_RES); + break; + + case SPRD_PCIE_WAIT_FIRST_READY: + /* if has msi pending, remove it */ + if (res->msi_later) + res->msi_later = false; + break; + + default: + pr_err("pcie res: release res state=%d.\n", res->state); + ret = -EPERM; + break; + } + + return ret; +} + +int sprd_register_pcie_resource_first_ready(u32 dst, + void (*notify)(void *p), void *data) +{ + struct sprd_pcie_res *res; + + if (dst >= SIPC_ID_NR || !g_pcie_res[dst]) + return -EINVAL; + + res = g_pcie_res[dst]; + + res->first_ready_notify.data = data; + res->first_ready_notify.notify = notify; + + return 0; +} + +bool sprd_pcie_is_defective_chip(void) +{ + static bool first_read = true, defective; + + if (first_read) { + first_read = false; + defective = sprd_kproperty_chipid("UD710-AB") == 0; + } + + return defective; +} diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/pcie/pcie_host_resource.c b/package/wwan/driver/quectel_SRPD_PCIE/src/pcie/pcie_host_resource.c new file mode 100644 index 000000000..5b753440d --- /dev/null +++ b/package/wwan/driver/quectel_SRPD_PCIE/src/pcie/pcie_host_resource.c @@ -0,0 +1,720 @@ +/* + * Copyright (C) 2019 Spreadtrum Communications Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_SPRD_SIPA_RES +#include "pcie_sipa_res.h" +#endif + +#include "../include/pcie-rc-sprd.h" +#include "../include/sipc.h" +//#include "../include/mdm_ctrl.h" +#include "../include/sprd_pcie_ep_device.h" +#include "../include/sprd_mpm.h" +#include "../include/sprd_pcie_resource.h" + +#define PCIE_REMOVE_SCAN_GAP msecs_to_jiffies(200) +#define MAX_PMS_WAIT_TIME 5000 +#define MAX_PMS_DEFECTIVE_CHIP_FIRST_WAIT_TIME (55 * 1000) + +enum rc_state { + SPRD_PCIE_WAIT_FIRST_READY = 0, + SPRD_PCIE_WAIT_SCANNED, + SPRD_PCIE_SCANNED, + SPRD_PCIE_WAIT_REMOVED, + SPRD_PCIE_REMOVED, + SPRD_PCIE_SCANNED_2BAR, + SPRD_PCIE_WAIT_POWER_OFF +}; + +struct sprd_pcie_res { + u32 dst; + u32 ep_dev; + u32 state; + u32 scan_cnt; + u32 max_wait_time; + bool ep_power_on; + bool ep_dev_probe; + bool smem_send_to_ep; + unsigned long action_jiff; + + struct sprd_pms *pms; + char pms_name[20]; + + wait_queue_head_t wait_pcie_ready; + bool ep_ready_for_rescan; + wait_queue_head_t wait_load_ready; + wait_queue_head_t wait_first_rescan; + struct task_struct *thread; + +#ifdef CONFIG_SPRD_SIPA_RES + void *sipa_res; +#endif +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,18,0 )) + struct wakeup_source *ws; +#else + struct wakeup_source ws; +#endif + struct work_struct scan_work; + struct work_struct remove_work; + struct workqueue_struct *wq; + + struct platform_device *pcie_dev; + struct sprd_pcie_register_event reg_event; +}; + +static int sprd_pcie_resource_rescan(struct sprd_pcie_res *res); + +static struct sprd_pcie_res *g_pcie_res[SIPC_ID_NR]; + +static void sprd_pcie_resource_host_first_rescan_do(struct sprd_pcie_res *res) +{ + int ret = sprd_pcie_register_event(&res->reg_event); + + if (ret) + pr_err("pcie res: register pci ret=%d.\n", ret); + + /* power up for ep after the first scan. */ + res->ep_power_on = true; + sprd_pms_power_up(res->pms); + +#ifdef CONFIG_SPRD_SIPA_RES + /* + * in host side, producer res id is SIPA_RM_RES_PROD_PCIE3, + * consumer res id is SIPA_RM_RES_CONS_WWAN_UL. 
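+	 * (This mirrors the client side, which pairs
+	 * SIPA_RM_RES_PROD_PCIE_EP with SIPA_RM_RES_CONS_WWAN_DL:
+	 * each side exposes its end of the PCIe link as the producer
+	 * that the local WWAN data-path consumer depends on.)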
+ */ + res->sipa_res = pcie_sipa_res_create(res->dst, + SIPA_RM_RES_PROD_PCIE3, + SIPA_RM_RES_CONS_WWAN_UL); + if (!res->sipa_res) + pr_err("pcie res:create ipa res failed.\n"); +#endif + +} + +static void sprd_pcie_resource_host_ep_notify(int event, void *data) +{ + struct sprd_pcie_res *res = (struct sprd_pcie_res *)data; + u32 base, size; + + /* wait power off, do nothing */ + if (res->state == SPRD_PCIE_WAIT_POWER_OFF) + return; + + switch (event) { + case PCIE_EP_PROBE: + /* set state to scanned */ + res->state = SPRD_PCIE_SCANNED; + res->scan_cnt++; + res->ep_dev_probe = true; + //modem_ctrl_enable_cp_event(); + + if (smem_get_area(SIPC_ID_MINIAP, &base, &size) == 0) + sprd_ep_dev_pass_smem(res->ep_dev, base, size); + + pr_info("pcie res: ep_notify, probed cnt=%d.\n", + res->scan_cnt); + + /* firsrt scan do somtehing */ + if (res->scan_cnt == 1) + sprd_pcie_resource_host_first_rescan_do(res); + + /* clear removed irq and notify ep scanned */ + sprd_ep_dev_clear_doolbell_irq(res->ep_dev, + PCIE_DBEL_EP_REMOVING); + sprd_ep_dev_raise_irq(res->ep_dev, PCIE_DBEL_EP_SCANNED); + + /* wakeup all blocked thread */ + wake_up_interruptible_all(&res->wait_pcie_ready); + break; + + case PCIE_EP_REMOVE: + pr_info("pcie res: ep_notify, removed.\n"); + res->state = SPRD_PCIE_REMOVED; + res->ep_dev_probe = false; + break; + + case PCIE_EP_PROBE_BEFORE_SPLIT_BAR: + res->state = SPRD_PCIE_SCANNED_2BAR; + res->ep_dev_probe = true; + pr_info("pcie res: probed before split bar.\n"); + if (!res->ep_ready_for_rescan) { + wake_up_interruptible_all(&res->wait_load_ready); + } else { + pr_info("pcie res: bar err, rescan.\n"); + sprd_pcie_resource_rescan(res); + } + break; + + default: + break; + } +} + +static irqreturn_t sprd_pcie_resource_host_irq_handler(int irq, void *private) +{ + struct sprd_pcie_res *res = (struct sprd_pcie_res *)private; + + if (irq == PCIE_MSI_REQUEST_RES) { + pr_info("pcie res: ep request res.\n"); + /* + * client modem power up, + * no need wake lock and no need wait resource. + */ + if (!res->ep_power_on) { + res->ep_power_on = true; + sprd_pms_power_up(res->pms); + } + + /* only after received ep request can backup the ep configs. */ + sprd_ep_dev_set_backup(res->ep_dev); + } else if (irq == PCIE_MSI_RELEASE_RES) { + pr_info("pcie res: ep release res.\n"); + /* + * client modem power down, + * no need wake lock. + */ + if (res->ep_power_on) { + res->ep_power_on = false; + sprd_pms_power_down(res->pms, false); + } + } else if (irq == PCIE_MSI_EP_READY_FOR_RESCAN) { + pr_info("pcie res: ep ready for rescan.\n"); + res->ep_ready_for_rescan = true; + wake_up_interruptible_all(&res->wait_first_rescan); + } + + return IRQ_HANDLED; +} + +static void sprd_pcie_resource_scan_fn(struct work_struct *work) +{ + unsigned long diff; + unsigned int delay; + int ret; + struct sprd_pcie_res *res = container_of(work, struct sprd_pcie_res, + scan_work); + + /* wait power off, do nothing */ + if (res->state == SPRD_PCIE_WAIT_POWER_OFF) + return; + + /* request wakelock */ + sprd_pms_request_wakelock(res->pms); + + diff = jiffies - res->action_jiff; + if (diff < PCIE_REMOVE_SCAN_GAP) { + /* must ensure that the scan starts after a period of remove. 
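+		 * Worked example: PCIE_REMOVE_SCAN_GAP is 200 ms, so if only
+		 * 120 ms have elapsed since the last remove, delay comes out
+		 * at ~80 ms and we sleep that remainder before scanning.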
*/ + delay = jiffies_to_msecs(PCIE_REMOVE_SCAN_GAP - diff); + msleep(delay); + } + + pr_info("pcie res: scan\n"); + + ret = sprd_pcie_configure_device(res->pcie_dev); + if (ret) + pr_err("pcie res: scan error = %d!\n", ret); + + /* record the last scan jiffies */ + res->action_jiff = jiffies; + + /* release wakelock */ + sprd_pms_release_wakelock(res->pms); +} + +static void sprd_pcie_resource_remove_fn(struct work_struct *work) +{ + unsigned long diff; + unsigned int delay; + int ret; + struct sprd_pcie_res *res = container_of(work, struct sprd_pcie_res, + remove_work); + /* request wakelock */ + sprd_pms_request_wakelock(res->pms); + + pr_info("pcie res: remove work!\n"); + + diff = jiffies - res->action_jiff; + if (diff < PCIE_REMOVE_SCAN_GAP) { + /* must ensure that the remove starts after a period of scan. */ + delay = jiffies_to_msecs(PCIE_REMOVE_SCAN_GAP - diff); + msleep(delay); + } + + /* + * in wait power off state, or ep device is not probing, + * can't access ep. + */ + if (res->state == SPRD_PCIE_WAIT_POWER_OFF || + !res->ep_dev_probe) { + /* release wakelock */ + sprd_pms_release_wakelock(res->pms); + return; + } + + /* notify ep removed, must before removed */ + sprd_ep_dev_clear_doolbell_irq(res->ep_dev, PCIE_DBEL_EP_SCANNED); + sprd_ep_dev_raise_irq(res->ep_dev, PCIE_DBEL_EP_REMOVING); + + /* waiting for the doorbell irq to ep */ + msleep(50); + + pr_info("pcie res: remove\n"); + + /* start removed ep*/ + ret = sprd_pcie_unconfigure_device(res->pcie_dev); + if (ret) + pr_err("pcie res: remove error = %d.\n!", ret); + + /* record the last remov jiffies */ + res->action_jiff = jiffies; + + /* release wakelock */ + sprd_pms_release_wakelock(res->pms); +} + +static void sprd_pcie_resource_start_scan(struct sprd_pcie_res *res) +{ + if (res->state == SPRD_PCIE_SCANNED || + res->state == SPRD_PCIE_WAIT_SCANNED) { + pr_info("pcie res: scanned, do nothing!\n"); + } else { + pr_info("pcie res: start scan!\n"); + queue_work(res->wq, &res->scan_work); + } +} + +static void sprd_pcie_resource_start_remove(struct sprd_pcie_res *res) +{ + /* wait power off, do nothing */ + if (res->state == SPRD_PCIE_WAIT_POWER_OFF) + return; + + if (res->state == SPRD_PCIE_SCANNED || + res->state == SPRD_PCIE_WAIT_FIRST_READY + || (res->state == SPRD_PCIE_SCANNED_2BAR) + ) { + res->state = SPRD_PCIE_WAIT_REMOVED; + pr_info("pcie res: start remove."); + queue_work(res->wq, &res->remove_work); + } else { + pr_err("pcie res: start remove, err=%d.", res->state); + } +} + +static void sprd_pcie_resource_event_process(enum sprd_pcie_event event, + void *data) +{ + struct sprd_pcie_res *res = data; + + if (event == SPRD_PCIE_EVENT_WAKEUP) { + pr_info("pcie res: wakeup by ep, event=%d.\n", event); + if (!res->ep_power_on) { + res->ep_power_on = true; + sprd_pms_power_up(res->pms); + } + } +} + +/* + * sprd_pcie_resource_rescan + * Because the ep bar can only be split by ep itself, + * After all modem images be loaded at the first time, + * the ep will run and split 2 64bit bar to 4 32bit bar. + * host must rescan the pcie ep device agian by this api, + * after receive ep driver ready for rescan msg and all + * modem images load done. 
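+ * (Sequence, as wired up in this file: modem images load ->
+ * sprd_pcie_resource_notify_load_done() -> the first-rescan kthread
+ * waits for PCIE_MSI_EP_READY_FOR_RESCAN -> sprd_pcie_resource_rescan(),
+ * which is simply a remove followed by a scan.)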
+ */ +static int sprd_pcie_resource_rescan(struct sprd_pcie_res *res) +{ + pr_info("pcie res: rescan.\n"); + + sprd_pcie_resource_start_remove(res); + sprd_pcie_resource_start_scan(res); + + return 0; +} + +static int sprd_pcie_resource_check_first_rescan(void *data) +{ + struct sprd_pcie_res *res = data; + int ret; + + pr_info("pcie res: check first rescan.\n"); + + while (!kthread_should_stop()) { + ret = wait_event_interruptible( + res->wait_first_rescan, + res->ep_ready_for_rescan); + if (!ret) { + pr_info("pcie res:first resacn ready.\n"); + sprd_pcie_resource_rescan(res); + break; + } + } + + /* After the first rescan, restore the normal wait time. */ + if (sprd_pcie_is_defective_chip()) + res->max_wait_time = MAX_PMS_WAIT_TIME; + + res->thread = NULL; + return 0; +} + +#if 0 +static int sprd_pcie_resource_host_mcd(struct notifier_block *nb, + unsigned long mode, void *cmd) +{ + struct sprd_pcie_res *res; + int i; + u32 state; + + pr_info("pcie res: mcd mode=%ld.\n", mode); + + switch (mode) { + case MDM_POWER_OFF: + state = SPRD_PCIE_WAIT_POWER_OFF; + break; + + default: + return NOTIFY_DONE; + } + + for (i = 0; i < SIPC_ID_NR; i++) { + res = g_pcie_res[i]; + + /* wait power off, do nothing */ + if (res->state == SPRD_PCIE_WAIT_POWER_OFF) + continue; + + if (res) { + res->state = state; + cancel_work_sync(&res->scan_work); + cancel_work_sync(&res->remove_work); + } + } + + return NOTIFY_DONE; +} + +static struct notifier_block mcd_notify = { + .notifier_call = sprd_pcie_resource_host_mcd, + .priority = 149, +}; +#endif + +/* Because the ep bar can only be split by ep itself, + * After all modem images be loaded, notify the pcie resource. + */ +void sprd_pcie_resource_notify_load_done(u32 dst) +{ + struct sprd_pcie_res *res; + + pr_info("pcie res: load done.\n"); + + if (dst >= SIPC_ID_NR || !g_pcie_res[dst]) + return; + + res = g_pcie_res[dst]; + + res->thread = kthread_create(sprd_pcie_resource_check_first_rescan, res, + "first rescan"); + if (IS_ERR(res->thread)) + pr_err("pcie res: Failed to create rescan thread.\n"); + else + wake_up_process(res->thread); +} + +int sprd_pcie_wait_load_resource(u32 dst) +{ + struct sprd_pcie_res *res; + + if (dst >= SIPC_ID_NR || !g_pcie_res[dst]) + return -EINVAL; + + res = g_pcie_res[dst]; + + /* can load image, return immediately */ + if (res->state == SPRD_PCIE_SCANNED || + res->state == SPRD_PCIE_SCANNED_2BAR) + return 0; + + return wait_event_interruptible( + res->wait_load_ready, + (res->state == SPRD_PCIE_SCANNED || + res->state == SPRD_PCIE_SCANNED_2BAR)); +} + +void sprd_pcie_resource_reboot_ep(u32 dst) +{ + struct sprd_pcie_res *res; + + pr_info("pcie res: reboot ep.\n"); + + if (dst >= SIPC_ID_NR || !g_pcie_res[dst]) + return; + + res = g_pcie_res[dst]; + + /* wait power off, do nothing */ + if (res->state == SPRD_PCIE_WAIT_POWER_OFF) + return; + + res->state = SPRD_PCIE_WAIT_FIRST_READY; + res->smem_send_to_ep = false; + res->ep_ready_for_rescan = false; + + /* The defective chip , the first wait time must be enough long. 
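+	 * With the constants above that means 55 s
+	 * (MAX_PMS_DEFECTIVE_CHIP_FIRST_WAIT_TIME) instead of the usual
+	 * 5 s (MAX_PMS_WAIT_TIME); the first-rescan thread restores the
+	 * normal value once the first rescan has completed.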
*/ + if (sprd_pcie_is_defective_chip()) + res->max_wait_time = MAX_PMS_DEFECTIVE_CHIP_FIRST_WAIT_TIME; + else + res->max_wait_time = MAX_PMS_WAIT_TIME; + + /* after ep reboot, can't backup ep configs*/ + sprd_ep_dev_clear_backup(res->ep_dev); + + sprd_pcie_resource_start_remove(res); + //modem_ctrl_poweron_modem(MDM_CTRL_COLD_RESET); + sprd_pcie_resource_start_scan(res); +} + +int sprd_pcie_resource_host_init(u32 dst, u32 ep_dev, + struct platform_device *pcie_dev) +{ + struct sprd_pcie_res *res; + + if (dst >= SIPC_ID_NR) + return -EINVAL; + + res = kzalloc(sizeof(*res), GFP_KERNEL); + if (!res) + return -ENOMEM; + + res->wq = create_singlethread_workqueue("pcie_res"); + if (!res->wq) { + pr_err("pcie res:create wq failed.\n"); + kfree(res); + return -ENOMEM; + } + + init_waitqueue_head(&res->wait_load_ready); + init_waitqueue_head(&res->wait_first_rescan); + +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,18,0 )) + res->ws = wakeup_source_register(NULL, "pcie_res"); +#else + wakeup_source_init(&res->ws, "pcie_res"); +#endif + + res->dst = dst; + res->state = SPRD_PCIE_WAIT_FIRST_READY; + res->pcie_dev = pcie_dev; + + /* The defective chip , the first wait time must be enough long. */ + if (sprd_pcie_is_defective_chip()) + res->max_wait_time = MAX_PMS_DEFECTIVE_CHIP_FIRST_WAIT_TIME; + else + res->max_wait_time = MAX_PMS_WAIT_TIME; + + init_waitqueue_head(&res->wait_pcie_ready); + INIT_WORK(&res->scan_work, sprd_pcie_resource_scan_fn); + INIT_WORK(&res->remove_work, sprd_pcie_resource_remove_fn); + + sprintf(res->pms_name, "ep-request-%d", dst); + res->pms = sprd_pms_create(dst, res->pms_name, false); + if (!res->pms) + pr_err("pcie res:create pms failed.\n"); + + sprd_ep_dev_register_irq_handler_ex(res->ep_dev, + PCIE_MSI_REQUEST_RES, + PCIE_MSI_RELEASE_RES, + sprd_pcie_resource_host_irq_handler, res); + + sprd_ep_dev_register_notify(res->ep_dev, + sprd_pcie_resource_host_ep_notify, res); + + //modem_ctrl_register_notifier(&mcd_notify); + + /* init wake up event callback */ + res->reg_event.events = SPRD_PCIE_EVENT_WAKEUP; + res->reg_event.pdev = pcie_dev; + res->reg_event.callback = sprd_pcie_resource_event_process; + res->reg_event.data = res; + + g_pcie_res[dst] = res; + + return 0; +} + +int sprd_pcie_resource_trash(u32 dst) +{ + struct sprd_pcie_res *res; + + if (dst >= SIPC_ID_NR || !g_pcie_res[dst]) + return -EINVAL; + + res = g_pcie_res[dst]; + + if (!IS_ERR_OR_NULL(res->thread)) + kthread_stop(res->thread); + +#ifdef CONFIG_SPRD_SIPA_RES + if (res->sipa_res) + pcie_sipa_res_destroy(res->sipa_res); +#endif + + cancel_work_sync(&res->scan_work); + cancel_work_sync(&res->remove_work); + destroy_workqueue(res->wq); + + sprd_pcie_deregister_event(&res->reg_event); + + sprd_ep_dev_unregister_irq_handler_ex(res->ep_dev, + PCIE_MSI_REQUEST_RES, + PCIE_MSI_RELEASE_RES); + sprd_ep_dev_unregister_notify(res->ep_dev); + //modem_ctrl_unregister_notifier(&mcd_notify); + sprd_pms_destroy(res->pms); + + kfree(res); + g_pcie_res[dst] = NULL; + + return 0; +} + +int sprd_pcie_wait_resource(u32 dst, int timeout) +{ + struct sprd_pcie_res *res; + int ret, wait; + unsigned long delay; + + if (dst >= SIPC_ID_NR || !g_pcie_res[dst]) + return -EINVAL; + + res = g_pcie_res[dst]; + + /* pcie ready, return succ immediately. */ + if (res->state == SPRD_PCIE_SCANNED) + return 0; + + if (timeout == 0) + return -ETIME; + + /* + * In some case, orca may has an exception, And the pcie + * resource may never ready again. 
So we must set a + * maximum wait time for let user to know thereis an + * exception in pcie, and can return an error code to the user. + */ + if (timeout < 0 || timeout > res->max_wait_time) + timeout = res->max_wait_time; + + /* + * timeout must add 1s, + * because the pcie scan may took some time. + */ + delay = msecs_to_jiffies(timeout + 1000); + wait = wait_event_interruptible_timeout(res->wait_pcie_ready, + res->state == + SPRD_PCIE_SCANNED, + delay); + if (wait == 0) + ret = -ETIME; + else if (wait > 0) + ret = 0; + else + ret = wait; + + + if (ret < 0 && ret != -ERESTARTSYS) + pr_err("pcie res: wait resource, val=%d.\n", ret); + + return ret; +} + +int sprd_pcie_request_resource(u32 dst) +{ + struct sprd_pcie_res *res; + + if (dst >= SIPC_ID_NR || !g_pcie_res[dst]) + return -EINVAL; + + res = g_pcie_res[dst]; + + /* get a wakelock */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,18,0 )) + __pm_stay_awake(res->ws); +#else + __pm_stay_awake(&res->ws); +#endif + + pr_info("pcie res: request resource, state=%d.\n", res->state); + +#ifdef CONFIG_SPRD_PCIE + /* The first scan is start by pcie driver automatically. */ + if (res->state != SPRD_PCIE_WAIT_FIRST_READY) + sprd_pcie_resource_start_scan(res); +#endif + + return 0; +} + +int sprd_pcie_release_resource(u32 dst) +{ + struct sprd_pcie_res *res; + + if (dst >= SIPC_ID_NR || !g_pcie_res[dst]) + return -EINVAL; + + res = g_pcie_res[dst]; + + /* relax a wakelock */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,18,0 )) + __pm_relax(res->ws); +#else + __pm_relax(&res->ws); +#endif + +#ifdef CONFIG_SPRD_PCIE + pr_info("pcie res: release resource.\n"); + + sprd_pcie_resource_start_remove(res); +#endif + return 0; +} + +bool sprd_pcie_is_defective_chip(void) +{ +#ifndef CONFIG_SPRD_PCIE + return false; +#else + static bool first_read = true, defective; + + if (first_read) { + first_read = false; + defective = sprd_kproperty_chipid("UD710-AB") == 0; + } + + return defective; +#endif +} diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/pcie/pcie_sipa_res.c b/package/wwan/driver/quectel_SRPD_PCIE/src/pcie/pcie_sipa_res.c new file mode 100644 index 000000000..dea3da3f3 --- /dev/null +++ b/package/wwan/driver/quectel_SRPD_PCIE/src/pcie/pcie_sipa_res.c @@ -0,0 +1,195 @@ +/* + * Copyright (C) 2018-2019 Unisoc Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +#include +#include +#include +#include + +#include "pcie_sipa_res.h" +#include "../include/sprd_pcie_resource.h" + +struct pcie_sipa_res_prod { + u8 dst; + enum sipa_rm_res_id prod_id; /* producer res id */ + enum sipa_rm_res_id cons_id; /* consumer res id */ + struct sprd_pms *pms; + char pms_name[20]; + struct work_struct wait_work; + struct delayed_work rm_work; +}; + +static void pcie_sipa_res_wait_res_work_fn(struct work_struct *work) +{ + int ret; + struct pcie_sipa_res_prod *res = container_of(work, + struct pcie_sipa_res_prod, + wait_work); + + ret = sprd_pcie_wait_resource(res->dst, -1); + + /* pcie not ready, just return. */ + if (ret) { + pr_err("pcie_sipa_res: wait res error = %d!\n", ret); + return; + } + + /* notify ipa module that pcie is ready. 
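+	 * This completes the -EINPROGRESS path of
+	 * pcie_sipa_res_request_resource(): SIPA_RM_EVT_GRANTED tells the
+	 * rm core that the resource IPA was waiting for is now available.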
*/ + sipa_rm_notify_completion(SIPA_RM_EVT_GRANTED, + res->prod_id); +} + +static int pcie_sipa_res_request_resource(void *data) +{ + int ret; + struct pcie_sipa_res_prod *res = data; + + pr_info("pcie_sipa_res: request resource.\n"); + + sprd_pms_power_up(res->pms); + + /* + * when the resource is not ready, the IPA module doesn't want be + * blocked in here until the pcie ready, the IPA owner designed + * a notification api sipa_rm_notify_completion to notify the + * IPA module that the resource requested by IPA is ready. + * The designated error value is -EINPROGRESS, so we must override the + * return value -ETIME to -EINPROGRESS. + */ + ret = sprd_pcie_wait_resource(res->dst, 0); + + if (ret == -ETIME) { + /* add a work to wait pcie ready */ + schedule_work(&res->wait_work); + ret = -EINPROGRESS; + } + + return ret; +} + +static int pcie_sipa_res_release_resource(void *data) +{ + struct pcie_sipa_res_prod *res = data; + + pr_info("pcie_sipa_res: release resource.\n"); + + sprd_pms_release_resource(res->pms); + + return 0; +} + +static void pcie_sipa_res_create_rm_work_fn(struct work_struct *work) +{ + int ret; + struct sipa_rm_create_params rm_params; + struct pcie_sipa_res_prod *res = container_of(to_delayed_work(work), + struct pcie_sipa_res_prod, + rm_work); + + rm_params.name = res->prod_id; + rm_params.floor_voltage = 0; + rm_params.reg_params.notify_cb = NULL; + rm_params.reg_params.user_data = res; + rm_params.request_resource = pcie_sipa_res_request_resource; + rm_params.release_resource = pcie_sipa_res_release_resource; + ret = sipa_rm_create_resource(&rm_params); + + /* defer to create rm */ + if (ret == -EPROBE_DEFER) { + schedule_delayed_work(&res->rm_work, msecs_to_jiffies(1000)); + return; + } + + /* add dependencys */ + ret = sipa_rm_add_dependency(res->cons_id, res->prod_id); + if (ret < 0 && ret != -EINPROGRESS) { + pr_err("pcie_sipa_res: add_dependency error = %d!\n", ret); + sipa_rm_delete_resource(res->prod_id); + sprd_pms_destroy(res->pms); + kfree(res); + } +} + +void *pcie_sipa_res_create(u8 dst, enum sipa_rm_res_id prod_id, + enum sipa_rm_res_id cons_id) +{ + int ret; + struct sipa_rm_create_params rm_params; + struct pcie_sipa_res_prod *res; + + res = kzalloc(sizeof(*res), GFP_KERNEL); + if (!res) + return NULL; + + /* init wait pcie res work */ + INIT_WORK(&res->wait_work, pcie_sipa_res_wait_res_work_fn); + INIT_DELAYED_WORK(&res->rm_work, pcie_sipa_res_create_rm_work_fn); + + /* create pms */ + strncpy(res->pms_name, "sipa", sizeof(res->pms_name)); + res->pms = sprd_pms_create(dst, res->pms_name, false); + if (!res->pms) { + pr_err("pcie_sipa_res: create pms failed!\n"); + kfree(res); + return NULL; + } + + res->dst = dst; + res->prod_id = prod_id; + res->cons_id = cons_id; + + /* create prod */ + rm_params.name = prod_id; + rm_params.floor_voltage = 0; + rm_params.reg_params.notify_cb = NULL; + rm_params.reg_params.user_data = res; + rm_params.request_resource = pcie_sipa_res_request_resource; + rm_params.release_resource = pcie_sipa_res_release_resource; + ret = sipa_rm_create_resource(&rm_params); + + /* defer to create rm */ + if (ret == -EPROBE_DEFER) { + schedule_delayed_work(&res->rm_work, msecs_to_jiffies(1000)); + return res; + } else if (ret) { + pr_err("pcie_sipa_res: create rm error = %d!\n", ret); + sprd_pms_destroy(res->pms); + kfree(res); + return NULL; + } + + /* add dependencys */ + ret = sipa_rm_add_dependency(cons_id, prod_id); + if (ret < 0 && ret != -EINPROGRESS) { + pr_err("pcie_sipa_res: add_dependency error = %d!\n", ret); + 
sipa_rm_delete_resource(prod_id);
+		sprd_pms_destroy(res->pms);
+		kfree(res);
+		return NULL;
+	}
+
+	return res;
+}
+
+void pcie_sipa_res_destroy(void *data)
+{
+	struct pcie_sipa_res_prod *res = data;
+
+	cancel_work_sync(&res->wait_work);
+	cancel_delayed_work_sync(&res->rm_work);
+
+	sprd_pms_destroy(res->pms);
+	sipa_rm_delete_dependency(res->cons_id, res->prod_id);
+	sipa_rm_delete_resource(res->prod_id);
+	kfree(res);
+}
+
diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/pcie/pcie_sipa_res.h b/package/wwan/driver/quectel_SRPD_PCIE/src/pcie/pcie_sipa_res.h
new file mode 100644
index 000000000..144bcc5b5
--- /dev/null
+++ b/package/wwan/driver/quectel_SRPD_PCIE/src/pcie/pcie_sipa_res.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2018-2019 Unisoc Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef PCIE_SIPA_RES_H
+#define PCIE_SIPA_RES_H
+
+#include "../include/sipa.h"
+
+/*
+ * pcie_sipa_res_create - create the pcie res for the sipa module.
+ * @dst: the destination (sipc) id.
+ * @prod_id: which res is the producer.
+ * @cons_id: which res is the consumer.
+ *
+ * Returns:
+ * NULL on failure,
+ * an opaque handle on success.
+ */
+void *pcie_sipa_res_create(u8 dst, enum sipa_rm_res_id prod_id,
+			   enum sipa_rm_res_id cons_id);
+
+/*
+ * pcie_sipa_res_destroy - destroy the pcie res for the sipa module.
+ * @res: the handle returned by pcie_sipa_res_create().
+ */
+void pcie_sipa_res_destroy(void *res);
+#endif
+
diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/pcie/sprd_pcie_ep_device.c b/package/wwan/driver/quectel_SRPD_PCIE/src/pcie/sprd_pcie_ep_device.c
new file mode 100644
index 000000000..05275c6f9
--- /dev/null
+++ b/package/wwan/driver/quectel_SRPD_PCIE/src/pcie/sprd_pcie_ep_device.c
@@ -0,0 +1,1663 @@
+/**
+ * SPRD ep device driver in host side for Spreadtrum SoCs
+ *
+ * Copyright (C) 2018 Spreadtrum Co., Ltd.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is used to control the ep device driver on the host side
+ * for Spreadtrum SoCs.
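+ *
+ * Minimal usage sketch for the BAR mapping helpers exported below
+ * (the 0x87800000 address and 1 MB size are illustrative values only,
+ * not taken from any real memory map):
+ *
+ *	void __iomem *va = sprd_ep_map_memory(PCIE_EP_MODEM,
+ *					      0x87800000, 0x100000);
+ *	if (va) {
+ *		u32 v = readl_relaxed(va);
+ *
+ *		sprd_ep_unmap_memory(PCIE_EP_MODEM, va);
+ *	}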
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "../include/pcie-rc-sprd.h" +#include "../include/sprd_pcie_ep_device.h" +#include "../include/sipc_big_to_little.h" +#include "../include/sprd_pcie_resource.h" +#include "../include/sipc.h" +#include "../include/sipa.h" +#include "../sipa/sipa_core.h" + +#define DRV_MODULE_NAME "sprd-pcie-ep-device" +#define CONFIG_SPRD_IPA_PCIE_WORKROUND + +enum dev_pci_barno { + BAR_0 = 0, + BAR_1, + BAR_2, + BAR_3, + BAR_4, + BAR_5, + BAR_CNT +}; + +#define MAX_SUPPORT_IRQ 32 +#define IPA_HW_IRQ_CNT 4 +#define IPA_HW_IRQ_BASE 16 +#define IPA_HW_IRQ_BASE_DEFECT 0 + +#define REQUEST_BASE_IRQ (IPA_HW_IRQ_BASE + IPA_HW_IRQ_CNT) +#define REQUEST_BASE_IRQ_DEFECT 16 + +#ifdef CONFIG_SPRD_IPA_PCIE_WORKROUND +/* the bar0 and the bar1 are used for ipa */ +#define IPA_MEM_BAR BAR_0 +#define IPA_REG_BAR BAR_1 +#define BAR_MIN BAR_2 +#else +#define BAR_MIN BAR_0 +#endif + +/* the bar4 and the bar5 are specail bars */ +#define BAR_MAX BAR_4 + +#define PCI_VENDOR_ID_SPRD 0x16c3 +#define PCI_DEVICE_ID_SPRD_ORCA 0xabcd +#define PCI_CLASS_ID_SPRD_ORCA 0x80d00 + +/* Parameters for the waiting for iATU enabled routine */ +#define LINK_WAIT_MAX_IATU_RETRIES 5 +#define LINK_WAIT_IATU_MIN 9000 +#define LINK_WAIT_IATU_MAX 10000 + +/* ep config bar bar4 , can config ep iatu reg and door bell reg */ +#define EP_CFG_BAR BAR_4 +#define DOOR_BELL_BASE 0x00000 +#define IATU_REG_BASE 0x10000 + +#define DOOR_BELL_ENABLE 0x10 +#define DOOR_BELL_STATUS 0x14 + +/* used 0x18 & 0x1c to save the smem base & size. */ +#define DOOR_BELL_SMEMBASE 0x18 +#define DOOR_BELL_SMEMSIZE 0x1C + +/* one bit can indicate one irq , if stauts[i] & enable[i] , irq = i */ +#define DOOR_BELL_IRQ_VALUE(irq) BIT((irq)) +#define DOOR_BELL_IRQ_CNT 32 +#define IATU_MAX_REGION 8 +#define PCIE_ATU_VIEWPORT 0x900 +#define PCIE_ATU_CR1 0x904 +#define PCIE_ATU_CR2 0x908 +#define PCIE_ATU_LOWER_BASE 0x90c +#define PCIE_ATU_UPPER_BASE 0x910 +#define PCIE_ATU_LIMIT 0x914 +#define PCIE_ATU_LOWER_TARGET 0x918 +#define PCIE_ATU_UPPER_TARGET 0x91c + +#define PCIE_ATU_REGION_INBOUND BIT(31) +#define PCIE_ATU_ENABLE BIT(31) +#define PCIE_ATU_BAR_MODE_ENABLE BIT(30) +#define PCIE_ATU_TYPE_MEM 0x0 + +#define PCIE_ATU_UNR_REGION_CTRL1 0x00 +#define PCIE_ATU_UNR_REGION_CTRL2 0x04 +#define PCIE_ATU_UNR_LOWER_BASE 0x08 +#define PCIE_ATU_UNR_UPPER_BASE 0x0c +#define PCIE_ATU_UNR_LIMIT 0x10 +#define PCIE_ATU_UNR_LOWER_TARGET 0x14 +#define PCIE_ATU_UNR_UPPER_TARGET 0x18 + +/* bar4 + 0x10000 has map to ep base + 0x18000 ((0x3 << 15)) */ +#define PCIE_ATU_IB_REGION(region) (((region) << 9) | (0x1 << 8)) +#define PCIE_ATU_OB_REGION(region) ((region) << 9) + +#define PCIE_SAVE_REGION_NUM (IATU_MAX_REGION * 2) +#define PCIE_SAVE_REG_NUM 8 + +#define PCIE_LEGACY_CLEAR_BASE 0x29000000 +#define PCIE_LEGACY_CLEAR_REG 0x2c +#define PCIE_LEGACY_CLEAR_MASK BIT(0) +#define BIT_SET_OFFSET 0x1000 +#define BIT_CLR_OFFSET 0x2000 + +struct sprd_ep_dev_notify { + void (*notify)(int event, void *data); + void *data; +}; + +struct sprd_pci_ep_dev { + struct pci_dev *pdev; + void __iomem *cfg_base; /* ep config bar base in rc */ + spinlock_t irq_lock; /* irq spinlock */ + spinlock_t bar_lock; /* bar spinlock */ + spinlock_t set_irq_lock; /* set irq spinlock */ + spinlock_t set_bar_lock; /* set bar spinlock */ + unsigned long bar_res; + + u32 base_irq; + u32 ipa_base_irq; + u32 bak_irq_status; + u8 
iatu_unroll_enabled; + u8 ep; + u8 irq_cnt; + bool no_msi; + bool need_backup; + void __iomem *legacy_addr; + + struct resource *bar[BAR_CNT]; + void __iomem *bar_vir[BAR_MAX]; + void __iomem *cpu_vir[BAR_MAX]; + dma_addr_t src_addr[BAR_MAX]; + dma_addr_t target_addr[BAR_MAX]; + size_t map_size[BAR_MAX]; + int event; + struct work_struct notify_work; +}; + +struct sprd_pci_ep_dev_save { + bool save_succ; + unsigned long bar_res; + void __iomem *bar_vir[BAR_MAX]; + void __iomem *cpu_vir[BAR_MAX]; + dma_addr_t src_addr[BAR_MAX]; + dma_addr_t target_addr[BAR_MAX]; + size_t map_size[BAR_MAX]; + u32 doorbell_enable; + u32 doorbell_status; + void __iomem *cfg_base; + u32 save_reg[PCIE_SAVE_REGION_NUM][PCIE_SAVE_REG_NUM]; +}; + +#define VERSION_NUMBER "V1.1.6" +#define QUECTEL_SPRD_PCIE_VERSION "Quectel_Linux&Android_SPRD_PCIE_Driver_"VERSION_NUMBER + +static void __iomem *g_irq_addr[PCIE_EP_NR]; +static struct sprd_pci_ep_dev_save g_ep_save[PCIE_EP_NR]; +static struct sprd_pci_ep_dev *g_ep_dev[PCIE_EP_NR]; +static irq_handler_t ep_dev_handler[PCIE_EP_NR][PCIE_MSI_MAX_IRQ]; +static void *ep_dev_handler_data[PCIE_EP_NR][PCIE_MSI_MAX_IRQ]; +static struct sprd_ep_dev_notify g_ep_dev_notify[PCIE_EP_NR]; + +static int sprd_ep_dev_get_bar(int ep); +static int sprd_ep_dev_put_bar(int ep, int bar); +static int sprd_ep_dev_adjust_region(struct sprd_pci_ep_dev *ep_dev, + int bar, dma_addr_t *cpu_addr_ptr, + size_t *size_ptr, dma_addr_t *offset_ptr); +static int sprd_ep_dev_just_map_bar(struct sprd_pci_ep_dev *ep_dev, int bar, + dma_addr_t cpu_addr, size_t size); +static int sprd_ep_dev_just_unmap_bar(struct sprd_pci_ep_dev *ep_dev, int bar); +static void __iomem *sprd_ep_dev_map_bar(int ep, int bar, + dma_addr_t cpu_addr, + size_t size); +static int sprd_ep_dev_unmap_bar(int ep, int bar); +static void sprd_pci_ep_dev_backup(struct sprd_pci_ep_dev *ep_dev); + +static void sprd_pcie_iommu_init(struct device *dev) +{ + struct iommu_domain *domain = NULL; + unsigned long pg_size; + dma_addr_t start, end, addr; + u32 base, size; + int ret = 0; + + domain = iommu_get_domain_for_dev(dev); + if(!domain) { + dev_info(dev, "sprd_pcie_iommu_init domian null"); + return; + } + + pg_size = 1UL << __ffs(domain->pgsize_bitmap); + smem_get_area(SIPC_ID_MINIAP, &base, &size); + start = ALIGN(base, pg_size); + end = ALIGN(base + size, pg_size); + + for (addr = start; addr < end; addr += pg_size) { + phys_addr_t phys_addr; + + phys_addr = iommu_iova_to_phys(domain, addr); + if (phys_addr) { + dev_info(dev, "sprd_pcie_iommu_init iova:%d have been used", (u32)addr); + continue; + } + ret = iommu_map(domain, addr, addr, pg_size, IOMMU_READ | IOMMU_WRITE); + if (ret) { + dev_info(dev, "sprd_pcie_iommu_init iommu_map failed"); + return; + } + } +} + + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,11,0 )) +int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec) +{ + int nvec = maxvec; + int rc; + + if (maxvec < minvec) + return -ERANGE; + + do { + rc = pci_enable_msi_block(dev, nvec); + if (rc < 0) { + return rc; + } else if (rc > 0) { + if (rc < minvec) + return -ENOSPC; + nvec = rc; + } + } while (rc); + + return nvec; +} +#endif + +int sprd_ep_dev_register_notify(int ep, + void (*notify)(int event, void *data), + void *data) +{ + struct sprd_ep_dev_notify *dev_notify; + + if (ep >= PCIE_EP_NR) + return -EINVAL; + + dev_notify = &g_ep_dev_notify[ep]; + dev_notify->notify = notify; + dev_notify->data = data; + + return 0; +} +EXPORT_SYMBOL_GPL(sprd_ep_dev_register_notify); + +int sprd_ep_dev_unregister_notify(int 
ep) +{ + struct sprd_ep_dev_notify *notify; + + if (ep >= PCIE_EP_NR) + return -EINVAL; + + notify = &g_ep_dev_notify[ep]; + notify->notify = NULL; + notify->data = NULL; + + return 0; +} +EXPORT_SYMBOL_GPL(sprd_ep_dev_unregister_notify); + +int sprd_ep_dev_set_irq_addr(int ep, void __iomem *irq_addr) +{ + if (ep >= PCIE_EP_NR) + return -EINVAL; + + g_irq_addr[ep] = irq_addr; + + return 0; +} +EXPORT_SYMBOL_GPL(sprd_ep_dev_set_irq_addr); + +int sprd_ep_dev_register_irq_handler(int ep, int irq, + irq_handler_t handler, void *data) +{ + struct sprd_pci_ep_dev *ep_dev; + + if (ep >= PCIE_EP_NR || irq >= PCIE_MSI_MAX_IRQ) + return -EINVAL; + + ep_dev_handler[ep][irq] = handler; + ep_dev_handler_data[ep][irq] = data; + ep_dev = g_ep_dev[ep]; + + if (handler && ep_dev && + (BIT(irq) & ep_dev->bak_irq_status)) { + ep_dev->bak_irq_status &= ~BIT(irq); + handler(irq, data); + } + + return 0; +} +EXPORT_SYMBOL_GPL(sprd_ep_dev_register_irq_handler); + +int sprd_ep_dev_unregister_irq_handler(int ep, int irq) +{ + if (ep < PCIE_EP_NR && irq < PCIE_MSI_MAX_IRQ) { + ep_dev_handler[ep][irq] = NULL; + ep_dev_handler_data[ep][irq] = NULL; + return 0; + } + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(sprd_ep_dev_unregister_irq_handler); + +int sprd_ep_dev_register_irq_handler_ex(int ep, + int from_irq, + int to_irq, + irq_handler_t handler, + void *data) +{ + int i, ret; + + for (i = from_irq; i < to_irq + 1; i++) { + ret = sprd_ep_dev_register_irq_handler(ep, + i, handler, data); + if (ret) + return ret; + } + + return 0; +} + +int sprd_ep_dev_unregister_irq_handler_ex(int ep, + int from_irq, + int to_irq) +{ + int i, ret; + + for (i = from_irq; i < to_irq + 1; i++) { + ret = sprd_ep_dev_unregister_irq_handler(ep, i); + if (ret) + return ret; + } + + return 0; +} + +void __iomem *sprd_ep_map_memory(int ep, + phys_addr_t cpu_addr, + size_t size) +{ + int bar; + void __iomem *bar_addr; + + bar = sprd_ep_dev_get_bar(ep); + if (bar < 0) { + pr_err("%s: get bar err = %d\n", __func__, bar); + return NULL; + } + + bar_addr = sprd_ep_dev_map_bar(ep, bar, cpu_addr, size); + if (!bar_addr) { + pr_err("%s: map bar = %d err!\n", __func__, bar); + sprd_ep_dev_put_bar(ep, bar); + return NULL; + } + + return bar_addr; +} +EXPORT_SYMBOL_GPL(sprd_ep_map_memory); + +void sprd_ep_unmap_memory(int ep, const void __iomem *bar_addr) +{ + int bar; + struct sprd_pci_ep_dev *ep_dev; + + if (ep >= PCIE_EP_NR || !g_ep_dev[ep]) + return; + + ep_dev = g_ep_dev[ep]; + + for (bar = 0; bar < BAR_MAX; bar++) { + if (bar_addr == ep_dev->cpu_vir[bar]) { + sprd_ep_dev_unmap_bar(ep, bar); + sprd_ep_dev_put_bar(ep, bar); + break; + } + } +} +EXPORT_SYMBOL_GPL(sprd_ep_unmap_memory); + +#ifdef CONFIG_SPRD_SIPA +phys_addr_t sprd_ep_ipa_map(int type, phys_addr_t target_addr, size_t size) +{ + int bar, ep = PCIE_EP_MODEM; + dma_addr_t offset; + struct sprd_pci_ep_dev *ep_dev; + struct pci_dev *pdev; + struct device *dev; + struct resource *res; + + ep_dev = g_ep_dev[ep]; + if (!ep_dev) + return 0; + + ep_dev = g_ep_dev[ep]; + pdev = ep_dev->pdev; + dev = &pdev->dev; +#ifdef CONFIG_SPRD_IPA_PCIE_WORKROUND + bar = type == PCIE_IPA_TYPE_MEM ? 
IPA_MEM_BAR : IPA_REG_BAR;
+#else
+ bar = sprd_ep_dev_get_bar(ep);
+ if (bar < 0) {
+ dev_err(dev, "ep: ipa map, get bar err = %d\n", bar);
+ return 0;
+ }
+#endif
+ res = &pdev->resource[bar];
+
+ dev_info(dev, "ep: ipa map type=%d, addr=0x%lx, size=0x%lx\n",
+ type,
+ (unsigned long)target_addr,
+ (unsigned long)size);
+
+ /* 1st, adjust the map region */
+ if (sprd_ep_dev_adjust_region(ep_dev, bar, &target_addr,
+ &size, &offset))
+ return 0;
+
+ /* then, map the bar */
+ if (sprd_ep_dev_just_map_bar(ep_dev, bar, target_addr, size))
+ return 0;
+
+ /* save for unmap */
+ ep_dev->src_addr[bar] = res->start + offset;
+ ep_dev->target_addr[bar] = target_addr;
+ ep_dev->map_size[bar] = size;
+
+ /* return the cpu phy address */
+ return res->start + offset;
+}
+
+int sprd_ep_ipa_unmap(int type, const phys_addr_t cpu_addr)
+{
+ int bar, ep = PCIE_EP_MODEM;
+ bool find_bar = false;
+ struct sprd_pci_ep_dev *ep_dev;
+ struct pci_dev *pdev;
+
+ ep_dev = g_ep_dev[ep];
+ if (!ep_dev)
+ return -EINVAL;
+
+ pdev = ep_dev->pdev;
+
+ dev_info(&pdev->dev, "ep: ipa unmap cpu_addr=0x%lx\n",
+ (unsigned long)cpu_addr);
+
+#ifdef CONFIG_SPRD_IPA_PCIE_WORKROUND
+ bar = type == PCIE_IPA_TYPE_MEM ? IPA_MEM_BAR : IPA_REG_BAR;
+ if (ep_dev->src_addr[bar] == cpu_addr)
+ find_bar = true;
+#else
+ for (bar = 0; bar < BAR_MAX; bar++) {
+ if (cpu_addr == ep_dev->src_addr[bar]) {
+ find_bar = true;
+ break;
+ }
+ }
+#endif
+
+ if (!find_bar) {
+ dev_err(&pdev->dev, "ep: ipa unmap can't find bar!");
+ return -EINVAL;
+ }
+
+ ep_dev->src_addr[bar] = 0;
+ ep_dev->target_addr[bar] = 0;
+ ep_dev->map_size[bar] = 0;
+ return sprd_ep_dev_just_unmap_bar(ep_dev, bar);
+}
+#endif
+
+int sprd_ep_dev_raise_irq(int ep, int irq)
+{
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct sprd_pci_ep_dev *ep_dev;
+ void __iomem *base;
+ u32 value;
+
+ if (ep >= PCIE_EP_NR || !g_ep_dev[ep])
+ return -ENODEV;
+
+ ep_dev = g_ep_dev[ep];
+ pdev = ep_dev->pdev;
+ dev = &pdev->dev;
+
+ dev_dbg(dev, "ep: raise, ep=%d, irq=%d\n", ep, irq);
+
+ if (irq >= DOOR_BELL_IRQ_CNT) {
+ dev_err(&pdev->dev, "raise err, irq=%d\n", irq);
+ return -EINVAL;
+ }
+
+ spin_lock(&ep_dev->set_irq_lock);
+ base = ep_dev->cfg_base + DOOR_BELL_BASE;
+ value = readl_relaxed(base + DOOR_BELL_STATUS);
+ writel_relaxed(value | DOOR_BELL_IRQ_VALUE(irq),
+ base + DOOR_BELL_STATUS);
+ spin_unlock(&ep_dev->set_irq_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sprd_ep_dev_raise_irq);
+
+int sprd_ep_dev_clear_doolbell_irq(int ep, int irq)
+{
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct sprd_pci_ep_dev *ep_dev;
+ void __iomem *base;
+ u32 value;
+
+ if (ep >= PCIE_EP_NR || !g_ep_dev[ep])
+ return -ENODEV;
+
+ ep_dev = g_ep_dev[ep];
+ pdev = ep_dev->pdev;
+ dev = &pdev->dev;
+
+ dev_dbg(dev, "ep: clear doorbell, ep=%d, irq=%d\n", ep, irq);
+
+ if (irq >= DOOR_BELL_IRQ_CNT)
+ return -EINVAL;
+
+ spin_lock(&ep_dev->set_irq_lock);
+ base = ep_dev->cfg_base + DOOR_BELL_BASE;
+ value = readl_relaxed(base + DOOR_BELL_STATUS);
+ if (value & DOOR_BELL_IRQ_VALUE(irq))
+ writel_relaxed(value & (~DOOR_BELL_IRQ_VALUE(irq)),
+ base + DOOR_BELL_STATUS);
+ spin_unlock(&ep_dev->set_irq_lock);
+
+ return 0;
+}
+
+int sprd_ep_dev_set_backup(int ep)
+{
+ struct sprd_pci_ep_dev *ep_dev;
+
+ if (ep >= PCIE_EP_NR || !g_ep_dev[ep])
+ return -ENODEV;
+
+ ep_dev = g_ep_dev[ep];
+ ep_dev->need_backup = true;
+
+ /* backup once immediately. 
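The backup/restore pair exists because
+ * the EP loses its iATU and doorbell state whenever the link goes down
+ * (e.g. across a modem reset); probe() replays the saved mappings via
+ * sprd_pci_ep_dev_restore().
+ *
+ * Illustrative caller sketch (editor's addition, not in the original
+ * driver; my_ep_notify is a hypothetical name):
+ *
+ *   static void my_ep_notify(int event, void *data)
+ *   {
+ *           if (event == PCIE_EP_PROBE)
+ *                   sprd_ep_dev_set_backup(PCIE_EP_MODEM);
+ *   }
+ *
+ *   sprd_ep_dev_register_notify(PCIE_EP_MODEM, my_ep_notify, NULL);
+ *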
*/ + sprd_pci_ep_dev_backup(ep_dev); + + return 0; +} + +int sprd_ep_dev_clear_backup(int ep) +{ + struct sprd_pci_ep_dev *ep_dev; + + if (ep >= PCIE_EP_NR || !g_ep_dev[ep]) + return -ENODEV; + + ep_dev = g_ep_dev[ep]; + ep_dev->need_backup = false; + + return 0; +} + +int sprd_ep_dev_pass_smem(int ep, u32 base, u32 size) +{ + struct sprd_pci_ep_dev *ep_dev; + void __iomem *reg_base; + struct pci_dev *pdev; + struct device *dev; + + if (ep >= PCIE_EP_NR || !g_ep_dev[ep]) + return -ENODEV; + + ep_dev = g_ep_dev[ep]; + pdev = ep_dev->pdev; + dev = &pdev->dev; + dev_info(&pdev->dev, + "pass_smem, base=0x%x,size=0x%x\n", + base, size); + + reg_base = ep_dev->cfg_base + DOOR_BELL_BASE; + + writel_relaxed(base, reg_base + DOOR_BELL_SMEMBASE); + writel_relaxed(size, reg_base + DOOR_BELL_SMEMSIZE); + + return 0; +} + +static inline u32 sprd_pci_ep_iatu_readl(struct sprd_pci_ep_dev *ep_dev, + u32 offset) +{ + return readl_relaxed(ep_dev->cfg_base + IATU_REG_BASE + offset); +} + +static inline void sprd_pci_ep_iatu_writel(struct sprd_pci_ep_dev *ep_dev, + u32 offset, u32 value) +{ + writel_relaxed(value, ep_dev->cfg_base + IATU_REG_BASE + offset); +} + +static int sprd_ep_dev_get_bar(int ep) +{ + int bar; + int ret = -EBUSY; + struct sprd_pci_ep_dev *ep_dev; + + if (ep >= PCIE_EP_NR || !g_ep_dev[ep]) + return -ENODEV; + + ep_dev = g_ep_dev[ep]; + spin_lock(&ep_dev->bar_lock); + for (bar = BAR_MIN; bar < BAR_MAX; bar++) { + if (ep_dev->bar[bar] && !test_bit(bar, &ep_dev->bar_res)) { + set_bit(bar, &ep_dev->bar_res); + ret = bar; + break; + } + } + spin_unlock(&ep_dev->bar_lock); + + return ret; +} + +static int sprd_ep_dev_put_bar(int ep, int bar) +{ + int ret = -ENODEV; + struct sprd_pci_ep_dev *ep_dev; + + if (ep >= PCIE_EP_NR || !g_ep_dev[ep]) + return -ENODEV; + + ep_dev = g_ep_dev[ep]; + spin_lock(&ep_dev->bar_lock); + if (test_and_clear_bit(bar, &ep_dev->bar_res)) + ret = bar; + spin_unlock(&ep_dev->bar_lock); + + return ret; +} + +static int sprd_ep_dev_unr_set_bar(struct sprd_pci_ep_dev *ep_dev, + int bar, + dma_addr_t cpu_addr, size_t size) +{ + u32 retries, val; + struct pci_dev *pdev = ep_dev->pdev; + + spin_lock(&ep_dev->set_bar_lock); + + /* bar n use region n to map, map to bar match mode */ + sprd_pci_ep_iatu_writel(ep_dev, + PCIE_ATU_IB_REGION(bar) + + PCIE_ATU_UNR_LOWER_TARGET, + lower_32_bits(cpu_addr)); + sprd_pci_ep_iatu_writel(ep_dev, + PCIE_ATU_IB_REGION(bar) + + PCIE_ATU_UNR_UPPER_TARGET, + upper_32_bits(cpu_addr)); + + sprd_pci_ep_iatu_writel(ep_dev, + PCIE_ATU_IB_REGION(bar) + + PCIE_ATU_UNR_REGION_CTRL1, + PCIE_ATU_TYPE_MEM); + sprd_pci_ep_iatu_writel(ep_dev, + PCIE_ATU_IB_REGION(bar) + + PCIE_ATU_UNR_REGION_CTRL2, + PCIE_ATU_ENABLE | + PCIE_ATU_BAR_MODE_ENABLE | (bar << 8)); + + spin_unlock(&ep_dev->set_bar_lock); + + /* + * Make sure ATU enable takes effect before any subsequent config + * and I/O accesses. 
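The write above only posts the enable
+ * bit; the DesignWare core can take a moment to latch it, so the loop
+ * below polls CTRL2 rather than assuming the write stuck.
+ *
+ * Worked example (editor's note, derived from the macros above): for
+ * bar = 2, PCIE_ATU_IB_REGION(2) = (2 << 9) | (1 << 8) = 0x500, so this
+ * region's CTRL2 lives at cfg_base + IATU_REG_BASE + 0x500 + 0x04;
+ * i.e. unrolled inbound region n sits at stride 0x200, offset 0x100
+ * past its outbound twin.
+ *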
+ */ + for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { + val = sprd_pci_ep_iatu_readl(ep_dev, + PCIE_ATU_IB_REGION(bar) + + PCIE_ATU_UNR_REGION_CTRL2); + if (val & PCIE_ATU_ENABLE) + return 0; + + dev_dbg(&pdev->dev, + "ep: unr set bar[%d], var = 0x%x\n", + bar, + val); + /* wait a moment for polling ep atu enable bit */ + usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); + } + + return -EINVAL; +} + +static int sprd_ep_dev_unr_clear_bar(struct sprd_pci_ep_dev *ep_dev, int bar) +{ + struct pci_dev *pdev = ep_dev->pdev; + + dev_dbg(&pdev->dev, "ep: unr clear map bar=%d\n", bar); + + spin_lock(&ep_dev->set_bar_lock); + + sprd_pci_ep_iatu_writel(ep_dev, + PCIE_ATU_IB_REGION(bar) + + PCIE_ATU_UNR_REGION_CTRL2, + (u32)(~PCIE_ATU_ENABLE)); + spin_unlock(&ep_dev->set_bar_lock); + + return 0; +} + +static int sprd_ep_dev_adjust_region(struct sprd_pci_ep_dev *ep_dev, int bar, + dma_addr_t *cpu_addr_ptr, + size_t *size_ptr, + dma_addr_t *offset_ptr) +{ + dma_addr_t cpu_addr, base, offset; + resource_size_t bar_size, size; + struct pci_dev *pdev = ep_dev->pdev; + struct resource *res = &pdev->resource[bar]; + + size = (resource_size_t)*size_ptr; + cpu_addr = *cpu_addr_ptr; + bar_size = resource_size(res); + + /* size must align with page */ + size = PAGE_ALIGN(size); + + /* base must be divisible by bar size for bar match mode */ + base = cpu_addr / bar_size * bar_size; + offset = cpu_addr - base; + size += PAGE_ALIGN(offset); + + /* size must < bar size */ + if (size > bar_size) { + dev_err(&pdev->dev, + "bar[%d]:size=0x%lx > 0x%lx\n", + bar, + (unsigned long)size, + (unsigned long)bar_size); + return -EINVAL; + } + + dev_dbg(&pdev->dev, + "bar[%d]: base=0x%lx,size=0x%lx,offset=0x%lx\n", + bar, (unsigned long)base, + (unsigned long)size, + (unsigned long)offset); + + *size_ptr = (size_t)size; + *offset_ptr = offset; + *cpu_addr_ptr = base; + + return 0; +} + +static int sprd_ep_dev_just_map_bar(struct sprd_pci_ep_dev *ep_dev, int bar, + dma_addr_t cpu_addr, size_t size) +{ + u32 retries, val; + struct pci_dev *pdev; + struct device *dev; + + pdev = ep_dev->pdev; + dev = &pdev->dev; + + dev_dbg(dev, "ep: map bar=%d, addr=0x%lx, size=0x%lx\n", + bar, + (unsigned long)cpu_addr, + (unsigned long)size); + + if (ep_dev->iatu_unroll_enabled) + return sprd_ep_dev_unr_set_bar(ep_dev, bar, cpu_addr, size); + + spin_lock(&ep_dev->set_bar_lock); + + /* bar n use region n to map, map to bar match mode */ + sprd_pci_ep_iatu_writel(ep_dev, + PCIE_ATU_VIEWPORT, + PCIE_ATU_REGION_INBOUND | bar); + sprd_pci_ep_iatu_writel(ep_dev, PCIE_ATU_LOWER_TARGET, + lower_32_bits(cpu_addr)); + sprd_pci_ep_iatu_writel(ep_dev, PCIE_ATU_UPPER_TARGET, + upper_32_bits(cpu_addr)); + sprd_pci_ep_iatu_writel(ep_dev, + PCIE_ATU_CR1, + PCIE_ATU_TYPE_MEM); + sprd_pci_ep_iatu_writel(ep_dev, + PCIE_ATU_CR2, + PCIE_ATU_ENABLE | + PCIE_ATU_BAR_MODE_ENABLE | (bar << 8)); + + spin_unlock(&ep_dev->set_bar_lock); + + /* + * Make sure ATU enable takes effect + * before any subsequent config and I/O accesses. 
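In BAR-match mode the core matches
+ * only on which BAR was hit, so the whole BAR window is redirected to
+ * the target programmed here; that is why sprd_ep_dev_adjust_region()
+ * first rounds the target down to a BAR-size boundary.
+ *
+ * Worked example (editor's note, numbers hypothetical): with a 2 MB
+ * BAR and cpu_addr 0x87654321,
+ *
+ *   base   = 0x87654321 / 0x200000 * 0x200000 = 0x87600000
+ *   offset = 0x87654321 - 0x87600000          = 0x54321
+ *
+ * the iATU maps the BAR to 0x87600000 and the caller reaches the
+ * original address at res->start + offset.
+ *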
+ */
+ for (retries = 0;
+ retries < LINK_WAIT_MAX_IATU_RETRIES;
+ retries++) {
+ val = sprd_pci_ep_iatu_readl(ep_dev, PCIE_ATU_CR2);
+ if (val & PCIE_ATU_ENABLE)
+ return 0;
+
+ /* wait a moment for polling ep atu enable bit */
+ usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+ }
+
+ return -EINVAL;
+}
+
+static int sprd_ep_dev_just_unmap_bar(struct sprd_pci_ep_dev *ep_dev, int bar)
+{
+ struct pci_dev *pdev;
+ struct device *dev;
+
+ pdev = ep_dev->pdev;
+ dev = &pdev->dev;
+
+ dev_dbg(dev, "ep: unmap bar = %d\n", bar);
+
+ if (ep_dev->iatu_unroll_enabled)
+ return sprd_ep_dev_unr_clear_bar(ep_dev, bar);
+
+ spin_lock(&ep_dev->set_bar_lock);
+
+ sprd_pci_ep_iatu_writel(ep_dev, PCIE_ATU_VIEWPORT,
+ PCIE_ATU_REGION_INBOUND | bar);
+ sprd_pci_ep_iatu_writel(ep_dev, PCIE_ATU_CR2,
+ (u32)(~PCIE_ATU_ENABLE));
+
+ spin_unlock(&ep_dev->set_bar_lock);
+
+ return 0;
+}
+
+static void __iomem *sprd_ep_dev_map_bar(int ep, int bar,
+ dma_addr_t cpu_addr, size_t size)
+{
+ resource_size_t offset;
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct sprd_pci_ep_dev *ep_dev;
+ void __iomem *bar_vir;
+ struct resource *res;
+
+ if (ep >= PCIE_EP_NR || !g_ep_dev[ep])
+ return NULL;
+
+ ep_dev = g_ep_dev[ep];
+ pdev = ep_dev->pdev;
+ dev = &pdev->dev;
+
+ /* the bar is already in use */
+ if (ep_dev->bar_vir[bar]) {
+ dev_err(dev, "ep: bar[%d] is used!", bar);
+ return NULL;
+ }
+
+ /* 1st, adjust the map region */
+ if (sprd_ep_dev_adjust_region(ep_dev, bar, &cpu_addr, &size, &offset))
+ return NULL;
+
+ /* then ioremap; if the remap fails, there is no need to set the bar */
+ res = &pdev->resource[bar];
+#ifndef ioremap_nocache
+#define ioremap_nocache ioremap
+#endif
+ bar_vir = ioremap_nocache(res->start, size);
+ if (!bar_vir) {
+ dev_err(dev, "ep: map error, bar=%d, addr=0x%lx, size=0x%lx\n",
+ bar,
+ (unsigned long)cpu_addr,
+ (unsigned long)size);
+ return NULL;
+ }
+
+ if (sprd_ep_dev_just_map_bar(ep_dev, bar, cpu_addr, size)) {
+ dev_err(dev, "ep: map bar=%d failed!\n", bar);
+ /* bar_vir has not been saved to ep_dev yet, unmap it directly */
+ iounmap(bar_vir);
+ return NULL;
+ }
+
+ ep_dev->bar_vir[bar] = (void __iomem *)bar_vir;
+ ep_dev->cpu_vir[bar] = (void __iomem *)(bar_vir + offset);
+ ep_dev->src_addr[bar] = res->start + offset;
+ ep_dev->target_addr[bar] = cpu_addr;
+ ep_dev->map_size[bar] = size;
+
+ return ep_dev->cpu_vir[bar];
+}
+
+static int sprd_ep_dev_unmap_bar(int ep, int bar)
+{
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct sprd_pci_ep_dev *ep_dev;
+
+ if (ep >= PCIE_EP_NR || !g_ep_dev[ep])
+ return -ENODEV;
+
+ ep_dev = g_ep_dev[ep];
+ pdev = ep_dev->pdev;
+ dev = &pdev->dev;
+
+ dev_info(dev, "ep: unmap bar = %d\n", bar);
+
+ if (!ep_dev->bar_vir[bar])
+ return -ENODEV;
+
+ sprd_ep_dev_just_unmap_bar(ep_dev, bar);
+
+ iounmap(ep_dev->bar_vir[bar]);
+ ep_dev->bar_vir[bar] = NULL;
+ ep_dev->cpu_vir[bar] = NULL;
+ ep_dev->src_addr[bar] = 0;
+ ep_dev->target_addr[bar] = 0;
+ ep_dev->map_size[bar] = 0;
+
+ return 0;
+}
+
+static void sprd_pci_ep_dev_clear_legacy_irq(struct sprd_pci_ep_dev *ep_dev)
+{
+ if (!ep_dev->legacy_addr)
+ return;
+
+ writel_relaxed(PCIE_LEGACY_CLEAR_MASK,
+ ep_dev->legacy_addr + BIT_CLR_OFFSET + PCIE_LEGACY_CLEAR_REG);
+}
+
+static irqreturn_t sprd_pci_ep_dev_irqhandler(int irq, void *dev_ptr)
+{
+ struct sprd_pci_ep_dev *ep_dev = dev_ptr;
+ struct pci_dev *pdev = ep_dev->pdev;
+ struct device *dev = &pdev->dev;
+ irq_handler_t handler;
+ u32 i, j, value;
+ int ipa_irq;
+
+ if (ep_dev->no_msi) {
+ /* clear irq */
+ sprd_pci_ep_dev_clear_legacy_irq(ep_dev);
+
+ /* workaround for IPA */
+ handler = 
ep_dev_handler[ep_dev->ep][PCIE_MSI_IPA]; + if (handler) + handler(irq, ep_dev_handler_data[ep_dev->ep][PCIE_MSI_IPA]); + + + value = BL_READL(g_irq_addr[ep_dev->ep]); + dev_dbg(dev, "ep: irq handler. irq = 0x%x, base=%d\n", value, ep_dev->base_irq); + for (i = 0; i < 32; i++) { + if (value & BIT(i)) { + /* clear iqr bit*/ + value = BL_READL(g_irq_addr[ep_dev->ep]); + value &= ~(BIT(i)); + BL_WRITEL(value,g_irq_addr[ep_dev->ep]); + j = i - ep_dev->base_irq; + if (j >= PCIE_MSI_MAX_IRQ) + continue; + + handler = ep_dev_handler[ep_dev->ep][j]; + if (handler) + handler(irq, ep_dev_handler_data[ep_dev->ep][j]); + else + ep_dev->bak_irq_status |= BIT(j); + } + } + return IRQ_HANDLED; + } + + dev_dbg(dev, "ep: irq handler. irq = %d\n", irq); + /* for ipa hw irq. */ + ipa_irq = irq - (pdev->irq + ep_dev->ipa_base_irq); + if (ipa_irq >= 0 && ipa_irq < IPA_HW_IRQ_CNT) { + handler = ep_dev_handler[ep_dev->ep][PCIE_MSI_IPA]; + if (handler) + handler(ipa_irq, ep_dev_handler_data[ep_dev->ep][PCIE_MSI_IPA]); + else + ep_dev->bak_irq_status |= BIT(PCIE_MSI_IPA); + + return IRQ_HANDLED; + } + + + irq -= (pdev->irq + ep_dev->base_irq); + if (irq >= PCIE_MSI_MAX_IRQ || irq < 0) { + dev_err(dev, "ep: error, irq = %d", irq); + return IRQ_HANDLED; + } + + handler = ep_dev_handler[ep_dev->ep][irq]; + if (handler) + handler(irq, ep_dev_handler_data[ep_dev->ep][irq]); + else + ep_dev->bak_irq_status |= BIT(irq); + + return IRQ_HANDLED; +} + +static void sprd_pci_ep_save_reg(struct sprd_pci_ep_dev *ep_dev) +{ + int i, j; + u32 (*save_reg)[PCIE_SAVE_REG_NUM]; + static struct sprd_pci_ep_dev_save *ep_save; + + ep_save = &g_ep_save[ep_dev->ep]; + save_reg = ep_save->save_reg; + + for (i = 0; i < PCIE_SAVE_REGION_NUM; i += 2) { + for (j = 0; j < PCIE_SAVE_REG_NUM; j++) { + save_reg[i][j] = + sprd_pci_ep_iatu_readl(ep_dev, + PCIE_ATU_OB_REGION(i) + + j * sizeof(u32)); + save_reg[i + 1][j] = + sprd_pci_ep_iatu_readl(ep_dev, + PCIE_ATU_IB_REGION(i) + + j * sizeof(u32)); + } + } + + ep_save->doorbell_enable = sprd_pci_ep_iatu_readl(ep_dev, + DOOR_BELL_BASE + + DOOR_BELL_ENABLE); + ep_save->doorbell_status = sprd_pci_ep_iatu_readl(ep_dev, + DOOR_BELL_BASE + + DOOR_BELL_STATUS); + ep_save->cfg_base = ep_dev->cfg_base; + + ep_save->save_succ = true; +} + +static void sprd_pci_ep_dev_backup(struct sprd_pci_ep_dev *ep_dev) +{ + struct pci_dev *pdev = ep_dev->pdev; + struct device *dev = &pdev->dev; + struct sprd_pci_ep_dev_save *ep_save; + int i; + + ep_save = &g_ep_save[ep_dev->ep]; + + /* save some member */ + ep_save->bar_res = ep_dev->bar_res; + for (i = 0; i < BAR_MAX; i++) { + if (!ep_dev->src_addr[i]) + continue; + + dev_info(dev, "ep: backup bar=%d, addr=0x%lx, size=0x%lx\n", + i, + (unsigned long)ep_save->target_addr[i], + (unsigned long)ep_save->map_size[i]); + + ep_save->bar_vir[i] = ep_dev->bar_vir[i]; + ep_save->cpu_vir[i] = ep_dev->cpu_vir[i]; + ep_save->src_addr[i] = ep_dev->src_addr[i]; + ep_save->target_addr[i] = ep_dev->target_addr[i]; + ep_save->map_size[i] = ep_dev->map_size[i]; + } + + /* save ep reg */ + sprd_pci_ep_save_reg(ep_dev); +} + +static void sprd_pci_ep_dev_restore(struct sprd_pci_ep_dev *ep_dev) +{ + struct pci_dev *pdev = ep_dev->pdev; + struct device *dev = &pdev->dev; + struct sprd_pci_ep_dev_save *ep_save; + int i; + + ep_save = &g_ep_save[ep_dev->ep]; + + /* save some member */ + ep_dev->bar_res = ep_save->bar_res; + for (i = 0; i < BAR_MAX; i++) { + if (!ep_save->src_addr[i]) + continue; + + ep_dev->bar_vir[i] = ep_save->bar_vir[i]; + ep_dev->cpu_vir[i] = ep_save->cpu_vir[i]; + 
ep_dev->src_addr[i] = ep_save->src_addr[i]; + ep_dev->target_addr[i] = ep_save->target_addr[i]; + ep_dev->map_size[i] = ep_save->map_size[i]; + + dev_info(dev, "ep: restore bar=%d, addr=0x%lx, size=0x%lx\n", + i, + (unsigned long)ep_dev->target_addr[i], + (unsigned long)ep_dev->map_size[i]); + + if (sprd_ep_dev_just_map_bar(ep_dev, + i, + ep_dev->target_addr[i], + ep_dev->map_size[i])) + dev_err(dev, "ep: restore map err i = %d.\n", i); + } +} + +static void sprd_pci_ep_notify_fn(struct work_struct *work) +{ + struct sprd_ep_dev_notify *notify; + struct sprd_pci_ep_dev *ep_dev = container_of(work, + struct sprd_pci_ep_dev, + notify_work); + + notify = &g_ep_dev_notify[ep_dev->ep]; + if (notify->notify) + notify->notify(ep_dev->event, notify->data); +} + +static int sprd_pci_ep_dev_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int i, err, irq_cnt = 0; + u32 val; + enum dev_pci_barno bar; + struct device *dev = &pdev->dev; + struct sprd_pci_ep_dev *ep_dev; + struct resource *res; + + dev_info(dev, "ep: probe\n"); + + sprd_pcie_iommu_init(dev); + + ep_dev = devm_kzalloc(dev, sizeof(*ep_dev), GFP_KERNEL); + if (!ep_dev) + return -ENOMEM; + + ep_dev->pdev = pdev; + + if (ent->device == PCI_DEVICE_ID_SPRD_ORCA) + ep_dev->ep = PCIE_EP_MODEM; + else { + dev_err(dev, "ep: Cannot support ep device = 0x%x\n", + ent->device); + return -EINVAL; + } + + err = pci_enable_device(pdev); + if (err) { + dev_err(dev, "ep: Cannot enable PCI device\n"); + return err; + } + + err = pci_request_regions(pdev, DRV_MODULE_NAME); + if (err) { + dev_err(dev, "ep: Cannot obtain PCI resources\n"); + goto err_disable_pdev; + } + + pci_set_master(pdev); + +#ifdef PCI_IRQ_MSI + irq_cnt = pci_alloc_irq_vectors(pdev, + 1, + MAX_SUPPORT_IRQ, + PCI_IRQ_MSI); +#else + irq_cnt = pci_enable_msi_range(pdev, 1, MAX_SUPPORT_IRQ); +#endif + + if (sprd_pcie_is_defective_chip()) { + ep_dev->base_irq = REQUEST_BASE_IRQ_DEFECT; + ep_dev->ipa_base_irq = IPA_HW_IRQ_BASE_DEFECT; + } else { + ep_dev->base_irq = REQUEST_BASE_IRQ; + ep_dev->ipa_base_irq = IPA_HW_IRQ_BASE; + } + + for (bar = BAR_0; bar <= BAR_5; bar++) { + res = &pdev->resource[bar]; + dev_info(dev, "ep: BAR[%d] %pR\n", bar, res); + /* only save mem bar */ + if (resource_type(res) == IORESOURCE_MEM) + ep_dev->bar[bar] = res; + } + + ep_dev->cfg_base = pci_ioremap_bar(pdev, EP_CFG_BAR); + if (!ep_dev->cfg_base) { + dev_err(dev, "ep: failed to map cfg bar.\n"); + err = -ENOMEM; + goto err_disable_msi; + } + + /* clear all 32 bit door bell */ + writel_relaxed(0x0, + ep_dev->cfg_base + DOOR_BELL_BASE + DOOR_BELL_STATUS); + + pci_set_drvdata(pdev, ep_dev); + pci_read_config_dword(ep_dev->pdev, PCIE_ATU_VIEWPORT, &val); + /* + * this atu view port reg is 0xffffffff means that the ep device + * doesn't support atu view port, we need unroll iatu registers + */ + dev_info(dev, "ep: atu_view_port val = 0x%x", val); + ep_dev->iatu_unroll_enabled = val == 0xffffffff; + + /* default , PCIE_EP_PROBE */ + ep_dev->event = PCIE_EP_PROBE; + g_ep_dev[ep_dev->ep] = ep_dev; + + if (!ep_dev->bar[BAR_1] || !ep_dev->bar[BAR_3]) { + /* only 2 bar, set PCIE_EP_PROBE_BEFORE_SPLIT_BAR */ + ep_dev->event = PCIE_EP_PROBE_BEFORE_SPLIT_BAR; + dev_info(dev, "ep:bar not ready, wait the next probe!"); + } + + /* restore all the config */ + if (ep_dev->event == PCIE_EP_PROBE) + sprd_pci_ep_dev_restore(ep_dev); + + /* start notify work */ + INIT_WORK(&ep_dev->notify_work, sprd_pci_ep_notify_fn); + schedule_work(&ep_dev->notify_work); + + if (irq_cnt < MAX_SUPPORT_IRQ) { + err = irq_cnt < 0 ? 
irq_cnt : -EINVAL; + ep_dev->no_msi = true; + dev_info(dev, "ep: failed to get MSI, err=%d, irq=%d\n", err, pdev->irq); + + ep_dev->legacy_addr = sprd_ep_map_memory(ep_dev->ep, PCIE_LEGACY_CLEAR_BASE, 0x4000); + /* request legacy irq */ + err = devm_request_irq(dev, pdev->irq, sprd_pci_ep_dev_irqhandler, + IRQF_SHARED, DRV_MODULE_NAME, ep_dev); + if (err) + dev_warn(dev, + "ep: failed to request legacy %d\n", + pdev->irq); + + } else { + ep_dev->irq_cnt = irq_cnt; + dev_info(dev, "ep: request IRQ = %d, cnt =%d\n", + pdev->irq, + ep_dev->irq_cnt); + + /* request msi irq */ + for (i = ep_dev->base_irq; + i < ep_dev->base_irq + PCIE_MSI_MAX_IRQ; + i++) { + err = devm_request_irq(dev, pdev->irq + i, + sprd_pci_ep_dev_irqhandler, + IRQF_SHARED, DRV_MODULE_NAME, ep_dev); + if (err) + dev_warn(dev, + "ep: failed to request IRQ %d for MSI %d\n", + pdev->irq + i, i + 1); + } + + #ifdef CONFIG_SPRD_IPA_PCIE_WORKROUND + for (i = ep_dev->ipa_base_irq; + i < ep_dev->ipa_base_irq + IPA_HW_IRQ_CNT; + i++) { + err = devm_request_irq(dev, pdev->irq + i, + sprd_pci_ep_dev_irqhandler, + IRQF_SHARED, DRV_MODULE_NAME, + ep_dev); + if (!err) + sprd_pcie_teardown_msi_irq(pdev->irq + i); + } + #endif + } + +#ifndef SPRD_PCIE_USE_DTS + sipa_module_init(dev); + sipa_eth_init(); + sipa_dummy_init(); +#endif + + return 0; + +err_disable_msi: + pci_disable_msi(pdev); + pci_release_regions(pdev); + +err_disable_pdev: + pci_disable_device(pdev); + + return err; +} + +static void sprd_pci_ep_dev_remove(struct pci_dev *pdev) +{ + u32 i; + struct sprd_ep_dev_notify *notify; + struct sprd_pci_ep_dev *ep_dev = pci_get_drvdata(pdev); + struct sipa_core *ctrl = sipa_get_ctrl_pointer(); + dev_info(&pdev->dev, "ep: remove\n"); + ctrl->remote_ready = false; + spipe_device_down(); + spool_device_down(); + + cancel_work_sync(&ep_dev->notify_work); + + /* first notify PCIE_EP_REMOVE */ + notify = &g_ep_dev_notify[ep_dev->ep]; + if (notify->notify) + notify->notify(PCIE_EP_REMOVE, notify->data); + + /* back up some config before remove */ + if (ep_dev->need_backup) + sprd_pci_ep_dev_backup(ep_dev); + + if (ep_dev->no_msi) { + devm_free_irq(&pdev->dev, pdev->irq, ep_dev); + } else { + for (i = ep_dev->base_irq; i < ep_dev->base_irq + PCIE_MSI_MAX_IRQ; i++) + devm_free_irq(&pdev->dev, pdev->irq + i, ep_dev); + + #ifdef CONFIG_SPRD_IPA_PCIE_WORKROUND + for (i = ep_dev->ipa_base_irq; + i < ep_dev->ipa_base_irq + IPA_HW_IRQ_CNT; + i++) + devm_free_irq(&pdev->dev, pdev->irq + i, ep_dev); + #endif + } + + pci_disable_msi(pdev); + +#ifndef SPRD_PCIE_USE_DTS + sipa_dummy_exit(); + sipa_eth_exit(); + sipa_module_exit(); +#endif + + if (ep_dev->legacy_addr) { + sprd_ep_unmap_memory(ep_dev->ep, ep_dev->legacy_addr); + ep_dev->legacy_addr = NULL; + dev_info(&ep_dev->pdev->dev, "set ep_dev->legacy_addr = %lx\n", (long)ep_dev->legacy_addr); + } + + if (ep_dev->cfg_base) { + iounmap(ep_dev->cfg_base); + ep_dev->cfg_base = NULL; + } + + pci_release_regions(pdev); + pci_disable_device(pdev); + + g_ep_dev[ep_dev->ep] = NULL; + ep_dev->bar_res = 0; +} + +static const struct pci_device_id sprd_pci_ep_dev_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_SPRD, PCI_DEVICE_ID_SPRD_ORCA) }, + { } +}; +MODULE_DEVICE_TABLE(pci, sprd_pci_ep_dev_tbl); + +#ifdef CONFIG_PM_SLEEP +static int sprd_pci_ep_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int rc; + + dev_info(dev, "suspend\n"); + + /* Exec pci PCI_D3cold one time */ + if (pdev->current_state != PCI_D0) { + dev_info(dev, "done for pm %d\n", pdev->current_state); + return 0; + } + + /* + * 
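Editor's summary: save config space while the device is still
+ * in D0, then drop to D3cold; resume() mirrors this with
+ * pci_set_power_state(PCI_D0) followed by pci_restore_state(). The
+ * same pattern for a generic pdev (illustrative sketch):
+ *
+ *   pci_save_state(pdev);
+ *   pci_set_power_state(pdev, PCI_D3cold);
+ *   ...
+ *   pci_set_power_state(pdev, PCI_D0);
+ *   pci_restore_state(pdev);
+ *
+ * 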
TODO: The HAL will ask the shared memory layer whether D3 is allowed. + */ + + /* Save the PCI configuration space of a device before suspending. */ + rc = pci_save_state(pdev); + if (rc) { + dev_err(dev, "pci_save_state error=%d\n", rc); + return rc; + } + + /* Set the power state of a PCI device. + * Transition a device to a new power state, using the device's PCI PM + * registers. + */ + rc = pci_set_power_state(pdev, PCI_D3cold); + if (rc) { + dev_err(dev, "pci_set_power_state error=%d\n", rc); + return rc; + } + return 0; +} + +static int sprd_pci_ep_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int rc; + + dev_info(dev, "resume\n"); + + /* Set the power state of a PCI device. */ + rc = pci_set_power_state(pdev, PCI_D0); + if (rc) { + dev_err(dev, "pci_set_power_state error=%d\n", rc); + return rc; + } + + /* Restore the saved state of a PCI device. */ + pci_restore_state(pdev); + + /* TODO: The HAL shall inform that the device is active. */ + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +static const struct dev_pm_ops sprd_pci_ep_pm = { + SET_SYSTEM_SLEEP_PM_OPS(sprd_pci_ep_suspend, + sprd_pci_ep_resume) +}; + +static struct pci_driver sprd_pci_ep_dev_driver = { + .name = DRV_MODULE_NAME, + .id_table = sprd_pci_ep_dev_tbl, + .probe = sprd_pci_ep_dev_probe, + .remove = sprd_pci_ep_dev_remove, + .driver = { + .pm = &sprd_pci_ep_pm, + } +}; +//module_pci_driver(sprd_pci_ep_dev_driver); + +#if defined(CONFIG_DEBUG_FS) +static void sprd_pci_ep_dev_save_show(struct seq_file *m, + struct sprd_pci_ep_dev_save *ep_save, + int ep) +{ + u32 i; + + seq_printf(m, "ep-save-%d configs:\n", ep); + seq_printf(m, "bar_res = 0x%lx\n", ep_save->bar_res); + + for (i = 0; i < BAR_MAX; i++) { + seq_printf(m, "src_addr[%d] = 0x%lx\n", + i, + (unsigned long)ep_save->src_addr[i]); + seq_printf(m, "target_addr[%d] = 0x%lx\n", + i, + (unsigned long)ep_save->target_addr[i]); + seq_printf(m, "map_size[%d] = 0x%lx\n", + i, + (unsigned long)ep_save->map_size[i]); + } +} + +static void sprd_pci_ep_dev_config_show(struct seq_file *m, + struct sprd_pci_ep_dev *ep_dev) +{ + u32 i; + void __iomem *base; + + seq_printf(m, "ep-%d configs:\n", ep_dev->ep); + + /* doorbell regs */ + seq_puts(m, "door bell regs:\n"); + base = ep_dev->cfg_base + DOOR_BELL_BASE; + + seq_printf(m, "irq_enable = 0x%08x\n irq_status = 0x%08x\n", + readl_relaxed(base + DOOR_BELL_ENABLE), + readl_relaxed(base + DOOR_BELL_STATUS)); + + /* iatu reg regs */ + seq_puts(m, "iatu regs reg:\n"); + for (i = 0; i < IATU_MAX_REGION * 2; i++) { + base = ep_dev->cfg_base + IATU_REG_BASE + i * 100; + seq_printf(m, "IATU[%d]:\n", i); + seq_printf(m, "0x%p: 0x%08x 0x%08x 0x%08x 0x%08x\n", + base, + readl_relaxed(base + 0x0), + readl_relaxed(base + 0x4), + readl_relaxed(base + 0x8), + readl_relaxed(base + 0xc)); + base += 0x10; + seq_printf(m, "0x%p: 0x%08x 0x%08x 0x%08x 0x%08x\n", + base, + readl_relaxed(base + 0x0), + readl_relaxed(base + 0x4), + readl_relaxed(base + 0x8), + readl_relaxed(base + 0x10)); + } +} + +static void sprd_pci_ep_dev_backup_show(struct seq_file *m, + struct sprd_pci_ep_dev_save *ep_save, + int ep) +{ + int i; + u32 (*save_reg)[PCIE_SAVE_REG_NUM]; + void __iomem *base; + + save_reg = ep_save->save_reg; + + seq_printf(m, "ep-%d backup configs:\n", ep); + + /* doorbell regs */ + seq_puts(m, "door bell regs:\n"); + seq_printf(m, "irq_enable = 0x%08x\n irq_status = 0x%08x\n", + ep_save->doorbell_enable, + ep_save->doorbell_status); + + /* iatu reg regs */ + seq_puts(m, "iatu regs reg:\n"); + for (i = 0; i < 
PCIE_SAVE_REGION_NUM; i++) {
+ seq_printf(m, "IATU[%d]:\n", i);
+ base = ep_save->cfg_base + IATU_REG_BASE + i * 0x100;
+
+ seq_printf(m, "0x%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ base,
+ save_reg[i][0],
+ save_reg[i][1],
+ save_reg[i][2],
+ save_reg[i][3]);
+ base += 0x10;
+ seq_printf(m, "0x%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ base,
+ save_reg[i][4],
+ save_reg[i][5],
+ save_reg[i][6],
+ save_reg[i][7]);
+ }
+}
+
+static int sprd_pci_ep_dev_show(struct seq_file *m, void *unused)
+{
+ u32 i;
+ struct sprd_pci_ep_dev *ep_dev;
+ struct sprd_pci_ep_dev_save *ep_save;
+
+ for (i = 0; i < PCIE_EP_NR; i++) {
+ /* ep_save configs */
+ ep_save = &g_ep_save[i];
+ ep_dev = g_ep_dev[i];
+
+ if (!ep_dev && !ep_save->save_succ)
+ continue;
+
+ if (ep_save)
+ sprd_pci_ep_dev_save_show(m, ep_save, i);
+
+ if (ep_dev)
+ sprd_pci_ep_dev_config_show(m, ep_dev);
+ else
+ sprd_pci_ep_dev_backup_show(m, ep_save, i);
+ }
+
+ return 0;
+}
+
+static int sprd_pci_ep_dev_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, sprd_pci_ep_dev_show, NULL);
+}
+
+static const struct file_operations sprd_pci_ep_dev_fops = {
+ .owner = THIS_MODULE,
+ .open = sprd_pci_ep_dev_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct dentry *g_ep_debugfs_root;
+
+static int sprd_pci_ep_dev_init_debugfs(void)
+{
+ /* keep the root in the file-scope variable so remove_debugfs() can free it */
+ g_ep_debugfs_root = debugfs_create_dir("ep_dev", NULL);
+
+ if (!g_ep_debugfs_root)
+ return -ENXIO;
+
+ debugfs_create_file("ep", 0444,
+ g_ep_debugfs_root,
+ NULL, &sprd_pci_ep_dev_fops);
+ return 0;
+}
+
+static void sprd_pci_ep_dev_remove_debugfs(void)
+{
+ debugfs_remove_recursive(g_ep_debugfs_root);
+}
+#endif
+
+static int __init sprd_pci_ep_dev_init(void)
+{
+ pr_info("%s %s\n", __func__, QUECTEL_SPRD_PCIE_VERSION);
+#if defined(CONFIG_DEBUG_FS)
+ sprd_pci_ep_dev_init_debugfs();
+ sipc_init_debugfs();
+#endif
+ sipc_init();
+ spipe_init();
+ spool_init();
+ modem_power_manager_init();
+ return pci_register_driver(&sprd_pci_ep_dev_driver);
+}
+
+static void __exit sprd_pci_ep_dev_exit(void)
+{
+ spipe_exit();
+ spool_exit();
+ modem_power_manager_exit();
+ pci_unregister_driver(&sprd_pci_ep_dev_driver);
+#if defined(CONFIG_DEBUG_FS)
+ sprd_pci_ep_dev_remove_debugfs();
+#endif
+ sipc_exit();
+}
+
+module_init(sprd_pci_ep_dev_init);
+module_exit(sprd_pci_ep_dev_exit);
+
+
+MODULE_DESCRIPTION("SPRD PCI EP DEVICE HOST DRIVER");
+MODULE_AUTHOR("Wenping Zhou ");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(QUECTEL_SPRD_PCIE_VERSION);
diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/pcie/sprd_pcie_quirks.c b/package/wwan/driver/quectel_SRPD_PCIE/src/pcie/sprd_pcie_quirks.c
new file mode 100644
index 000000000..70a3692f2
--- /dev/null
+++ b/package/wwan/driver/quectel_SRPD_PCIE/src/pcie/sprd_pcie_quirks.c
@@ -0,0 +1,126 @@
+/*
+ * This file contains work-arounds for many known PCI hardware
+ * bugs. Devices present only on certain architectures (host
+ * bridges et cetera) should be handled in arch-specific code.
+ *
+ * Note: any quirks for hotpluggable devices must _NOT_ be declared __init.
+ *
+ * Copyright (c) 1999 Martin Mares
+ *
+ * Init/reset quirks for USB host controllers should be in the
+ * USB quirks file, where their drivers can access/reuse it. 
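(Editor's note: the resize-BAR quirk below
+ * is registered with DECLARE_PCI_FIXUP_EARLY, so it runs right after
+ * the device is enumerated, before its BARs are sized and resources
+ * assigned. The general fixup pattern, with my_quirk a hypothetical
+ * name for illustration:
+ *
+ *   static void my_quirk(struct pci_dev *dev) { ... }
+ *   DECLARE_PCI_FIXUP_EARLY(vendor_id, device_id, my_quirk);
+ * )
+ *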
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 5,4,0 )) +#include +#endif +#include +#include +#include +#include +#include /* isa_dma_bridge_buggy */ + +#ifndef PCI_VENDOR_ID_SYNOPSYS +#define PCI_VENDOR_ID_SYNOPSYS 0x16c3 +#endif + + + /* + * It's possible that ep bar size is larger than rc allocated + * memory, so need to resize ep bar to small size. + * Original ep bar size:bar0:256MB, bar1:64kb, bar2:256MB, + * bar3: 64kb, bar4:256MB, bar5:64kb. + * resize to bar0:8MB, bar1:64kb, bar2:2MB, bar3: 64kb, + * bar4:2MB, bar5:64kb. + */ +#define SPRD_PCI_BAR0 0x10 +#define SPRD_BAR_NUM 0x6 +#define SPRD_PCI_MISC_CTRL1_OFF 0x8bc +#define SPRD_PCI_DBI_RO_WR_EN (0x1 << 0) +#define SPRD_PCI_RESIZABLE_BAR_EXTENDED_CAP_HEADER 0x260 +#define SPRD_PCI_RESIZABLE_BAR_EXTENDED_CAPID 0x15 +/* Resizable BAR Capability Register */ +#define SPRD_PCI_RESIZABLE_BAR0 0x264 +#define SPRD_PCI_RESIZABLE_BAR2 0x26c +#define SPRD_PCI_RESIZABLE_BAR4 0x274 +#define SPRD_BAR_SUPPORT_2MB (0x1 << 5) +#define SPRD_BAR_SUPPORT_4MB (0x1 << 6) +#define SPRD_BAR_SUPPORT_8MB (0x1 << 7) +/* Resizable BAR Control Register */ +#define SPRD_PCI_RESIZABLE_BAR0_CTL 0x268 +#define SPRD_PCI_RESIZABLE_BAR2_CTL 0x270 +#define SPRD_PCI_RESIZABLE_BAR4_CTL 0x278 +/* bit[13:8] is bar size */ +#define SPRD_PCI_RESIZABLE_BAR_SIZE_MASK 0x3F00 +#define SPRD_PCI_RESIZABLE_2MB (0x1 << 8) +#define SPRD_PCI_RESIZABLE_4MB (0x2 << 8) +#define SPRD_PCI_RESIZABLE_8MB (0x3 << 8) +#define SIZE(val) ((~(val & 0xFFFFFFF0)) + 1) + +static void quirk_sprd_pci_resizebar(struct pci_dev *dev) +{ + u32 val, i, backup; + + pci_read_config_dword(dev, + SPRD_PCI_RESIZABLE_BAR_EXTENDED_CAP_HEADER, &val); + if ((val & SPRD_PCI_RESIZABLE_BAR_EXTENDED_CAPID) != + SPRD_PCI_RESIZABLE_BAR_EXTENDED_CAPID) { + dev_info(&dev->dev, "%s: not support resize bar\n", __func__); + return; + } + + pci_read_config_dword(dev, SPRD_PCI_MISC_CTRL1_OFF, &val); + val |= SPRD_PCI_DBI_RO_WR_EN; + pci_write_config_dword(dev, SPRD_PCI_MISC_CTRL1_OFF, val); + + pci_read_config_dword(dev, SPRD_PCI_RESIZABLE_BAR0, &val); + pci_write_config_dword(dev, SPRD_PCI_RESIZABLE_BAR0, + val | SPRD_BAR_SUPPORT_4MB | + SPRD_BAR_SUPPORT_8MB); + pci_read_config_dword(dev, SPRD_PCI_RESIZABLE_BAR2, &val); + pci_write_config_dword(dev, SPRD_PCI_RESIZABLE_BAR2, + val | SPRD_BAR_SUPPORT_4MB | + SPRD_BAR_SUPPORT_8MB); + pci_read_config_dword(dev, SPRD_PCI_RESIZABLE_BAR4, &val); + pci_write_config_dword(dev, SPRD_PCI_RESIZABLE_BAR4, + val | SPRD_BAR_SUPPORT_4MB | + SPRD_BAR_SUPPORT_8MB); + + pci_read_config_dword(dev, SPRD_PCI_MISC_CTRL1_OFF, &val); + val &= ~SPRD_PCI_DBI_RO_WR_EN; + pci_write_config_dword(dev, SPRD_PCI_MISC_CTRL1_OFF, val); + + pci_read_config_dword(dev, SPRD_PCI_RESIZABLE_BAR0_CTL, &val); + pci_write_config_dword(dev, SPRD_PCI_RESIZABLE_BAR0_CTL, + (val & (~SPRD_PCI_RESIZABLE_BAR_SIZE_MASK)) | + SPRD_PCI_RESIZABLE_4MB); + pci_read_config_dword(dev, SPRD_PCI_RESIZABLE_BAR2_CTL, &val); + pci_write_config_dword(dev, SPRD_PCI_RESIZABLE_BAR2_CTL, + (val & (~SPRD_PCI_RESIZABLE_BAR_SIZE_MASK)) | + SPRD_PCI_RESIZABLE_4MB); + pci_read_config_dword(dev, SPRD_PCI_RESIZABLE_BAR4_CTL, &val); + pci_write_config_dword(dev, SPRD_PCI_RESIZABLE_BAR4_CTL, + (val & (~SPRD_PCI_RESIZABLE_BAR_SIZE_MASK)) | + SPRD_PCI_RESIZABLE_4MB); + + for (i = 0; i < SPRD_BAR_NUM; i++) { + pci_read_config_dword(dev, SPRD_PCI_BAR0 + i * 4, &backup); + pci_write_config_dword(dev, SPRD_PCI_BAR0 + i * 4, 0xFFFFFFFF); + 
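/*
+ * Editor's note: this is the classic BAR sizing probe -- write all
+ * ones, read back, and the device forces the low (size - 1) address
+ * bits to zero, so SIZE(val) = ~(val & 0xFFFFFFF0) + 1 recovers the
+ * size. E.g. a read-back of 0xFFC00000 decodes to 0x400000 (4 MB).
+ */
+ 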
pci_read_config_dword(dev, SPRD_PCI_BAR0 + i * 4, &val); + pci_write_config_dword(dev, SPRD_PCI_BAR0 + i * 4, backup); + + dev_info(&dev->dev, "%s: bar%d size 0x%x\n", + __func__, i, SIZE(val)); + } +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SYNOPSYS, 0xabcd, quirk_sprd_pci_resizebar); \ No newline at end of file diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/power_manager/Makefile b/package/wwan/driver/quectel_SRPD_PCIE/src/power_manager/Makefile new file mode 100644 index 000000000..6d5c49962 --- /dev/null +++ b/package/wwan/driver/quectel_SRPD_PCIE/src/power_manager/Makefile @@ -0,0 +1 @@ +obj-y += power_manager.o diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/power_manager/power_manager.c b/package/wwan/driver/quectel_SRPD_PCIE/src/power_manager/power_manager.c new file mode 100644 index 000000000..077c31ef9 --- /dev/null +++ b/package/wwan/driver/quectel_SRPD_PCIE/src/power_manager/power_manager.c @@ -0,0 +1,964 @@ +/* + * Copyright (C) 2019 Spreadtrum Communications Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../include/sprd_mpm.h" +#include "../include/sipc.h" +/* + * The data struct of modem power manager. + */ +struct sprd_mpm_data { +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,18,0 )) + struct wakeup_source *ws; +#else + struct wakeup_source ws; +#endif + struct list_head pms_list; + struct timer_list timer; + spinlock_t mpm_lock; + char name[20]; + const char *last_name; + unsigned int dst; + unsigned int up_cnt; + unsigned int awake_cnt; + unsigned int wakelock_cnt; + unsigned int mpm_state; + unsigned long expires; + unsigned int later_idle; + + /* resource ops functions */ + int (*wait_resource)(unsigned int dst, int timeout); + int (*request_resource)(unsigned int dst); + int (*release_resource)(unsigned int dst); + + struct work_struct release_res_work; + struct work_struct request_res_work; +}; + +/* + * Save all the instance of mpm in here. + */ +static struct sprd_mpm_data *g_sprd_mpm[SIPC_ID_NR]; + +/** + * sprd_mpm_print_awake + * print the wake up list to known who prevent system sleep. + */ +static void sprd_mpm_print_awake(struct sprd_mpm_data *mpm) +{ + struct sprd_pms *pms; + char *awake_info; + int len = 0, max_len = 512; + + awake_info = kmalloc(max_len, GFP_KERNEL); + if (!awake_info) + return; + + /* print pms list */ + list_for_each_entry(pms, &mpm->pms_list, entry) { + if (!pms->awake && pms->pre_awake_cnt == pms->awake_cnt) + continue; + + pms->pre_awake_cnt = pms->awake_cnt; + snprintf(awake_info + len, + max_len - len, + "%s is awake, awake_cnt = %d\n", + pms->name, + pms->awake_cnt); + len = strlen(awake_info); + } + + if (len) + pr_info("mpm: %s\n", awake_info); + + kfree(awake_info); +} + +/** + * sprd_mpm_pm_event + * monitor the PM_SUSPEND_PREPARE event. + */ +static int sprd_mpm_pm_event(struct notifier_block *notifier, + unsigned long pm_event, void *unused) +{ + unsigned int i; + struct sprd_mpm_data *cur; + + switch (pm_event) { + case PM_SUSPEND_PREPARE: + case PM_POST_SUSPEND: + /* check if has wake lock. 
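On both PM_SUSPEND_PREPARE and
+ * PM_POST_SUSPEND we walk every mpm instance and log which pms
+ * sources still hold the wakeup source; this is what surfaces as
+ * "... is awake, awake_cnt = N" in the kernel log when suspend is
+ * blocked (example output, name illustrative:
+ * "mpm: spipe is awake, awake_cnt = 3").
+ *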
 */
+ for (i = 0; i < SIPC_ID_NR; i++) {
+ if (!g_sprd_mpm[i])
+ continue;
+
+ cur = g_sprd_mpm[i];
+ sprd_mpm_print_awake(cur);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+/*
+ * The pm event notify data, for the register pm notifier.
+ */
+static struct notifier_block sprd_mpm_notifier_block = {
+ .notifier_call = sprd_mpm_pm_event,
+};
+
+/**
+ * sprd_mpm_request_resource
+ * request resource.
+ */
+static void sprd_mpm_request_resource(struct sprd_mpm_data *mpm)
+{
+ if (mpm->request_resource)
+ schedule_work(&mpm->request_res_work);
+}
+
+/**
+ * sprd_mpm_release_resource
+ * release resource.
+ */
+static void sprd_mpm_release_resource(struct sprd_mpm_data *mpm)
+{
+ if (mpm->release_resource)
+ schedule_work(&mpm->release_res_work);
+}
+
+/**
+ * sprd_mpm_wait_resource - wait for the resource.
+ */
+static int sprd_mpm_wait_resource(struct sprd_mpm_data *mpm, int timeout)
+{
+ int ret = 0;
+
+ if (mpm->wait_resource) {
+ ret = mpm->wait_resource(mpm->dst, timeout);
+ if (ret < 0 && ret != -ERESTARTSYS && timeout)
+ pr_err("mpm: %s wait resource, ret=%d, timeout=%d.\n",
+ mpm->name, ret, timeout);
+ }
+
+ return ret;
+}
+
+/**
+ * sprd_mpm_active
+ * set the state to busy.
+ */
+static void sprd_mpm_active(struct sprd_mpm_data *mpm)
+{
+ pr_debug("mpm: %s active, set state to busy.\n", mpm->name);
+
+ mpm->mpm_state = SPRD_MPM_BUSY;
+ sprd_mpm_request_resource(mpm);
+}
+
+/**
+ * sprd_mpm_deactive
+ * del the idle timer,
+ * set the state to idle.
+ */
+static void sprd_mpm_deactive(struct sprd_mpm_data *mpm)
+{
+ pr_debug("mpm: %s deactive, set state to idle.\n", mpm->name);
+
+ mpm->mpm_state = SPRD_MPM_IDLE;
+ mpm->expires = 0;
+ sprd_mpm_release_resource(mpm);
+}
+
+/**
+ * sprd_mpm_start_deactive
+ * start the deactive timer.
+ */
+static void sprd_mpm_start_deactive(struct sprd_mpm_data *mpm)
+{
+ pr_debug("mpm: %s start deactive.\n", mpm->name);
+
+ mpm->expires = jiffies + msecs_to_jiffies(mpm->later_idle);
+ if (!mpm->expires)
+ mpm->expires = 1;
+
+ mod_timer(&mpm->timer, mpm->expires);
+}
+
+/**
+ * sprd_mpm_request_res_work_fn
+ * do the request resource call in here.
+ */
+static void sprd_mpm_request_res_work_fn(struct work_struct *work)
+{
+ struct sprd_mpm_data *mpm = container_of(work, struct sprd_mpm_data,
+ request_res_work);
+ int ret;
+
+ pr_debug("mpm: %s request res work.\n", mpm->name);
+
+ ret = mpm->request_resource(mpm->dst);
+ if (ret)
+ pr_err("mpm: %s request res, ret = %d.\n", mpm->name, ret);
+}
+
+/**
+ * sprd_mpm_release_res_work_fn
+ * do the release resource call in here.
+ */
+static void sprd_mpm_release_res_work_fn(struct work_struct *work)
+{
+ struct sprd_mpm_data *mpm = container_of(work, struct sprd_mpm_data,
+ release_res_work);
+ int ret;
+
+ pr_debug("mpm: %s release res work.\n", mpm->name);
+
+ ret = mpm->release_resource(mpm->dst);
+ if (ret)
+ pr_err("mpm: %s release res, ret = %d.\n", mpm->name, ret);
+}
+
+/**
+ * sprd_mpm_deactive_timer_fn
+ * if there has been no modem resource request within a period of
+ * time (mpm->later_idle), we consider the modem resource no longer
+ * needed and set the state to idle. 
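+ *
+ * Illustrative timeline (editor's note), with later_idle = 1000 ms:
+ *
+ *   t0           last sprd_mpm_down()  -> up_cnt hits 0, timer armed
+ *   t0 + 300 ms  sprd_mpm_up()         -> timer cancelled, stays BUSY
+ *   t1           last sprd_mpm_down()  -> timer re-armed
+ *   t1 + 1000 ms timer fires           -> sprd_mpm_deactive(), now IDLE
+ *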
+ */
+static void sprd_mpm_deactive_timer_fn(
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 4,10,0 ))
+ unsigned long data)
+{
+ struct sprd_mpm_data *mpm = (struct sprd_mpm_data *)data;
+#else
+ struct timer_list *t)
+{
+ struct sprd_mpm_data *mpm = from_timer(mpm, t, timer);
+#endif
+
+ unsigned long flags;
+
+ pr_debug("mpm: %s deactive timer.\n", mpm->name);
+
+ spin_lock_irqsave(&mpm->mpm_lock, flags);
+ /* expires is 0, means the timer has been cancelled. */
+ if (mpm->expires)
+ sprd_mpm_deactive(mpm);
+ spin_unlock_irqrestore(&mpm->mpm_lock, flags);
+}
+
+/**
+ * sprd_pms_cancel_timer
+ * cancel the pms wakelock timer.
+ */
+static void sprd_pms_cancel_timer(struct sprd_pms *pms)
+{
+ unsigned long flags;
+ bool print = false;
+
+ spin_lock_irqsave(&pms->expires_lock, flags);
+ if (pms->expires) {
+ print = true;
+ pms->expires = 0;
+ del_timer(&pms->wake_timer);
+ }
+ spin_unlock_irqrestore(&pms->expires_lock, flags);
+
+ if (print)
+ pr_debug("pms: %s del timer.\n", pms->name);
+
+}
+
+/**
+ * sprd_mpm_cancel_timer
+ * cancel the deactive timer.
+ */
+static void sprd_mpm_cancel_timer(struct sprd_mpm_data *mpm)
+{
+ if (mpm->expires) {
+ pr_debug("mpm: %s del timer.\n", mpm->name);
+
+ mpm->expires = 0;
+ del_timer(&mpm->timer);
+ }
+}
+
+/**
+ * sprd_mpm_up
+ * modem power manager power up.
+ */
+static void sprd_mpm_up(struct sprd_mpm_data *mpm, const char *name)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mpm->mpm_lock, flags);
+
+ /* first cancel the deactive timer */
+ sprd_mpm_cancel_timer(mpm);
+ mpm->last_name = name;
+
+ mpm->up_cnt++;
+ /* when up_cnt changes from 0 to 1, get ready to active the pms.
+ * Even though the cnt was 0, the state may still be busy after an
+ * earlier down, so we must check that the mpm state is really idle.
+ */
+ if (mpm->up_cnt == 1 &&
+ mpm->mpm_state == SPRD_MPM_IDLE)
+ sprd_mpm_active(mpm);
+
+ spin_unlock_irqrestore(&mpm->mpm_lock, flags);
+
+ pr_debug("mpm: %s up, up_cnt=%d.\n", mpm->name, mpm->up_cnt);
+}
+
+/**
+ * sprd_mpm_down
+ * modem power manager power down.
+ */
+static void sprd_mpm_down(struct sprd_mpm_data *mpm, bool immediately)
+{
+ unsigned long flags;
+
+ /*
+ * when the up_cnt count changes from 1 to 0,
+ * start to deactive the pms.
+ */
+ spin_lock_irqsave(&mpm->mpm_lock, flags);
+ mpm->up_cnt--;
+ if (!mpm->up_cnt) {
+ if (mpm->later_idle && !immediately)
+ sprd_mpm_start_deactive(mpm);
+ else
+ sprd_mpm_deactive(mpm);
+ }
+ spin_unlock_irqrestore(&mpm->mpm_lock, flags);
+
+ pr_debug("mpm: %s down, up_cnt=%d.\n", mpm->name, mpm->up_cnt);
+}
+
+/**
+ * sprd_mpm_stay_awake
+ * modem power manager stay awake.
+ */
+static void sprd_mpm_stay_awake(struct sprd_mpm_data *mpm)
+{
+ unsigned long flags;
+
+ /*
+ * when wakelock_cnt changes from 0 to 1,
+ * get the system wake lock.
+ */
+ spin_lock_irqsave(&mpm->mpm_lock, flags);
+ mpm->wakelock_cnt++;
+ if (mpm->wakelock_cnt == 1) {
+ mpm->awake_cnt++;
+#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,18,0 ))
+ __pm_stay_awake(mpm->ws);
+#else
+ __pm_stay_awake(&mpm->ws);
+#endif
+ }
+ spin_unlock_irqrestore(&mpm->mpm_lock, flags);
+
+ pr_debug("mpm: %s wake, wake_cnt=%d\n",
+ mpm->name, mpm->wakelock_cnt);
+}
+
+/**
+ * sprd_mpm_relax
+ * modem power manager relax wakelock.
+ */
+static void sprd_mpm_relax(struct sprd_mpm_data *mpm)
+{
+ unsigned long flags;
+
+ /*
+ * when wakelock_cnt changes from 1 to 0,
+ * release the system wake lock. 
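+ * (Counting is two-level: each pms counts its own users, and only
+ * the pms 0<->1 transitions propagate to the shared mpm counters,
+ * so one leaked sprd_pms_stay_awake() holds the whole system awake.)
+ *
+ * Editor's usage sketch (hypothetical caller, names illustrative):
+ *
+ *   struct sprd_pms *pms = sprd_pms_create(SIPC_ID_MINIAP, "demo", true);
+ *   sprd_pms_request_wakelock_period(pms, 500);  // auto-relax in 500 ms
+ *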
+ */ + spin_lock_irqsave(&mpm->mpm_lock, flags); + mpm->wakelock_cnt--; + if (!mpm->wakelock_cnt) +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,18,0 )) + __pm_relax(mpm->ws); +#else + __pm_relax(&mpm->ws); +#endif + spin_unlock_irqrestore(&mpm->mpm_lock, flags); + + pr_debug("mpm: %s relax wake, wake_cnt=%d\n", + mpm->name, mpm->wakelock_cnt); +} + +/** + * sprd_pms_do_up_single + * do pms power up. + */ +static void sprd_pms_do_up_single(struct sprd_pms *pms) +{ + struct sprd_mpm_data *mpm = (struct sprd_mpm_data *)pms->data; + + /* + * when active_cnt is change form 0 to 1, mpm up. + */ + pms->active_cnt++; + if (pms->active_cnt == 1) + sprd_mpm_up(mpm, pms->name); + + pr_debug("pms: %s up, active_cnt=%d.\n", + pms->name, pms->active_cnt); +} + +/** + * sprd_pms_do_up_multi + * do pms power up. + */ +static void sprd_pms_do_up_multi(struct sprd_pms *pms) +{ + struct sprd_mpm_data *mpm = (struct sprd_mpm_data *)pms->data; + unsigned long flags; + bool active = false; + + /* + * when active_cnt is change form 0 to 1, mpm up. + */ + spin_lock_irqsave(&pms->active_lock, flags); + + pms->active_cnt++; + if (pms->active_cnt == 1) + active = true; + + spin_unlock_irqrestore(&pms->active_lock, flags); + + pr_debug("pms: %s up, active_cnt=%d.\n", + pms->name, pms->active_cnt); + + if (active) + sprd_mpm_up(mpm, pms->name); +} + +static void sprd_pms_do_up(struct sprd_pms *pms) +{ + if (pms->multitask) + sprd_pms_do_up_multi(pms); + else + sprd_pms_do_up_single(pms); +} + +/** + * sprd_pms_do_down_single + * do pms power down. + */ +static void sprd_pms_do_down_single(struct sprd_pms *pms, bool immediately) +{ + struct sprd_mpm_data *mpm = (struct sprd_mpm_data *)pms->data; + /* + * when active_cnt is change form 1 to 0, mpm down. + */ + if (pms->active_cnt > 0) { + pms->active_cnt--; + if (pms->active_cnt == 0) + sprd_mpm_down(mpm, immediately); + } + + pr_debug("pms: %s down, active_cnt=%d.\n", + pms->name, pms->active_cnt); +} + +/** + * sprd_pms_do_down + * do pms power down. + */ +static void sprd_pms_do_down_multi(struct sprd_pms *pms, bool immediately) +{ + struct sprd_mpm_data *mpm = (struct sprd_mpm_data *)pms->data; + unsigned long flags; + bool deactive = false; + + /* + * when active_cnt is change form 1 to 0, mpm down. + */ + spin_lock_irqsave(&pms->active_lock, flags); + + if (pms->active_cnt > 0) { + pms->active_cnt--; + if (pms->active_cnt == 0) + deactive = true; + } + + spin_unlock_irqrestore(&pms->active_lock, flags); + + pr_debug("pms: %s down, active_cnt=%d.\n", + pms->name, pms->active_cnt); + + if (deactive) + sprd_mpm_down(mpm, immediately); +} + +static void sprd_pms_do_down(struct sprd_pms *pms, bool immediately) +{ + if (pms->multitask) + sprd_pms_do_down_multi(pms, immediately); + else + sprd_pms_do_down_single(pms, immediately); +} + +/** + * sprd_pms_stay_awake + * power manger source stay awake. + */ +static void sprd_pms_stay_awake(struct sprd_pms *pms) +{ + struct sprd_mpm_data *mpm = (struct sprd_mpm_data *)pms->data; + + pr_debug("pms: %s stay awake.\n", pms->name); + + pms->awake_cnt++; + if (!pms->awake) { + pms->awake = true; + sprd_mpm_stay_awake(mpm); + } +} + +/** + * sprd_pms_relax + * power manger source release wakelock. 
+ */ +static void sprd_pms_relax(struct sprd_pms *pms) +{ + struct sprd_mpm_data *mpm = (struct sprd_mpm_data *)pms->data; + + pr_debug("pms: %s relax awake.\n", pms->name); + + if (pms->awake) { + pms->awake = false; + sprd_mpm_relax(mpm); + } +} + +/** + * sprd_pms_relax_wakelock_timer + * the timer process function of pms delay release wakelock. + */ +static void sprd_pms_relax_wakelock_timer( +#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 4,10,0 )) + unsigned long data) +{ + struct sprd_pms *pms = (struct sprd_pms *)data; +#else + struct timer_list *t) +{ + struct sprd_pms *pms = from_timer(pms, t, wake_timer); +#endif + + unsigned long flags; + bool relax = false; + + pr_debug("pms: %s timer down.\n", pms->name); + + spin_lock_irqsave(&pms->expires_lock, flags); + /* + * if jiffies < pms->expires, mpm called has been canceled, + * don't call sprd_pms_down. + */ + if (pms->expires && time_after_eq(jiffies, pms->expires)) { + pms->expires = 0; + relax = true; + } + spin_unlock_irqrestore(&pms->expires_lock, flags); + + if (relax) + sprd_pms_relax(pms); +} + +int sprd_mpm_create(unsigned int dst, const char *name, + unsigned int later_idle) +{ + struct sprd_mpm_data *mpm; + + if (dst >= SIPC_ID_NR) + return -EINVAL; + + mpm = kzalloc(sizeof(*mpm), GFP_KERNEL); + if (!mpm) + return -ENOMEM; + + snprintf(mpm->name, sizeof(mpm->name), "%s-mpm-%d", name, dst); + + mpm->dst = dst; + mpm->later_idle = later_idle; + + spin_lock_init(&mpm->mpm_lock); + INIT_LIST_HEAD(&mpm->pms_list); +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,18,0 )) + mpm->ws = wakeup_source_register(NULL, mpm->name); +#else + wakeup_source_init(&mpm->ws, mpm->name); +#endif + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 4,10,0 )) + setup_timer(&mpm->timer, + sprd_mpm_deactive_timer_fn, + (unsigned long)mpm); +#else + timer_setup(&mpm->timer, + sprd_mpm_deactive_timer_fn, + 0); +#endif + + INIT_WORK(&mpm->request_res_work, sprd_mpm_request_res_work_fn); + INIT_WORK(&mpm->release_res_work, sprd_mpm_release_res_work_fn); + + g_sprd_mpm[dst] = mpm; + + return 0; +} + +int sprd_mpm_init_resource_ops(unsigned int dst, + int (*wait_resource)(unsigned int dst, + int timeout), + int (*request_resource)(unsigned int dst), + int (*release_resource)(unsigned int dst)) +{ + struct sprd_mpm_data *mpm; + + if (dst >= SIPC_ID_NR) + return -EINVAL; + + mpm = g_sprd_mpm[dst]; + if (!mpm) + return -ENODEV; + + mpm->wait_resource = wait_resource; + mpm->request_resource = request_resource; + mpm->release_resource = release_resource; + + return 0; +} + +int sprd_mpm_destroy(unsigned int dst) +{ + struct sprd_pms *pms, *temp; + struct sprd_mpm_data *mpm; + unsigned long flags; + + if (dst >= SIPC_ID_NR) + return -EINVAL; + + mpm = g_sprd_mpm[dst]; + if (!mpm) + return -ENODEV; + + sprd_mpm_cancel_timer(mpm); + cancel_work_sync(&mpm->request_res_work); + cancel_work_sync(&mpm->release_res_work); + + spin_lock_irqsave(&mpm->mpm_lock, flags); + list_for_each_entry_safe(pms, + temp, + &mpm->pms_list, + entry) { + sprd_pms_cancel_timer(pms); + list_del(&pms->entry); + } + spin_unlock_irqrestore(&mpm->mpm_lock, flags); + + kfree(mpm); + g_sprd_mpm[dst] = NULL; + + return 0; +} + +struct sprd_pms *sprd_pms_create(unsigned int dst, + const char *name, bool multitask) +{ + unsigned long flags; + struct sprd_pms *pms; + struct sprd_mpm_data *mpm; + + if (dst >= SIPC_ID_NR) + return NULL; + + mpm = g_sprd_mpm[dst]; + if (!mpm) { + pr_err("mpm: %s pms init failed, dst=%d.\n", name, dst); + return NULL; + } + + pms = kzalloc(sizeof(*pms), GFP_KERNEL); + if (!pms) 
+ return NULL; + + pms->multitask = multitask; + pms->name = name; + pms->data = (void *)mpm; + + spin_lock_init(&pms->expires_lock); + spin_lock_init(&pms->active_lock); + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 4,10,0 )) + setup_timer(&pms->wake_timer, + sprd_pms_relax_wakelock_timer, (unsigned long)pms); +#else + timer_setup(&pms->wake_timer, + sprd_pms_relax_wakelock_timer, 0); +#endif + + spin_lock_irqsave(&mpm->mpm_lock, flags); + list_add(&pms->entry, &mpm->pms_list); + spin_unlock_irqrestore(&mpm->mpm_lock, flags); + + return pms; +} + +void sprd_pms_destroy(struct sprd_pms *pms) +{ + unsigned long flags; + struct sprd_mpm_data *mpm; + + if (pms) { + sprd_pms_cancel_timer(pms); + mpm = (struct sprd_mpm_data *)pms->data; + spin_lock_irqsave(&mpm->mpm_lock, flags); + list_del(&pms->entry); + spin_unlock_irqrestore(&mpm->mpm_lock, flags); + kfree(pms); + } +} + +/** + * sprd_pms_request_resource - request mpm resource + * + * @pms, the point of this pms. + * @timeout, in ms. + * + * Returns: + * 0 resource ready, + * < 0 resoure not ready, + * -%ERESTARTSYS if it was interrupted by a signal. + */ +int sprd_pms_request_resource(struct sprd_pms *pms, int timeout) +{ + int ret; + struct sprd_mpm_data *mpm; + + if (!pms) + return -EINVAL; + + sprd_pms_do_up(pms); + + /* wait resource */ + mpm = (struct sprd_mpm_data *)pms->data; + ret = sprd_mpm_wait_resource(mpm, timeout); + if (ret) + sprd_pms_do_down(pms, false); + + return ret; +} + +/** + * sprd_pms_release_resource - release mpm resource. + * + * @pms, the point of this pms. + */ +void sprd_pms_release_resource(struct sprd_pms *pms) +{ + if (pms) + sprd_pms_do_down(pms, false); +} + +/** + * sprd_pms_request_wakelock - request wakelock + * + * @pms, the point of this pms. + */ +void sprd_pms_request_wakelock(struct sprd_pms *pms) +{ + if (pms) { + sprd_pms_cancel_timer(pms); + sprd_pms_stay_awake(pms); + } +} + +/** + * sprd_pms_release_wakelock - release wakelock + * + * @pms, the point of this pms. + */ +void sprd_pms_release_wakelock(struct sprd_pms *pms) +{ + if (pms) { + sprd_pms_cancel_timer(pms); + sprd_pms_relax(pms); + } +} + +/** + * sprd_pms_request_wakelock_period - + * request wake lock, and will auto reaslse in msec ms. + * + * @pms, the point of this pms. + * @msec, will auto reaslse in msec ms + */ +void sprd_pms_request_wakelock_period(struct sprd_pms *pms, unsigned int msec) +{ + sprd_pms_request_wakelock(pms); + sprd_pms_release_wakelock_later(pms, msec); +} + +/** + * sprd_pms_release_wakelock_later - release wakelock later. + * + * @pms, the point of this pms. + * @msec, later time (in ms). 
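+ *
+ * Editor's note: each call re-arms wake_timer with the new deadline,
+ * so back-to-back calls keep pushing the release out, and the timer
+ * callback only relaxes the wakelock once the saved expires time has
+ * actually passed.
+ *
+ * Typical usage (illustrative):
+ *
+ *   sprd_pms_request_wakelock(pms);
+ *   sprd_pms_release_wakelock_later(pms, 200);   // relax in ~200 ms
+ *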
+ */ +void sprd_pms_release_wakelock_later(struct sprd_pms *pms, + unsigned int msec) +{ + unsigned long expires; + unsigned long flags; + + if (pms) { + pr_debug("pms: %s release wakelock after %d ms.\n", + pms->name, msec); + + spin_lock_irqsave(&pms->expires_lock, flags); + expires = jiffies + msecs_to_jiffies(msec); + if (!expires) + expires = 1; + + /* always update the timer with new time */ + pms->expires = expires; + mod_timer(&pms->wake_timer, expires); + spin_unlock_irqrestore(&pms->expires_lock, flags); + } +} + +void sprd_pms_power_up(struct sprd_pms *pms) +{ + if (pms) + sprd_pms_do_up(pms); +} + +void sprd_pms_power_down(struct sprd_pms *pms, bool immediately) +{ + if (pms) + sprd_pms_do_down(pms, immediately); +} + +#if defined(CONFIG_DEBUG_FS) +static int sprd_mpm_stats_show(struct seq_file *m, void *unused) +{ + unsigned long flags; + struct sprd_pms *pms; + struct sprd_mpm_data *cur; + unsigned int i, ms; + + seq_puts(m, "---------------------------------------------\n"); + seq_puts(m, "All mpm list:\n"); + + for (i = 0; i < SIPC_ID_NR; i++) { + if (!g_sprd_mpm[i]) + continue; + + cur = g_sprd_mpm[i]; + seq_puts(m, "------------------------------------\n"); + seq_printf(m, "mpm = %s info:\n", cur->name); + seq_printf(m, "last up module = %s info:\n", + cur->last_name ? cur->last_name : "null"); + + if (cur->expires > 0) { + ms = jiffies_to_msecs(cur->expires - jiffies); + seq_printf(m, "left %d ms to idle\n", ms); + } + + seq_printf(m, "up_cnt=%d, state=%d.\n", + cur->up_cnt, cur->mpm_state); + seq_printf(m, "wakelock_cnt=%d, awake_cnt=%d\n", + cur->wakelock_cnt, cur->awake_cnt); + seq_puts(m, "------------------------------------\n"); + + seq_puts(m, "active pms list:\n"); + spin_lock_irqsave(&cur->mpm_lock, flags); + list_for_each_entry(pms, &cur->pms_list, entry) { + if (!pms->active_cnt && !pms->awake) + continue; + + seq_printf(m, " %s: active_cnt=%d, awake=%d\n", + pms->name, pms->active_cnt, pms->awake); + } + spin_unlock_irqrestore(&cur->mpm_lock, flags); + } + + seq_puts(m, "---------------------------------------------\n"); + + return 0; +} + +static int sprd_mpm_stats_open(struct inode *inode, struct file *file) +{ + return single_open(file, sprd_mpm_stats_show, NULL); +} + +static const struct file_operations sprd_mpm_stats_fops = { + .owner = THIS_MODULE, + .open = sprd_mpm_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int sprd_mpm_init_debugfs(void) +{ + struct dentry *root = debugfs_create_dir("mpm", NULL); + + if (!root) + return -ENXIO; + + debugfs_create_file("power_manage", 0444, + (struct dentry *)root, + NULL, &sprd_mpm_stats_fops); + return 0; +} +#endif + + +int modem_power_manager_init(void) +{ + register_pm_notifier(&sprd_mpm_notifier_block); + +#if defined(CONFIG_DEBUG_FS) + sprd_mpm_init_debugfs(); +#endif + + return 0; +} +EXPORT_SYMBOL(modem_power_manager_init); + +void modem_power_manager_exit(void) +{ + unregister_pm_notifier(&sprd_mpm_notifier_block); +} +EXPORT_SYMBOL(modem_power_manager_exit); diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/pwake_function/ipq4019/pcie-qcom.c b/package/wwan/driver/quectel_SRPD_PCIE/src/pwake_function/ipq4019/pcie-qcom.c new file mode 100755 index 000000000..fcd0d30a2 --- /dev/null +++ b/package/wwan/driver/quectel_SRPD_PCIE/src/pwake_function/ipq4019/pcie-qcom.c @@ -0,0 +1,2237 @@ +/* + * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * Copyright 2015 Linaro Limited. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pcie-designware.h" + +#define PCIE20_PARF_SYS_CTRL 0x00 +#define ECAM_BLOCKER_EN_RANGE2 BIT(30) +#define MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN BIT(29) +#define ECAM_REMOVE_OFFSET_EN BIT(27) +#define ECAM_BLOCKER_EN BIT(26) +#define MST_WAKEUP_EN BIT(13) +#define SLV_WAKEUP_EN BIT(12) +#define MSTR_ACLK_CGC_DIS BIT(10) +#define SLV_ACLK_CGC_DIS BIT(9) +#define CORE_CLK_CGC_DIS BIT(6) +#define AUX_PWR_DET BIT(4) +#define CORE_CLK_2AUX_CLK_MUX_DIS BIT(3) +#define L23_CLK_RMV_DIS BIT(2) +#define L1_CLK_RMV_DIS BIT(1) + +#define PCIE20_PARF_Q2A_FLUSH 0x1AC + +#define PCIE20_PARF_LTSSM 0x1B0 +#define LTSSM_EN (1 << 8) + +#define PCIE20_PARF_PHY_CTRL 0x40 +#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK (0x1f << 16) +#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) (x << 16) + +#define PCIE20_PARF_PHY_REFCLK 0x4C +#define REF_SSP_EN BIT(16) +#define REF_USE_PAD BIT(12) + +#define PCIE20_PARF_DBI_BASE_ADDR 0x168 +#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16c +#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178 + +#define PARF_MHI_CLOCK_RESET_CTRL 0x174 +#define BYPASS BIT(4) +#define MSTR_AXI_CLK_EN BIT(1) +#define AHB_CLK_EN BIT(0) + +#define PARF_BLOCK_SLV_AXI_WR_BASE 0x360 +#define PARF_BLOCK_SLV_AXI_WR_LIMIT 0x368 +#define PARF_BLOCK_SLV_AXI_RD_BASE 0x370 +#define PARF_BLOCK_SLV_AXI_RD_LIMIT 0x378 +#define PARF_ECAM_BASE 0x380 +#define PARF_ECAM_OFFSET_REMOVAL_BASE 0x388 +#define PARF_ECAM_OFFSET_REMOVAL_LIMIT 0x390 +#define PARF_BLOCK_SLV_AXI_WR_BASE_2 0x398 +#define PARF_BLOCK_SLV_AXI_WR_LIMIT_2 0x3A0 +#define PARF_BLOCK_SLV_AXI_RD_BASE_2 0x3A8 +#define PARF_BLOCK_SLV_AXI_RD_LIMIT_2 0x3B0 +#define PARF_BDF_TO_SID_TABLE 0x2000 + +#define PCIE_PARF_DEVICE_TYPE 0x1000 +#define DEVICE_TYPE_RC 0x4 + +#define PCIE20_ELBI_SYS_CTRL 0x04 +#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0) +#define PCIE20_ELBI_SYS_STTS 0x08 +#define XMLH_LINK_UP 0x400 + +#define PCIE20_CAP 0x70 +#define PCIE20_CAP_LINKCTRLSTATUS (PCIE20_CAP + 0x10) + +#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818 +#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c + +#define PCIE20_PLR_IATU_VIEWPORT 0x900 +#define PCIE20_PLR_IATU_REGION_OUTBOUND (0x0 << 31) +#define PCIE20_PLR_IATU_REGION_INDEX(x) (x << 0) + +#define PCIE20_PLR_IATU_CTRL1 0x904 +#define PCIE20_PLR_IATU_TYPE_CFG0 (0x4 << 0) +#define PCIE20_PLR_IATU_TYPE_MEM (0x0 << 0) + +#define PCIE20_PLR_IATU_CTRL2 0x908 +#define PCIE20_PLR_IATU_ENABLE BIT(31) + +#define PCIE20_PLR_IATU_LBAR 0x90C +#define PCIE20_PLR_IATU_UBAR 0x910 +#define PCIE20_PLR_IATU_LAR 0x914 +#define PCIE20_PLR_IATU_LTAR 0x918 +#define PCIE20_PLR_IATU_UTAR 0x91c + +#define MSM_PCIE_DEV_CFG_ADDR 0x01000000 +#define PCIE20_CAP_LINK_CAPABILITIES (PCIE20_CAP + 0xC) +#define PCIE20_CAP_LINK_1 (PCIE20_CAP + 0x14) +#define PCIE_CAP_LINK1_VAL 0x2fd7f + + +#define PCIE20_COMMAND_STATUS 0x04 +#define CMD_BME_VAL 0x4 +#define BUS_MASTER_EN 0x7 + +#define 
PCIE20_DEVICE_CONTROL2_STATUS2 0x98 +#define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10 + +#define PCIE30_GEN3_RELATED_OFF 0x890 +#define GEN3_EQUALIZATION_DISABLE BIT(16) +#define RXEQ_RGRDLESS_RXTS BIT(13) +#define GEN3_ZRXDC_NONCOMPL BIT(0) + +#define PCIE20_MISC_CONTROL_1_REG 0x8BC +#define DBI_RO_WR_EN 1 + +#define PERST_DELAY_US 1000 +/* PARF registers */ +#define PCIE20_PARF_PCS_DEEMPH 0x34 +#define PCS_DEEMPH_TX_DEEMPH_GEN1(x) (x << 16) +#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) (x << 8) +#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) (x << 0) + +#define PCIE20_PARF_PCS_SWING 0x38 +#define PCS_SWING_TX_SWING_FULL(x) (x << 8) +#define PCS_SWING_TX_SWING_LOW(x) (x << 0) + +#define PCIE20_PARF_CONFIG_BITS 0x50 +#define PHY_RX0_EQ(x) (x << 24) + +#define PCIE20_LNK_CONTROL2_LINK_STATUS2 0xA0 +#define PCIE_CAP_CURR_DEEMPHASIS BIT(16) +#define SPEED_GEN1 0x1 +#define SPEED_GEN2 0x2 +#define SPEED_GEN3 0x3 +#define PCIE_CAP_TARGET_LINK_SPEED_MASK __mask(3, 0) + +#define __set(v, a, b) (((v) << (b)) & GENMASK(a, b)) +#define __mask(a, b) (((1 << ((a) + 1)) - 1) & ~((1 << (b)) - 1)) +#define PCIE20_DEV_CAS 0x78 +#define PCIE20_MRRS_MASK __mask(14, 12) +#define PCIE20_MRRS(x) __set(x, 14, 12) +#define PCIE20_MPS_MASK __mask(7, 5) +#define PCIE20_MPS(x) __set(x, 7, 5) + +#define AXI_CLK_RATE 200000000 +#define RCHNG_CLK_RATE 100000000 + +#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358 +#define SLV_ADDR_SPACE_SZ 0x10000000 + +#define PCIE_V2_PARF_SIZE 0x2000 + +#define PCIE20_INT_ALL_STATUS 0x224 +#define PCIE20_INT_ALL_CLEAR 0x228 +#define PCIE20_INT_ALL_MASK 0x22c +#define PCIE_LINK_UP 0x2000 +#define PCIE_LINK_DOWN 0x2 + +#define PCIE_ATU_CR1_OUTBOUND_6_GEN3 0xC00 +#define PCIE_ATU_CR2_OUTBOUND_6_GEN3 0xC04 +#define PCIE_ATU_LOWER_BASE_OUTBOUND_6_GEN3 0xC08 +#define PCIE_ATU_UPPER_BASE_OUTBOUND_6_GEN3 0xC0C +#define PCIE_ATU_LIMIT_OUTBOUND_6_GEN3 0xC10 +#define PCIE_ATU_LOWER_TARGET_OUTBOUND_6_GEN3 0xC14 +#define PCIE_ATU_UPPER_TARGET_OUTBOUND_6_GEN3 0xC18 + +#define PCIE_ATU_CR1_OUTBOUND_7_GEN3 0xE00 +#define PCIE_ATU_CR2_OUTBOUND_7_GEN3 0xE04 +#define PCIE_ATU_LOWER_BASE_OUTBOUND_7_GEN3 0xE08 +#define PCIE_ATU_UPPER_BASE_OUTBOUND_7_GEN3 0xE0C +#define PCIE_ATU_LIMIT_OUTBOUND_7_GEN3 0xE10 +#define PCIE_ATU_LOWER_TARGET_OUTBOUND_7_GEN3 0xE14 +#define PCIE_ATU_UPPER_TARGET_OUTBOUND_7_GEN3 0xE18 + +#define PCIE_ASPM_MASK 0x3 +#define PCIE_ASPM_POS 10 + +struct qcom_pcie_resources_v0 { + struct clk *iface_clk; + struct clk *core_clk; + struct clk *phy_clk; + struct clk *aux_clk; + struct clk *ref_clk; + struct reset_control *pci_reset; + struct reset_control *axi_reset; + struct reset_control *ahb_reset; + struct reset_control *por_reset; + struct reset_control *phy_reset; + struct reset_control *ext_reset; + struct regulator *vdda; + struct regulator *vdda_phy; + struct regulator *vdda_refclk; + uint8_t phy_tx0_term_offset; +}; + +struct qcom_pcie_resources_v1 { + struct clk *iface; + struct clk *aux; + struct clk *master_bus; + struct clk *slave_bus; + struct reset_control *core; + struct regulator *vdda; +}; + +struct qcom_pcie_resources_v2 { + struct clk *ahb_clk; + struct clk *axi_m_clk; + struct clk *axi_s_clk; + struct reset_control *axi_m_reset; + struct reset_control *axi_s_reset; + struct reset_control *pipe_reset; + struct reset_control *axi_m_vmid_reset; + struct reset_control *axi_s_xpu_reset; + struct reset_control *parf_reset; + struct reset_control *phy_reset; + struct reset_control *axi_m_sticky_reset; + struct reset_control *pipe_sticky_reset; + struct reset_control *pwr_reset; + struct 
reset_control *ahb_reset; + struct reset_control *phy_ahb_reset; + struct regulator *vdda; + struct regulator *vdda_phy; + struct regulator *vdda_refclk; +}; + +struct qcom_pcie_resources_v3 { + struct clk *sys_noc_clk; + struct clk *axi_m_clk; + struct clk *axi_s_clk; + struct clk *ahb_clk; + struct clk *aux_clk; + struct clk *axi_bridge_clk; + struct clk *rchng_clk; + struct reset_control *axi_m_reset; + struct reset_control *axi_s_reset; + struct reset_control *pipe_reset; + struct reset_control *axi_m_sticky_reset; + struct reset_control *axi_s_sticky_reset; + struct reset_control *ahb_reset; + struct reset_control *sticky_reset; + struct reset_control *sleep_reset; + + struct regulator *vdda; + struct regulator *vdda_phy; + struct regulator *vdda_refclk; +}; + +union qcom_pcie_resources { + struct qcom_pcie_resources_v0 v0; + struct qcom_pcie_resources_v1 v1; + struct qcom_pcie_resources_v2 v2; + struct qcom_pcie_resources_v3 v3; +}; + +struct qcom_pcie; + +struct qcom_pcie_ops { + int (*get_resources)(struct qcom_pcie *pcie); + int (*init)(struct qcom_pcie *pcie); + void (*deinit)(struct qcom_pcie *pcie); +}; + +struct qcom_pcie { + struct pcie_port pp; + struct device *dev; + union qcom_pcie_resources res; + void __iomem *parf; + void __iomem *dbi; + void __iomem *elbi; + void __iomem *dm_iatu; + struct phy *phy; + struct gpio_desc *reset; + struct qcom_pcie_ops *ops; + struct work_struct handle_wake_work; + struct work_struct handle_e911_work; + uint32_t force_gen1; + uint32_t force_gen2; + u32 is_emulation; + u32 compliance; + u32 use_delay; + u32 link_retries_count; + u32 slot_id; + u32 cap_active_state_link_pm; + u32 is_gen3; + int global_irq; + int wake_irq; + int link_down_irq; + int link_up_irq; + int mdm2ap_e911_irq; + bool enumerated; + uint32_t rc_idx; + struct qcom_pcie_register_event *event_reg; + struct notifier_block pci_reboot_notifier; +}; + +#define to_qcom_pcie(x) container_of(x, struct qcom_pcie, pp) + +#define MAX_RC_NUM 3 +static struct qcom_pcie *qcom_pcie_dev[MAX_RC_NUM]; +struct gpio_desc *mdm2ap_e911; + +static inline void +writel_masked(void __iomem *addr, u32 clear_mask, u32 set_mask) +{ + u32 val = readl(addr); + + val &= ~clear_mask; + val |= set_mask; + writel(val, addr); +} + +static void qcom_ep_reset_assert(struct qcom_pcie *pcie) +{ + gpiod_set_value(pcie->reset, 1); + usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); +} + +static void qcom_ep_reset_deassert(struct qcom_pcie *pcie) +{ + msleep(100); + gpiod_set_value(pcie->reset, 0); + usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); +} + +static irqreturn_t qcom_pcie_msi_irq_handler(int irq, void *arg) +{ + struct pcie_port *pp = arg; + + return dw_handle_msi_irq(pp); +} + +static int qcom_pcie_establish_link(struct qcom_pcie *pcie) +{ + u32 val; + + if (dw_pcie_link_up(&pcie->pp)) + return 0; + + /* enable link training */ + val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL); + val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE; + writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL); + + return dw_pcie_wait_for_link(&pcie->pp); +} + +static void qcom_pcie_notify_client(struct qcom_pcie *dev, + enum qcom_pcie_event event) +{ + if (dev->event_reg && dev->event_reg->callback && + (dev->event_reg->events & event)) { + struct qcom_pcie_notify *notify = &dev->event_reg->notify; + notify->event = event; + notify->user = dev->event_reg->user; + pr_info("PCIe: callback RC%d for event %d.\n", + dev->rc_idx, event); + dev->event_reg->callback(notify); + + } else { + pr_info( + "PCIe: Client of RC%d does not have registered 
for event %d.\n",
+ dev->rc_idx, event);
+ }
+}
+
+static irqreturn_t handle_link_down_irq(int irq, void *data)
+{
+ struct qcom_pcie *qcom_pcie = data;
+
+ pr_info("PCIe: link_down IRQ for RC=%d\n", qcom_pcie->rc_idx);
+
+ qcom_pcie_notify_client(qcom_pcie, QCOM_PCIE_EVENT_LINKDOWN);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t handle_link_up_irq(int irq, void *data)
+{
+ struct qcom_pcie *qcom_pcie = data;
+
+ pr_info("PCIe: link_up IRQ for RC=%d\n", qcom_pcie->rc_idx);
+
+ return IRQ_HANDLED;
+}
+
+/* PCIe wake-irq handler */
+static void handle_wake_func(struct work_struct *work)
+{
+ int ret;
+ struct qcom_pcie *pcie = container_of(work, struct qcom_pcie,
+ handle_wake_work);
+ struct pcie_port *pp = &pcie->pp;
+
+ pci_lock_rescan_remove();
+ if (pcie->enumerated) {
+ pr_info("PCIe: RC%d has already been enumerated\n", pcie->rc_idx);
+ pci_unlock_rescan_remove();
+ return;
+ }
+
+ if (!gpiod_get_value(mdm2ap_e911)) {
+ ret = dw_pcie_host_init_pm(pp);
+
+ if (ret)
+ pr_err("PCIe: failed to enable RC%d upon wake request from the device\n",
+ pcie->rc_idx);
+ else {
+ pcie->enumerated = true;
+ pr_info("PCIe: enumerated RC%d successfully upon wake request from the device\n",
+ pcie->rc_idx);
+ }
+ }
+
+ pci_unlock_rescan_remove();
+}
+
+static irqreturn_t qcom_pcie_wake_irq_handler(int irq, void *data)
+{
+ struct qcom_pcie *pcie = data;
+
+ schedule_work(&pcie->handle_wake_work);
+
+ return IRQ_HANDLED;
+}
+
+/* PCIe global int handler */
+static irqreturn_t qcom_pcie_global_irq_handler(int irq, void *data)
+{
+ u32 status = 0;
+ unsigned long val, val_status, val_mask;
+ irqreturn_t ret = IRQ_HANDLED;
+ struct qcom_pcie *pcie = data;
+
+ val_status = readl_relaxed(pcie->parf + PCIE20_INT_ALL_STATUS);
+ val_mask = readl_relaxed(pcie->parf + PCIE20_INT_ALL_MASK);
+ status = val_status & val_mask;
+
+ /* Clear PARF status register */
+ val = readl_relaxed(pcie->parf + PCIE20_INT_ALL_CLEAR) | status;
+ writel_relaxed(val, pcie->parf + PCIE20_INT_ALL_CLEAR);
+ /* ensure data is written to hw register */
+ wmb();
+
+ if (status & PCIE_LINK_DOWN)
+ pr_info("PCIe: link_down IRQ for RC=%d\n", pcie->rc_idx);
+ if (status & PCIE_LINK_UP)
+ pr_info("PCIe: link_up IRQ for RC=%d\n", pcie->rc_idx);
+
+ return ret;
+}
+
+static void qcom_pcie_prog_viewport_cfg0(struct qcom_pcie *pcie, u32 busdev)
+{
+ struct pcie_port *pp = &pcie->pp;
+
+ /*
+ * program and enable address translation region 0 (device config
+ * address space); region type config;
+ * axi config address range to device config address range
+ */
+ writel(PCIE20_PLR_IATU_REGION_OUTBOUND |
+ PCIE20_PLR_IATU_REGION_INDEX(0),
+ pcie->dbi + PCIE20_PLR_IATU_VIEWPORT);
+
+ writel(PCIE20_PLR_IATU_TYPE_CFG0, pcie->dbi + PCIE20_PLR_IATU_CTRL1);
+ writel(PCIE20_PLR_IATU_ENABLE, pcie->dbi + PCIE20_PLR_IATU_CTRL2);
+ writel(pp->cfg0_base, pcie->dbi + PCIE20_PLR_IATU_LBAR);
+ writel((pp->cfg0_base >> 32), pcie->dbi + PCIE20_PLR_IATU_UBAR);
+ writel((pp->cfg0_base + pp->cfg0_size - 1),
+ pcie->dbi + PCIE20_PLR_IATU_LAR);
+ writel(busdev, pcie->dbi + PCIE20_PLR_IATU_LTAR);
+ writel(0, pcie->dbi + PCIE20_PLR_IATU_UTAR);
+}
+
+static void qcom_pcie_prog_viewport_mem2_outbound(struct qcom_pcie *pcie)
+{
+ struct pcie_port *pp = &pcie->pp;
+
+ /*
+ * program and enable address translation region 2 (device resource
+ * address space); region type memory;
+ * axi device bar address range to device bar address range
+ */
+ writel(PCIE20_PLR_IATU_REGION_OUTBOUND |
+ PCIE20_PLR_IATU_REGION_INDEX(2),
+ pcie->dbi + PCIE20_PLR_IATU_VIEWPORT);
+
+
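+ /* Annotation (a reading of the sequence below, not from the original
+  * source): CTRL1 selects the region type, CTRL2 enables the region,
+  * LBAR/UBAR/LAR define the AXI window, and LTAR/UTAR set the PCI
+  * address it translates to. */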
writel(PCIE20_PLR_IATU_TYPE_MEM, pcie->dbi + PCIE20_PLR_IATU_CTRL1); + writel(PCIE20_PLR_IATU_ENABLE, pcie->dbi + PCIE20_PLR_IATU_CTRL2); + writel(pp->mem_base, pcie->dbi + PCIE20_PLR_IATU_LBAR); + writel((pp->mem_base >> 32), pcie->dbi + PCIE20_PLR_IATU_UBAR); + writel(pp->mem_base + pp->mem_size - 1, + pcie->dbi + PCIE20_PLR_IATU_LAR); + writel(pp->mem_bus_addr, pcie->dbi + PCIE20_PLR_IATU_LTAR); + writel(upper_32_bits(pp->mem_bus_addr), + pcie->dbi + PCIE20_PLR_IATU_UTAR); + + /* 256B PCIE buffer setting */ + writel(0x1, pcie->dbi + PCIE20_AXI_MSTR_RESP_COMP_CTRL0); + writel(0x1, pcie->dbi + PCIE20_AXI_MSTR_RESP_COMP_CTRL1); +} + +static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_v0 *res = &pcie->res.v0; + struct device *dev = pcie->dev; + + res->vdda = devm_regulator_get(dev, "vdda"); + if (IS_ERR(res->vdda)) + return PTR_ERR(res->vdda); + + res->vdda_phy = devm_regulator_get(dev, "vdda_phy"); + if (IS_ERR(res->vdda_phy)) + return PTR_ERR(res->vdda_phy); + + res->vdda_refclk = devm_regulator_get(dev, "vdda_refclk"); + if (IS_ERR(res->vdda_refclk)) + return PTR_ERR(res->vdda_refclk); + + res->iface_clk = devm_clk_get(dev, "iface"); + if (IS_ERR(res->iface_clk)) + return PTR_ERR(res->iface_clk); + + res->core_clk = devm_clk_get(dev, "core"); + if (IS_ERR(res->core_clk)) + return PTR_ERR(res->core_clk); + + res->phy_clk = devm_clk_get(dev, "phy"); + if (IS_ERR(res->phy_clk)) + return PTR_ERR(res->phy_clk); + + res->aux_clk = devm_clk_get(dev, "aux"); + if (IS_ERR(res->aux_clk)) + return PTR_ERR(res->aux_clk); + + res->ref_clk = devm_clk_get(dev, "ref"); + if (IS_ERR(res->ref_clk)) + return PTR_ERR(res->ref_clk); + + res->pci_reset = devm_reset_control_get(dev, "pci"); + if (IS_ERR(res->pci_reset)) + return PTR_ERR(res->pci_reset); + + res->axi_reset = devm_reset_control_get(dev, "axi"); + if (IS_ERR(res->axi_reset)) + return PTR_ERR(res->axi_reset); + + res->ahb_reset = devm_reset_control_get(dev, "ahb"); + if (IS_ERR(res->ahb_reset)) + return PTR_ERR(res->ahb_reset); + + res->por_reset = devm_reset_control_get(dev, "por"); + if (IS_ERR(res->por_reset)) + return PTR_ERR(res->por_reset); + + res->phy_reset = devm_reset_control_get(dev, "phy"); + if (IS_ERR(res->phy_reset)) + return PTR_ERR(res->phy_reset); + + res->ext_reset = devm_reset_control_get(dev, "ext"); + if (IS_ERR(res->ext_reset)) + return PTR_ERR(res->ext_reset); + + if (of_property_read_u8(dev->of_node, "phy-tx0-term-offset", + &res->phy_tx0_term_offset)) + res->phy_tx0_term_offset = 0; + + return 0; +} + +static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_v1 *res = &pcie->res.v1; + struct device *dev = pcie->dev; + + res->vdda = devm_regulator_get(dev, "vdda"); + if (IS_ERR(res->vdda)) + return PTR_ERR(res->vdda); + + res->iface = devm_clk_get(dev, "iface"); + if (IS_ERR(res->iface)) + return PTR_ERR(res->iface); + + res->aux = devm_clk_get(dev, "aux"); + if (IS_ERR(res->aux)) + return PTR_ERR(res->aux); + + res->master_bus = devm_clk_get(dev, "master_bus"); + if (IS_ERR(res->master_bus)) + return PTR_ERR(res->master_bus); + + res->slave_bus = devm_clk_get(dev, "slave_bus"); + if (IS_ERR(res->slave_bus)) + return PTR_ERR(res->slave_bus); + + res->core = devm_reset_control_get(dev, "core"); + if (IS_ERR(res->core)) + return PTR_ERR(res->core); + + return 0; +} + +static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_v2 *res = &pcie->res.v2; + struct device *dev = pcie->dev; + + res->vdda = 
devm_regulator_get(dev, "vdda"); + if (IS_ERR(res->vdda)) + return PTR_ERR(res->vdda); + + res->vdda_phy = devm_regulator_get(dev, "vdda_phy"); + if (IS_ERR(res->vdda_phy)) + return PTR_ERR(res->vdda_phy); + + res->vdda_refclk = devm_regulator_get(dev, "vdda_refclk"); + if (IS_ERR(res->vdda_refclk)) + return PTR_ERR(res->vdda_refclk); + + res->ahb_clk = devm_clk_get(dev, "ahb"); + if (IS_ERR(res->ahb_clk)) + return PTR_ERR(res->ahb_clk); + + res->axi_m_clk = devm_clk_get(dev, "axi_m"); + if (IS_ERR(res->axi_m_clk)) + return PTR_ERR(res->axi_m_clk); + + res->axi_s_clk = devm_clk_get(dev, "axi_s"); + if (IS_ERR(res->axi_s_clk)) + return PTR_ERR(res->axi_s_clk); + + res->axi_m_reset = devm_reset_control_get(dev, "axi_m"); + if (IS_ERR(res->axi_m_reset)) + return PTR_ERR(res->axi_m_reset); + + res->axi_s_reset = devm_reset_control_get(dev, "axi_s"); + if (IS_ERR(res->axi_s_reset)) + return PTR_ERR(res->axi_s_reset); + + res->pipe_reset = devm_reset_control_get(dev, "pipe"); + if (IS_ERR(res->pipe_reset)) + return PTR_ERR(res->pipe_reset); + + res->axi_m_vmid_reset = devm_reset_control_get(dev, "axi_m_vmid"); + if (IS_ERR(res->axi_m_vmid_reset)) + return PTR_ERR(res->axi_m_vmid_reset); + + res->axi_s_xpu_reset = devm_reset_control_get(dev, "axi_s_xpu"); + if (IS_ERR(res->axi_s_xpu_reset)) + return PTR_ERR(res->axi_s_xpu_reset); + + res->parf_reset = devm_reset_control_get(dev, "parf"); + if (IS_ERR(res->parf_reset)) + return PTR_ERR(res->parf_reset); + + res->phy_reset = devm_reset_control_get(dev, "phy"); + if (IS_ERR(res->phy_reset)) + return PTR_ERR(res->phy_reset); + + res->axi_m_sticky_reset = devm_reset_control_get(dev, "axi_m_sticky"); + if (IS_ERR(res->axi_m_sticky_reset)) + return PTR_ERR(res->axi_m_sticky_reset); + + res->pipe_sticky_reset = devm_reset_control_get(dev, "pipe_sticky"); + if (IS_ERR(res->pipe_sticky_reset)) + return PTR_ERR(res->pipe_sticky_reset); + + res->pwr_reset = devm_reset_control_get(dev, "pwr"); + if (IS_ERR(res->pwr_reset)) + return PTR_ERR(res->pwr_reset); + + res->ahb_reset = devm_reset_control_get(dev, "ahb"); + if (IS_ERR(res->ahb_reset)) + return PTR_ERR(res->ahb_reset); + + res->phy_ahb_reset = devm_reset_control_get(dev, "phy_ahb"); + if (IS_ERR(res->phy_ahb_reset)) + return PTR_ERR(res->phy_ahb_reset); + + return 0; +} + + +static int qcom_pcie_get_resources_v3(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_v3 *res = &pcie->res.v3; + struct device *dev = pcie->dev; + + res->vdda = devm_regulator_get(dev, "vdda"); + if (IS_ERR(res->vdda)) + return PTR_ERR(res->vdda); + + res->vdda_phy = devm_regulator_get(dev, "vdda_phy"); + if (IS_ERR(res->vdda_phy)) + return PTR_ERR(res->vdda_phy); + + res->vdda_refclk = devm_regulator_get(dev, "vdda_refclk"); + if (IS_ERR(res->vdda_refclk)) + return PTR_ERR(res->vdda_refclk); + + res->sys_noc_clk = devm_clk_get(dev, "sys_noc"); + if (IS_ERR(res->sys_noc_clk)) + return PTR_ERR(res->sys_noc_clk); + + res->axi_m_clk = devm_clk_get(dev, "axi_m"); + if (IS_ERR(res->axi_m_clk)) + return PTR_ERR(res->axi_m_clk); + + res->axi_s_clk = devm_clk_get(dev, "axi_s"); + if (IS_ERR(res->axi_s_clk)) + return PTR_ERR(res->axi_s_clk); + + res->ahb_clk = devm_clk_get(dev, "ahb"); + if (IS_ERR(res->ahb_clk)) + return PTR_ERR(res->ahb_clk); + + res->aux_clk = devm_clk_get(dev, "aux"); + if (IS_ERR(res->aux_clk)) + return PTR_ERR(res->aux_clk); + + if (pcie->is_gen3) { + res->axi_bridge_clk = devm_clk_get(dev, "axi_bridge"); + if (IS_ERR(res->axi_bridge_clk)) + return PTR_ERR(res->axi_bridge_clk); + + res->rchng_clk = 
devm_clk_get(dev, "rchng"); + if (IS_ERR(res->rchng_clk)) + res->rchng_clk = NULL; + } + + res->axi_m_reset = devm_reset_control_get(dev, "axi_m"); + if (IS_ERR(res->axi_m_reset)) + return PTR_ERR(res->axi_m_reset); + + res->axi_s_reset = devm_reset_control_get(dev, "axi_s"); + if (IS_ERR(res->axi_s_reset)) + return PTR_ERR(res->axi_s_reset); + + res->pipe_reset = devm_reset_control_get(dev, "pipe"); + if (IS_ERR(res->pipe_reset)) + return PTR_ERR(res->pipe_reset); + + res->axi_m_sticky_reset = devm_reset_control_get(dev, "axi_m_sticky"); + if (IS_ERR(res->axi_m_sticky_reset)) + return PTR_ERR(res->axi_m_sticky_reset); + + if (pcie->is_gen3) { + res->axi_s_sticky_reset = devm_reset_control_get(dev, "axi_s_sticky"); + if (IS_ERR(res->axi_s_sticky_reset)) + return PTR_ERR(res->axi_s_sticky_reset); + } + res->sticky_reset = devm_reset_control_get(dev, "sticky"); + if (IS_ERR(res->sticky_reset)) + return PTR_ERR(res->sticky_reset); + + res->ahb_reset = devm_reset_control_get(dev, "ahb"); + if (IS_ERR(res->ahb_reset)) + return PTR_ERR(res->ahb_reset); + + res->sleep_reset = devm_reset_control_get(dev, "sleep"); + if (IS_ERR(res->sleep_reset)) + return PTR_ERR(res->sleep_reset); + + return 0; +} + +static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_v0 *res = &pcie->res.v0; + + clk_disable_unprepare(res->phy_clk); + reset_control_assert(res->phy_reset); + reset_control_assert(res->axi_reset); + reset_control_assert(res->ahb_reset); + reset_control_assert(res->por_reset); + reset_control_assert(res->pci_reset); + reset_control_assert(res->ext_reset); + clk_disable_unprepare(res->iface_clk); + clk_disable_unprepare(res->core_clk); + clk_disable_unprepare(res->aux_clk); + clk_disable_unprepare(res->ref_clk); + regulator_disable(res->vdda); + regulator_disable(res->vdda_phy); + regulator_disable(res->vdda_refclk); +} + +static int qcom_pcie_init_v0(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_v0 *res = &pcie->res.v0; + struct device *dev = pcie->dev; + int ret; + + ret = reset_control_assert(res->ahb_reset); + if (ret) { + dev_err(dev, "cannot assert ahb reset\n"); + return ret; + } + + ret = regulator_enable(res->vdda); + if (ret) { + dev_err(dev, "cannot enable vdda regulator\n"); + return ret; + } + + ret = regulator_enable(res->vdda_refclk); + if (ret) { + dev_err(dev, "cannot enable vdda_refclk regulator\n"); + goto err_refclk; + } + + ret = regulator_enable(res->vdda_phy); + if (ret) { + dev_err(dev, "cannot enable vdda_phy regulator\n"); + goto err_vdda_phy; + } + + ret = reset_control_deassert(res->ext_reset); + if (ret) { + dev_err(dev, "cannot assert ext reset\n"); + goto err_reset_ext; + } + + ret = clk_prepare_enable(res->iface_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable iface clock\n"); + goto err_iface; + } + + ret = clk_prepare_enable(res->core_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable core clock\n"); + goto err_clk_core; + } + + ret = clk_prepare_enable(res->aux_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable aux clock\n"); + goto err_clk_aux; + } + + ret = clk_prepare_enable(res->ref_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable ref clock\n"); + goto err_clk_ref; + } + + ret = reset_control_deassert(res->ahb_reset); + if (ret) { + dev_err(dev, "cannot deassert ahb reset\n"); + goto err_deassert_ahb; + } + + writel_masked(pcie->parf + PCIE20_PARF_PHY_CTRL, BIT(0), 0); + + /* Set Tx termination offset */ + writel_masked(pcie->parf + PCIE20_PARF_PHY_CTRL, + PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK, 
+ PHY_CTRL_PHY_TX0_TERM_OFFSET(res->phy_tx0_term_offset)); + + /* PARF programming */ + writel(PCS_DEEMPH_TX_DEEMPH_GEN1(0x18) | + PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(0x18) | + PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(0x22), + pcie->parf + PCIE20_PARF_PCS_DEEMPH); + writel(PCS_SWING_TX_SWING_FULL(0x78) | + PCS_SWING_TX_SWING_LOW(0x78), + pcie->parf + PCIE20_PARF_PCS_SWING); + writel(PHY_RX0_EQ(0x4), pcie->parf + PCIE20_PARF_CONFIG_BITS); + + /* Enable reference clock */ + writel_masked(pcie->parf + PCIE20_PARF_PHY_REFCLK, + REF_USE_PAD, REF_SSP_EN); + + + ret = reset_control_deassert(res->phy_reset); + if (ret) { + dev_err(dev, "cannot deassert phy reset\n"); + return ret; + } + + ret = reset_control_deassert(res->pci_reset); + if (ret) { + dev_err(dev, "cannot deassert pci reset\n"); + return ret; + } + + ret = reset_control_deassert(res->por_reset); + if (ret) { + dev_err(dev, "cannot deassert por reset\n"); + return ret; + } + + ret = reset_control_deassert(res->axi_reset); + if (ret) { + dev_err(dev, "cannot deassert axi reset\n"); + return ret; + } + + ret = clk_prepare_enable(res->phy_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable phy clock\n"); + goto err_deassert_ahb; + } + + /* wait for clock acquisition */ + usleep_range(1000, 1500); + if (pcie->force_gen1) { + writel_relaxed(((readl_relaxed( + pcie->dbi + PCIE20_LNK_CONTROL2_LINK_STATUS2) + & (~PCIE_CAP_TARGET_LINK_SPEED_MASK)) | SPEED_GEN1), + pcie->dbi + PCIE20_LNK_CONTROL2_LINK_STATUS2); + } + + qcom_pcie_prog_viewport_cfg0(pcie, MSM_PCIE_DEV_CFG_ADDR); + qcom_pcie_prog_viewport_mem2_outbound(pcie); + + return 0; + +err_deassert_ahb: + clk_disable_unprepare(res->ref_clk); +err_clk_ref: + clk_disable_unprepare(res->aux_clk); +err_clk_aux: + clk_disable_unprepare(res->core_clk); +err_clk_core: + clk_disable_unprepare(res->iface_clk); +err_iface: + reset_control_assert(res->ext_reset); +err_reset_ext: + regulator_disable(res->vdda_phy); +err_vdda_phy: + regulator_disable(res->vdda_refclk); +err_refclk: + regulator_disable(res->vdda); + + return ret; +} + +static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_v1 *res = &pcie->res.v1; + + reset_control_assert(res->core); + clk_disable_unprepare(res->slave_bus); + clk_disable_unprepare(res->master_bus); + clk_disable_unprepare(res->iface); + clk_disable_unprepare(res->aux); + regulator_disable(res->vdda); +} + +static int qcom_pcie_init_v1(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_v1 *res = &pcie->res.v1; + struct device *dev = pcie->dev; + int ret; + + ret = reset_control_deassert(res->core); + if (ret) { + dev_err(dev, "cannot deassert core reset\n"); + return ret; + } + + ret = clk_prepare_enable(res->aux); + if (ret) { + dev_err(dev, "cannot prepare/enable aux clock\n"); + goto err_res; + } + + ret = clk_prepare_enable(res->iface); + if (ret) { + dev_err(dev, "cannot prepare/enable iface clock\n"); + goto err_aux; + } + + ret = clk_prepare_enable(res->master_bus); + if (ret) { + dev_err(dev, "cannot prepare/enable master_bus clock\n"); + goto err_iface; + } + + ret = clk_prepare_enable(res->slave_bus); + if (ret) { + dev_err(dev, "cannot prepare/enable slave_bus clock\n"); + goto err_master; + } + + ret = regulator_enable(res->vdda); + if (ret) { + dev_err(dev, "cannot enable vdda regulator\n"); + goto err_slave; + } + + /* change DBI base address */ + writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); + + val |= BIT(31); + 
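+ /* Annotation (assumption, the bit is not documented in this file):
+  * judging by the register name, BIT(31) of AXI_MSTR_WR_ADDR_HALT
+  * gates AXI master writes, the path that carries MSI doorbells. */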
writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); + } + + return 0; +err_slave: + clk_disable_unprepare(res->slave_bus); +err_master: + clk_disable_unprepare(res->master_bus); +err_iface: + clk_disable_unprepare(res->iface); +err_aux: + clk_disable_unprepare(res->aux); +err_res: + reset_control_assert(res->core); + + return ret; +} + +static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_v2 *res = &pcie->res.v2; + + /* Assert pcie_pipe_ares */ + reset_control_assert(res->axi_m_reset); + reset_control_assert(res->axi_s_reset); + usleep_range(10000, 12000); /* wait 12ms */ + + reset_control_assert(res->pipe_reset); + reset_control_assert(res->pipe_sticky_reset); + reset_control_assert(res->phy_reset); + reset_control_assert(res->phy_ahb_reset); + usleep_range(10000, 12000); /* wait 12ms */ + + reset_control_assert(res->axi_m_sticky_reset); + reset_control_assert(res->pwr_reset); + reset_control_assert(res->ahb_reset); + usleep_range(10000, 12000); /* wait 12ms */ + + clk_disable_unprepare(res->ahb_clk); + clk_disable_unprepare(res->axi_m_clk); + clk_disable_unprepare(res->axi_s_clk); + regulator_disable(res->vdda); + regulator_disable(res->vdda_phy); + regulator_disable(res->vdda_refclk); +} + +static int qcom_pcie_enable_resources_v2(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_v2 *res = &pcie->res.v2; + struct device *dev = pcie->dev; + int ret; + + ret = regulator_enable(res->vdda); + if (ret) { + dev_err(dev, "cannot enable vdda regulator\n"); + return ret; + } + + ret = regulator_enable(res->vdda_refclk); + if (ret) { + dev_err(dev, "cannot enable vdda_refclk regulator\n"); + goto err_refclk; + } + + ret = regulator_enable(res->vdda_phy); + if (ret) { + dev_err(dev, "cannot enable vdda_phy regulator\n"); + goto err_vdda_phy; + } + + ret = clk_prepare_enable(res->ahb_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable iface clock\n"); + goto err_ahb; + } + + ret = clk_prepare_enable(res->axi_m_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable core clock\n"); + goto err_clk_axi_m; + } + + ret = clk_prepare_enable(res->axi_s_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable phy clock\n"); + goto err_clk_axi_s; + } + + udelay(1); + + return 0; + +err_clk_axi_s: + clk_disable_unprepare(res->axi_m_clk); +err_clk_axi_m: + clk_disable_unprepare(res->ahb_clk); +err_ahb: + regulator_disable(res->vdda_phy); +err_vdda_phy: + regulator_disable(res->vdda_refclk); +err_refclk: + regulator_disable(res->vdda); + return ret; +} + +static void qcom_pcie_v2_reset(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_v2 *res = &pcie->res.v2; + /* Assert pcie_pipe_ares */ + reset_control_assert(res->axi_m_reset); + reset_control_assert(res->axi_s_reset); + usleep_range(10000, 12000); /* wait 12ms */ + + reset_control_assert(res->pipe_reset); + reset_control_assert(res->pipe_sticky_reset); + reset_control_assert(res->phy_reset); + reset_control_assert(res->phy_ahb_reset); + usleep_range(10000, 12000); /* wait 12ms */ + + reset_control_assert(res->axi_m_sticky_reset); + reset_control_assert(res->pwr_reset); + reset_control_assert(res->ahb_reset); + usleep_range(10000, 12000); /* wait 12ms */ + + reset_control_deassert(res->phy_ahb_reset); + reset_control_deassert(res->phy_reset); + reset_control_deassert(res->pipe_reset); + reset_control_deassert(res->pipe_sticky_reset); + usleep_range(10000, 12000); /* wait 12ms */ + + reset_control_deassert(res->axi_m_reset); + reset_control_deassert(res->axi_m_sticky_reset); + 
reset_control_deassert(res->axi_s_reset); + reset_control_deassert(res->pwr_reset); + reset_control_deassert(res->ahb_reset); + usleep_range(10000, 12000); /* wait 12ms */ + wmb(); /* ensure data is written to hw register */ +} + +static int qcom_pcie_init_v2(struct qcom_pcie *pcie) +{ + int ret; + + qcom_pcie_v2_reset(pcie); + qcom_ep_reset_assert(pcie); + + ret = qcom_pcie_enable_resources_v2(pcie); + if (ret) + return ret; + + writel_masked(pcie->parf + PCIE20_PARF_PHY_CTRL, BIT(0), 0); + + writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); + + writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS + | SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS | + AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS, + pcie->parf + PCIE20_PARF_SYS_CTRL); + writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH); + writel(CMD_BME_VAL, pcie->dbi + PCIE20_COMMAND_STATUS); + writel(DBI_RO_WR_EN, pcie->dbi + PCIE20_MISC_CONTROL_1_REG); + writel(PCIE_CAP_LINK1_VAL, pcie->dbi + PCIE20_CAP_LINK_1); + + writel_masked(pcie->dbi + PCIE20_CAP_LINK_CAPABILITIES, + BIT(10) | BIT(11), 0); + writel(PCIE_CAP_CPL_TIMEOUT_DISABLE, pcie->dbi + + PCIE20_DEVICE_CONTROL2_STATUS2); + writel(LTSSM_EN, pcie->parf + PCIE20_PARF_LTSSM); + + return 0; +} + +static void qcom_pcie_deinit_v3(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_v3 *res = &pcie->res.v3; + + clk_disable_unprepare(res->axi_m_clk); + clk_disable_unprepare(res->axi_s_clk); + clk_disable_unprepare(res->ahb_clk); + clk_disable_unprepare(res->aux_clk); + clk_disable_unprepare(res->sys_noc_clk); + regulator_disable(res->vdda); + regulator_disable(res->vdda_phy); + regulator_disable(res->vdda_refclk); +} + +static void qcom_pcie_v3_reset(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_v3 *res = &pcie->res.v3; + /* Assert pcie_pipe_ares */ + reset_control_assert(res->pipe_reset); + reset_control_assert(res->sleep_reset); + reset_control_assert(res->sticky_reset); + reset_control_assert(res->axi_m_reset); + reset_control_assert(res->axi_s_reset); + reset_control_assert(res->ahb_reset); + reset_control_assert(res->axi_m_sticky_reset); + if (pcie->is_gen3) + reset_control_assert(res->axi_s_sticky_reset); + usleep_range(10000, 12000); /* wait 12ms */ + + reset_control_deassert(res->pipe_reset); + reset_control_deassert(res->sleep_reset); + reset_control_deassert(res->sticky_reset); + reset_control_deassert(res->axi_m_reset); + reset_control_deassert(res->axi_s_reset); + reset_control_deassert(res->ahb_reset); + reset_control_deassert(res->axi_m_sticky_reset); + if (pcie->is_gen3) + reset_control_deassert(res->axi_s_sticky_reset); + usleep_range(10000, 12000); /* wait 12ms */ + wmb(); /* ensure data is written to hw register */ +} + +static int qcom_pcie_enable_resources_v3(struct qcom_pcie *pcie) +{ + struct qcom_pcie_resources_v3 *res = &pcie->res.v3; + struct device *dev = pcie->dev; + int ret; + + ret = regulator_enable(res->vdda); + if (ret) { + dev_err(dev, "cannot enable vdda regulator\n"); + return ret; + } + + ret = regulator_enable(res->vdda_refclk); + if (ret) { + dev_err(dev, "cannot enable vdda_refclk regulator\n"); + goto err_refclk; + } + + ret = regulator_enable(res->vdda_phy); + if (ret) { + dev_err(dev, "cannot enable vdda_phy regulator\n"); + goto err_vdda_phy; + } + + ret = clk_prepare_enable(res->sys_noc_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable core clock\n"); + goto err_clk_sys_noc; + } + + ret = clk_prepare_enable(res->axi_m_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable core clock\n"); + goto err_clk_axi_m; + } + + ret = 
clk_set_rate(res->axi_m_clk, AXI_CLK_RATE); + if (ret) { + dev_err(dev, "MClk rate set failed (%d)\n", ret); + goto err_clk_axi_m; + } + + ret = clk_prepare_enable(res->axi_s_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable axi slave clock\n"); + goto err_clk_axi_s; + } + + ret = clk_set_rate(res->axi_s_clk, AXI_CLK_RATE); + if (ret) { + dev_err(dev, "MClk rate set failed (%d)\n", ret); + goto err_clk_axi_s; + } + + ret = clk_prepare_enable(res->ahb_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable ahb clock\n"); + goto err_clk_ahb; + } + + ret = clk_prepare_enable(res->aux_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable aux clock\n"); + goto err_clk_aux; + } + + if (pcie->is_gen3) { + ret = clk_prepare_enable(res->axi_bridge_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable axi_bridge clock\n"); + goto err_clk_axi_bridge; + } + + if (res->rchng_clk) { + ret = clk_prepare_enable(res->rchng_clk); + if (ret) { + dev_err(dev, "cannot prepare/enable rchng_clk clock\n"); + goto err_clk_rchng; + } + + ret = clk_set_rate(res->rchng_clk, RCHNG_CLK_RATE); + if (ret) { + dev_err(dev, "rchng_clk rate set failed (%d)\n", + ret); + goto err_clk_rchng; + } + } + } + + + udelay(1); + + return 0; + +err_clk_rchng: + clk_disable_unprepare(res->axi_bridge_clk); +err_clk_axi_bridge: + clk_disable_unprepare(res->aux_clk); +err_clk_aux: + clk_disable_unprepare(res->ahb_clk); +err_clk_ahb: + clk_disable_unprepare(res->axi_s_clk); +err_clk_axi_s: + clk_disable_unprepare(res->axi_m_clk); +err_clk_axi_m: + clk_disable_unprepare(res->sys_noc_clk); +err_clk_sys_noc: + regulator_disable(res->vdda_phy); +err_vdda_phy: + regulator_disable(res->vdda_refclk); +err_refclk: + regulator_disable(res->vdda); + return ret; +} + + +static int qcom_pcie_init_v3(struct qcom_pcie *pcie) +{ + int ret, i; + + qcom_pcie_v3_reset(pcie); + if (!pcie->is_emulation) + qcom_ep_reset_assert(pcie); + + ret = qcom_pcie_enable_resources_v3(pcie); + if (ret) + return ret; + + writel(SLV_ADDR_SPACE_SZ, pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE); + + ret = phy_power_on(pcie->phy); + if (ret) + return ret; + + writel_masked(pcie->parf + PCIE20_PARF_PHY_CTRL, BIT(0), 0); + + writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); + + if (pcie->is_gen3) { + writel(DEVICE_TYPE_RC, pcie->parf + PCIE_PARF_DEVICE_TYPE); + writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN, + pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); + writel(RXEQ_RGRDLESS_RXTS | + GEN3_ZRXDC_NONCOMPL, pcie->dbi + PCIE30_GEN3_RELATED_OFF); + } + + writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS + | SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS | + AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS, + pcie->parf + PCIE20_PARF_SYS_CTRL); + + writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH); + if (pcie->is_gen3) + writel(BUS_MASTER_EN, pcie->dbi + PCIE20_COMMAND_STATUS); + else + writel(CMD_BME_VAL, pcie->dbi + PCIE20_COMMAND_STATUS); + writel(DBI_RO_WR_EN, pcie->dbi + PCIE20_MISC_CONTROL_1_REG); + writel(PCIE_CAP_LINK1_VAL, pcie->dbi + PCIE20_CAP_LINK_1); + + /* Configure PCIe link capabilities for ASPM */ + writel_masked(pcie->dbi + PCIE20_CAP_LINK_CAPABILITIES, + PCIE_ASPM_MASK << PCIE_ASPM_POS, + (pcie->cap_active_state_link_pm & PCIE_ASPM_MASK) << PCIE_ASPM_POS); + + writel(PCIE_CAP_CPL_TIMEOUT_DISABLE, pcie->dbi + + PCIE20_DEVICE_CONTROL2_STATUS2); + + if (pcie->is_gen3 && !pcie->force_gen2) + writel_relaxed(PCIE_CAP_CURR_DEEMPHASIS | SPEED_GEN3, + pcie->dbi + PCIE20_LNK_CONTROL2_LINK_STATUS2); + else if (pcie->force_gen2) + writel_relaxed(PCIE_CAP_CURR_DEEMPHASIS | 
SPEED_GEN2, + pcie->dbi + PCIE20_LNK_CONTROL2_LINK_STATUS2); + + if (pcie->force_gen1) { + writel_relaxed(((readl_relaxed( + pcie->dbi + PCIE20_LNK_CONTROL2_LINK_STATUS2) + & (~PCIE_CAP_TARGET_LINK_SPEED_MASK)) | SPEED_GEN1), + pcie->dbi + PCIE20_LNK_CONTROL2_LINK_STATUS2); + } + + writel(LTSSM_EN, pcie->parf + PCIE20_PARF_LTSSM); + if (pcie->is_emulation) + qcom_ep_reset_deassert(pcie); + + if (pcie->is_gen3) { + for (i = 0; i < 255; i++) + writel(0x0, pcie->parf + PARF_BDF_TO_SID_TABLE + (4 * i)); + writel( 0x4, pcie->dm_iatu + PCIE_ATU_CR1_OUTBOUND_6_GEN3); + writel( 0x90000000, pcie->dm_iatu + PCIE_ATU_CR2_OUTBOUND_6_GEN3); + writel( 0x0, pcie->dm_iatu + PCIE_ATU_LOWER_BASE_OUTBOUND_6_GEN3); + writel( 0x0, pcie->dm_iatu + PCIE_ATU_UPPER_BASE_OUTBOUND_6_GEN3); + writel( 0x00107FFFF, pcie->dm_iatu + PCIE_ATU_LIMIT_OUTBOUND_6_GEN3); + writel( 0x0, pcie->dm_iatu + PCIE_ATU_LOWER_TARGET_OUTBOUND_6_GEN3); + writel( 0x0, pcie->dm_iatu + PCIE_ATU_UPPER_TARGET_OUTBOUND_6_GEN3); + writel( 0x5, pcie->dm_iatu + PCIE_ATU_CR1_OUTBOUND_7_GEN3); + writel( 0x90000000, pcie->dm_iatu + PCIE_ATU_CR2_OUTBOUND_7_GEN3); + writel( 0x200000, pcie->dm_iatu + PCIE_ATU_LOWER_BASE_OUTBOUND_7_GEN3); + writel( 0x0, pcie->dm_iatu+ PCIE_ATU_UPPER_BASE_OUTBOUND_7_GEN3); + writel( 0x7FFFFF, pcie->dm_iatu + PCIE_ATU_LIMIT_OUTBOUND_7_GEN3); + writel( 0x0, pcie->dm_iatu + PCIE_ATU_LOWER_TARGET_OUTBOUND_7_GEN3); + writel( 0x0, pcie->dm_iatu + PCIE_ATU_UPPER_TARGET_OUTBOUND_7_GEN3); + } + + phy_power_off(pcie->phy); + return 0; +} + +static int qcom_pcie_link_up(struct pcie_port *pp) +{ + struct qcom_pcie *pcie = to_qcom_pcie(pp); + u32 val; + + val = readl_relaxed(pcie->elbi + PCIE20_ELBI_SYS_STTS); + if (val & XMLH_LINK_UP) + return 1; + return 0; +} + +static int qcom_pcie_host_init(struct pcie_port *pp) +{ + struct qcom_pcie *pcie = to_qcom_pcie(pp); + int ret; + + if (gpiod_get_value(mdm2ap_e911)) + return -EBUSY; + + if (!pcie->is_emulation) + qcom_ep_reset_assert(pcie); + + ret = pcie->ops->init(pcie); + if (ret) + goto err_deinit; + + ret = phy_power_on(pcie->phy); + if (ret) + goto err_deinit; + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + if (!pp->msi_gicm_addr) + dw_pcie_msi_init(pp); + } + + if (!pcie->is_emulation) + qcom_ep_reset_deassert(pcie); + + ret = qcom_pcie_establish_link(pcie); + if (ret) + goto err; + + return 0; + +err: + if (pcie->compliance == 1) + return 0; + + if (!pcie->is_emulation) + qcom_ep_reset_assert(pcie); + + phy_power_off(pcie->phy); + +err_deinit: + if (pcie->compliance == 1) + return 0; + + pcie->ops->deinit(pcie); + return ret; +} + +static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, + u32 *val) +{ + /* the device class is not reported correctly from the register */ + if (where == PCI_CLASS_REVISION && size == 4) { + *val = readl(pp->dbi_base + PCI_CLASS_REVISION); + *val &= 0xff; /* keep revision id */ + *val |= PCI_CLASS_BRIDGE_PCI << 16; + return PCIBIOS_SUCCESSFUL; + } + + return dw_pcie_cfg_read(pp->dbi_base + where, size, val); +} + +static struct pcie_host_ops qcom_pcie_dw_ops = { + .link_up = qcom_pcie_link_up, + .host_init = qcom_pcie_host_init, + .rd_own_conf = qcom_pcie_rd_own_conf, +}; + +static const struct qcom_pcie_ops ops_v0 = { + .get_resources = qcom_pcie_get_resources_v0, + .init = qcom_pcie_init_v0, + .deinit = qcom_pcie_deinit_v0, +}; + +static const struct qcom_pcie_ops ops_v1 = { + .get_resources = qcom_pcie_get_resources_v1, + .init = qcom_pcie_init_v1, + .deinit = qcom_pcie_deinit_v1, +}; + +static const struct qcom_pcie_ops ops_v2 = { + 
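+ /* per qcom_pcie_match below, ops_v2 serves qcom,pcie-ipq4019 */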
.get_resources = qcom_pcie_get_resources_v2, + .init = qcom_pcie_init_v2, + .deinit = qcom_pcie_deinit_v2, +}; + +static const struct qcom_pcie_ops ops_v3 = { + .get_resources = qcom_pcie_get_resources_v3, + .init = qcom_pcie_init_v3, + .deinit = qcom_pcie_deinit_v3, +}; + +static void qcom_slot_remove(int val) +{ + + struct pcie_port *pp; + pci_lock_rescan_remove(); + + if ((val >= 0) && (val < MAX_RC_NUM)) { + if (qcom_pcie_dev[val]) { + if (!qcom_pcie_dev[val]->enumerated) { + pr_notice("\nPCIe: RC%d already removed", val); + } else { + pr_notice("---> Removing %d", val); + pp = &qcom_pcie_dev[val]->pp; + pci_stop_root_bus(pp->pci_bus); + pci_remove_root_bus(pp->pci_bus); + if (!qcom_pcie_dev[val]->is_emulation) + qcom_ep_reset_assert(qcom_pcie_dev[val]); + phy_power_off(qcom_pcie_dev[val]->phy); + qcom_pcie_dev[val]->ops->deinit(qcom_pcie_dev[val]); + pp->pci_bus = NULL; + qcom_pcie_dev[val]->enumerated = false; + pr_notice(" ... done<---\n"); + } + } + } + pci_unlock_rescan_remove(); +} + +static void qcom_slot_rescan(int val) +{ + + int ret; + struct pcie_port *pp; + pci_lock_rescan_remove(); + + if ((val >= 0) && (val < MAX_RC_NUM)) { + if (qcom_pcie_dev[val]) { + if (qcom_pcie_dev[val]->enumerated) { + pr_notice("PCIe: RC%d already enumerated", val); + } else { + pp = &qcom_pcie_dev[val]->pp; + ret = dw_pcie_host_init_pm(pp); + if (!ret) + qcom_pcie_dev[val]->enumerated = true; + } + } + } + pci_unlock_rescan_remove(); + +} + +int qcom_pcie_rescan(void) +{ + int i, ret; + struct pcie_port *pp; + + for (i = 0; i < MAX_RC_NUM; i++) { + /* reset and enumerate the pcie devices */ + if (qcom_pcie_dev[i]) { + pr_notice("---> Initializing %d\n", i); + if (qcom_pcie_dev[i]->enumerated) + continue; + + pp = &qcom_pcie_dev[i]->pp; + ret = dw_pcie_host_init_pm(pp); + if (!ret) + qcom_pcie_dev[i]->enumerated = true; + pr_notice(" ... done<---\n"); + } + } + return 0; +} + +void qcom_pcie_remove_bus(void) +{ + int i; + + for (i = 0; i < MAX_RC_NUM; i++) { + if (qcom_pcie_dev[i]) { + struct pcie_port *pp; + struct qcom_pcie *pcie; + + pr_notice("---> Removing %d\n", i); + + pcie = qcom_pcie_dev[i]; + if (!pcie->enumerated) + continue; + + pp = &qcom_pcie_dev[i]->pp; + pci_stop_root_bus(pp->pci_bus); + pci_remove_root_bus(pp->pci_bus); + + if (!pcie->is_emulation) + qcom_ep_reset_assert(pcie); + phy_power_off(pcie->phy); + + qcom_pcie_dev[i]->ops->deinit(qcom_pcie_dev[i]); + pp->pci_bus = NULL; + pcie->enumerated = false; + pr_notice(" ... 
done<---\n"); + } + } +} + +static void handle_e911_func(struct work_struct *work) +{ + + int slot_id; + struct qcom_pcie *pcie = container_of(work, struct qcom_pcie, + handle_e911_work); + slot_id = pcie->slot_id; + + if (gpiod_get_value(mdm2ap_e911)) + qcom_slot_remove(slot_id); + else + qcom_slot_rescan(slot_id); +} + +static irqreturn_t handle_mdm2ap_e911_irq(int irq, void *data) +{ + struct qcom_pcie *pcie = data; + + schedule_work(&pcie->handle_e911_work); + + return IRQ_HANDLED; +} + +static ssize_t qcom_bus_rescan_store(struct bus_type *bus, const char *buf, + size_t count) +{ + unsigned long val; + + if (kstrtoul(buf, 0, &val) < 0) + return -EINVAL; + + if (gpiod_get_value(mdm2ap_e911)) + return -EBUSY; + + if (val) { + pci_lock_rescan_remove(); + qcom_pcie_rescan(); + pci_unlock_rescan_remove(); + } + return count; +} +static BUS_ATTR(rcrescan, (S_IWUSR|S_IWGRP), NULL, qcom_bus_rescan_store); + +static ssize_t qcom_bus_remove_store(struct bus_type *bus, const char *buf, + size_t count) +{ + unsigned long val; + + if (kstrtoul(buf, 0, &val) < 0) + return -EINVAL; + + if (val) { + pci_lock_rescan_remove(); + qcom_pcie_remove_bus(); + pci_unlock_rescan_remove(); + } + return count; +} +static BUS_ATTR(rcremove, (S_IWUSR|S_IWGRP), NULL, qcom_bus_remove_store); + +static ssize_t qcom_slot_rescan_store(struct bus_type *bus, const char *buf, + size_t count) +{ + unsigned long val; + + if (kstrtoul(buf, 0, &val) < 0) + return -EINVAL; + + qcom_slot_rescan(val); + + return count; +} +static BUS_ATTR(slot_rescan, (S_IWUSR|S_IWGRP), NULL, qcom_slot_rescan_store); + +static ssize_t qcom_slot_remove_store(struct bus_type *bus, const char *buf, + size_t count) +{ + unsigned long val; + + if (kstrtoul(buf, 0, &val) < 0) + return -EINVAL; + + qcom_slot_remove(val); + + return count; +} +static BUS_ATTR(slot_remove, (S_IWUSR|S_IWGRP), NULL, qcom_slot_remove_store); + +int qcom_pcie_register_event(struct qcom_pcie_register_event *reg) +{ + int ret = 0; + struct pci_dev *pci_dev; + struct pcie_port *pp; + struct qcom_pcie *qcom_pcie; + + if (!reg) { + pr_err("PCIe: Event registration is NULL\n"); + return -ENODEV; + } + + if (!reg->user) { + pr_err("PCIe: User of event registration is NULL\n"); + return -ENODEV; + } + pci_dev = (struct pci_dev *)reg->user; + pp = pci_dev->bus->sysdata; + qcom_pcie = to_qcom_pcie(pp); + + if (qcom_pcie) { + qcom_pcie->event_reg = reg; + pr_info("Event 0x%x is registered for RC %d\n", reg->events, + qcom_pcie->rc_idx); + } else { + pr_err("PCIe: did not find RC for pci endpoint device %p\n", + reg->user); + ret = -ENODEV; + } + + return ret; +} +EXPORT_SYMBOL(qcom_pcie_register_event); + +int qcom_pcie_deregister_event(struct qcom_pcie_register_event *reg) +{ + int ret = 0; + struct pci_dev *pci_dev; + struct pcie_port *pp; + struct qcom_pcie *qcom_pcie; + + if (!reg) { + pr_err("PCIe: Event deregistration is NULL\n"); + return -ENODEV; + } + + if (!reg->user) { + pr_err("PCIe: User of event deregistration is NULL\n"); + return -ENODEV; + } + pci_dev = (struct pci_dev *)reg->user; + pp = pci_dev->bus->sysdata; + qcom_pcie = to_qcom_pcie(pp); + + if (qcom_pcie) { + qcom_pcie->event_reg = NULL; + pr_info("Event is deregistered for RC %d\n", + qcom_pcie->rc_idx); + } else { + pr_err("PCIe: did not find RC for pci endpoint device %p\n", + reg->user); + ret = -ENODEV; + } + + return ret; +} +EXPORT_SYMBOL(qcom_pcie_deregister_event); + +static int pci_reboot_handler(struct notifier_block *this, + unsigned long event, void *ptr) +{ + pci_lock_rescan_remove(); + 
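+ /* On reboot, remove every enumerated root bus under the rescan lock;
+  * qcom_pcie_remove_bus() also asserts PERST# towards the endpoints. */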
qcom_pcie_remove_bus(); + pci_unlock_rescan_remove(); + + return 0; +} + +static int qcom_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct qcom_pcie *pcie; + struct pcie_port *pp; + int ret; + uint32_t force_gen1 = 0; + uint32_t force_gen2 = 0; + struct device_node *np = pdev->dev.of_node; + u32 is_emulation = 0; + u32 use_delay = 0; + u32 link_retries_count = 0; + u32 slot_id = -1; + u32 compliance = 0; + static int rc_idx; + int i; + char irq_name[20]; + u32 soc_version_major; + int index = 0; + + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pcie->ops = (struct qcom_pcie_ops *)of_device_get_match_data(dev); + pcie->dev = dev; + + of_property_read_u32(np, "compliance", &compliance); + pcie->compliance = compliance; + + of_property_read_u32(np, "is_emulation", &is_emulation); + pcie->is_emulation = is_emulation; + + of_property_read_u32(np, "use_delay", &use_delay); + pcie->use_delay = use_delay; + + of_property_read_u32(np, "link_retries_count", &link_retries_count); + pcie->link_retries_count = link_retries_count; + + of_property_read_u32(np, "slot_id", &slot_id); + pcie->slot_id = slot_id; + + of_property_read_u32(np, "pcie-cap-active-state-link-pm", + &pcie->cap_active_state_link_pm); + + pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH); + if (IS_ERR(pcie->reset)) + return PTR_ERR(pcie->reset); + + of_property_read_u32(np, "force_gen1", &force_gen1); + pcie->force_gen1 = force_gen1; + + of_property_read_u32(np, "force_gen2", &force_gen2); + pcie->force_gen2 = force_gen2; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); + pcie->dbi = devm_ioremap_resource(dev, res); + if (IS_ERR(pcie->dbi)) + return PTR_ERR(pcie->dbi); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi"); + pcie->elbi = devm_ioremap_resource(dev, res); + if (IS_ERR(pcie->elbi)) + return PTR_ERR(pcie->elbi); + + pcie->is_gen3 = 0; + if (of_device_is_compatible(pdev->dev.of_node, "qcom,pcie-ipq807x")) { + soc_version_major = read_ipq_soc_version_major(); + BUG_ON(soc_version_major <= 0); + index = of_property_match_string(dev->of_node, "phy-names", + "pciephy"); + if (index < 0) { + if (soc_version_major == 1) { + pcie->phy = devm_phy_optional_get(dev, "pciephy-gen2"); + if (IS_ERR(pcie->phy)) + return PTR_ERR(pcie->phy); + pcie->is_gen3 = 0; + } else if (soc_version_major == 2) { + pcie->phy = devm_phy_optional_get(dev, "pciephy-gen3"); + if (IS_ERR(pcie->phy)) + return PTR_ERR(pcie->phy); + pcie->is_gen3 = 1; + } else { + dev_err(dev, "missing phy-names\n"); + return index; + } + } else { + pcie->phy = devm_phy_optional_get(dev, "pciephy"); + if (IS_ERR(pcie->phy)) + return PTR_ERR(pcie->phy); + pcie->is_gen3 = 0; + } + } else if (of_device_is_compatible(pdev->dev.of_node, "qcom,pcie-ipq6018")) { + if (!pcie->is_emulation) { + pcie->phy = devm_phy_optional_get(dev, "pciephy"); + if (IS_ERR(pcie->phy)) + return PTR_ERR(pcie->phy); + } + pcie->is_gen3 = 1; + } else if (of_device_is_compatible(pdev->dev.of_node, + "qcom,pcie-ipq5018")) { + if (!pcie->is_emulation) { + pcie->phy = devm_phy_optional_get(dev, "pciephy"); + if (IS_ERR(pcie->phy)) + return PTR_ERR(pcie->phy); + } + pcie->is_gen3 = 1; + } + + if (pcie->is_gen3) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dm_iatu"); + pcie->dm_iatu = devm_ioremap_resource(dev, res); + if (IS_ERR(pcie->dm_iatu)) + return PTR_ERR(pcie->dm_iatu); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 
"parf"); + if (res) + res->end += PCIE_V2_PARF_SIZE; + pcie->parf = devm_ioremap_resource(dev, res); + if (IS_ERR(pcie->parf)) + return PTR_ERR(pcie->parf); + } else { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf"); + pcie->parf = devm_ioremap_resource(dev, res); + if (IS_ERR(pcie->parf)) + return PTR_ERR(pcie->parf); + } + + ret = pcie->ops->get_resources(pcie); + if (ret) + return ret; + + pp = &pcie->pp; + pp->dev = dev; + pp->dbi_base = pcie->dbi; + pp->dm_iatu = pcie->dm_iatu; + pp->is_gen3 = pcie->is_gen3; + pp->use_delay = pcie->use_delay; + pp->link_retries_count = pcie->link_retries_count; + pp->root_bus_nr = -1; + pp->ops = &qcom_pcie_dw_ops; + + pcie->mdm2ap_e911_irq = platform_get_irq_byname(pdev, + "mdm2ap_e911"); + if (pcie->mdm2ap_e911_irq >= 0) { + mdm2ap_e911 = devm_gpiod_get_optional(&pdev->dev, "e911", + GPIOD_IN); + + if (IS_ERR(mdm2ap_e911)) { + pr_err("requesting for e911 gpio failed %ld\n", + PTR_ERR(mdm2ap_e911)); + return PTR_ERR(mdm2ap_e911); + } + + INIT_WORK(&pcie->handle_e911_work, handle_e911_func); + + ret = devm_request_irq(&pdev->dev, pcie->mdm2ap_e911_irq, + handle_mdm2ap_e911_irq, + IRQ_TYPE_EDGE_BOTH, "mdm2ap_e911", + pcie); + + if (ret) { + dev_err(&pdev->dev, "Unable to request mdm2ap_e911 irq\n"); + return ret; + } + + pcie->pci_reboot_notifier.notifier_call = pci_reboot_handler; + ret = register_reboot_notifier(&pcie->pci_reboot_notifier); + if (ret) { + pr_warn("%s: Failed to register notifier (%d)\n", + __func__, ret); + return ret; + } + } + + pcie->link_down_irq = platform_get_irq_byname(pdev, + "int_link_down"); + if (pcie->link_down_irq >= 0) { + ret = devm_request_irq(&pdev->dev, pcie->link_down_irq, + handle_link_down_irq, + IRQF_TRIGGER_RISING, "pci_link_down", + pcie); + } + + pcie->link_up_irq = platform_get_irq_byname(pdev, "int_link_up"); + if (pcie->link_up_irq >= 0) { + ret = devm_request_irq(&pdev->dev, pcie->link_up_irq, + handle_link_up_irq, + IRQF_TRIGGER_RISING, "pci_link_up", + pcie); + } + + pcie->global_irq = platform_get_irq_byname(pdev, "global"); + if (pcie->global_irq >= 0) { + ret = devm_request_irq(&pdev->dev, pcie->global_irq, + qcom_pcie_global_irq_handler, + IRQF_TRIGGER_RISING, "qcom-pcie-global", pcie); + if (ret) { + dev_err(&pdev->dev, "cannot request global irq\n"); + return ret; + } + } + + pp->msi_gicm_addr = 0; + pp->msi_gicm_base = 0; + of_property_read_u32(np, "qcom,msi-gicm-addr", &pp->msi_gicm_addr); + of_property_read_u32(np, "qcom,msi-gicm-base", &pp->msi_gicm_base); + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + pp->msi_irq = platform_get_irq_byname(pdev, "msi"); + if (pp->msi_irq < 0) + return pp->msi_irq; + + ret = devm_request_irq(dev, pp->msi_irq, + qcom_pcie_msi_irq_handler, + IRQF_SHARED, "qcom-pcie-msi", pp); + if (ret) { + dev_err(dev, "cannot request msi irq\n"); + return ret; + } + for (i = 0; i < MAX_MSI_IRQS; i++) { + snprintf(irq_name, sizeof(irq_name), "msi_%d", i); + pp->msi[i] = platform_get_irq_byname(pdev, irq_name); + if (pp->msi[i] < 0) + break; + } + } + + ret = phy_init(pcie->phy); + if (ret) + return ret; + + pcie->wake_irq = platform_get_irq_byname(pdev, "wake_gpio"); + + ret = dw_pcie_host_init(pp); + + if (ret) { + if (pcie->wake_irq < 0) { + dev_err(dev, "cannot initialize host\n"); + return ret; + } + pr_info("PCIe: RC%d is not enabled during bootup;it will be enumerated upon client request\n", + rc_idx); + } else { + pcie->enumerated = true; + pr_info("PCIe: RC%d enabled during bootup\n", rc_idx); + } + + if (pcie->wake_irq >= 0) { + 
INIT_WORK(&pcie->handle_wake_work, handle_wake_func);
+
+ ret = devm_request_irq(&pdev->dev, pcie->wake_irq,
+ qcom_pcie_wake_irq_handler,
+ IRQF_TRIGGER_FALLING, "qcom-pcie-wake", pcie);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to request wake irq\n");
+ return ret;
+ }
+ }
+
+ /*
+ ####ipq-4019, add pcie_wake control
+ static irqreturn_t quectel_pcie_wake_irq(int irq, void *data)
+ {
+ return IRQ_WAKE_THREAD;
+ }
+
+ static irqreturn_t quectel_pcie_wake_thread_irq(int irq, void *data)
+ {
+ struct qcom_pcie *pcie = data;
+ int val = gpiod_get_value(pcie->quectel_pwake);
+
+ pr_info("pwake val: %d\n", val);
+ if (val) {
+ //up: rescan
+ pci_lock_rescan_remove();
+ qcom_pcie_rescan();
+ pci_unlock_rescan_remove();
+ } else {
+ //down: remove
+ pci_lock_rescan_remove();
+ qcom_pcie_remove_bus();
+ pci_unlock_rescan_remove();
+ }
+ return IRQ_HANDLED;
+ }
+
+ pcie->quectel_pwake = devm_gpiod_get_index(dev, "pcie-wake", 0, GPIOD_IN);
+ if (IS_ERR(pcie->quectel_pwake)) {
+ dev_err(&pdev->dev, "Please set pcie-wake gpio in DTS\n");
+ return PTR_ERR(pcie->quectel_pwake);
+ }
+
+ pcie->quectel_pwake_irq = gpiod_to_irq(pcie->quectel_pwake);
+
+ ret = devm_request_threaded_irq(&pdev->dev, pcie->quectel_pwake_irq,
+ quectel_pcie_wake_irq,
+ quectel_pcie_wake_thread_irq,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_NO_SUSPEND,
+ "quectel-pcie-wake", pcie);
+
+ enable_irq_wake(pcie->quectel_pwake_irq);
+ */
+
+ platform_set_drvdata(pdev, pcie);
+
+ if (!rc_idx) {
+ ret = bus_create_file(&pci_bus_type, &bus_attr_rcrescan);
+ if (ret != 0) {
+ dev_err(&pdev->dev,
+ "Failed to create sysfs rcrescan file\n");
+ return ret;
+ }
+
+ ret = bus_create_file(&pci_bus_type, &bus_attr_rcremove);
+ if (ret != 0) {
+ dev_err(&pdev->dev,
+ "Failed to create sysfs rcremove file\n");
+ return ret;
+ }
+ }
+
+ /* create sysfs files to support slot rescan and remove */
+ if (!rc_idx) {
+ ret = bus_create_file(&pci_bus_type, &bus_attr_slot_rescan);
+ if (ret != 0) {
+ dev_err(&pdev->dev,
+ "Failed to create sysfs slot_rescan file\n");
+ return ret;
+ }
+
+ ret = bus_create_file(&pci_bus_type, &bus_attr_slot_remove);
+ if (ret != 0) {
+ dev_err(&pdev->dev,
+ "Failed to create sysfs slot_remove file\n");
+ return ret;
+ }
+ }
+
+ pcie->rc_idx = rc_idx;
+ qcom_pcie_dev[rc_idx++] = pcie;
+
+ return 0;
+}
+
+static int qcom_pcie_remove(struct platform_device *pdev)
+{
+ struct qcom_pcie *pcie = platform_get_drvdata(pdev);
+
+ if (!pcie->is_emulation)
+ qcom_ep_reset_assert(pcie);
+
+ phy_power_off(pcie->phy);
+ phy_exit(pcie->phy);
+ pcie->ops->deinit(pcie);
+
+ return 0;
+}
+
+static void qcom_pcie_fixup_final(struct pci_dev *dev)
+{
+ int cap, err;
+ u16 ctl, reg_val;
+
+ cap = pci_pcie_cap(dev);
+ if (!cap)
+ return;
+
+ err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
+
+ if (err)
+ return;
+
+ reg_val = ctl;
+
+ if (((reg_val & PCIE20_MRRS_MASK) >> 12) > 1)
+ reg_val = (reg_val & ~(PCIE20_MRRS_MASK)) | PCIE20_MRRS(0x1);
+
+ if (((ctl & PCIE20_MPS_MASK) >> 5) > 1)
+ reg_val = (reg_val & ~(PCIE20_MPS_MASK)) | PCIE20_MPS(0x1);
+
+ err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, reg_val);
+
+ if (err)
+ pr_err("pcie config write failed %d\n", err);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, qcom_pcie_fixup_final);
+
+static const struct of_device_id qcom_pcie_match[] = {
+ { .compatible = "qcom,pcie-ipq8064", .data = &ops_v0 },
+ { .compatible = "qcom,pcie-apq8064", .data = &ops_v0 },
+ { .compatible = "qcom,pcie-apq8084", .data = &ops_v1 },
+ { .compatible = 
"qcom,pcie-ipq4019", .data = &ops_v2 }, + { .compatible = "qcom,pcie-ipq807x", .data = &ops_v3 }, + { .compatible = "qcom,pcie-ipq6018", .data = &ops_v3 }, + { .compatible = "qcom,pcie-ipq5018", .data = &ops_v3 }, + { } +}; +MODULE_DEVICE_TABLE(of, qcom_pcie_match); + +static struct platform_driver qcom_pcie_driver = { + .probe = qcom_pcie_probe, + .remove = qcom_pcie_remove, + .driver = { + .name = "qcom-pcie", + .of_match_table = qcom_pcie_match, + }, +}; + +module_platform_driver(qcom_pcie_driver); + +MODULE_AUTHOR("Stanimir Varbanov "); +MODULE_DESCRIPTION("Qualcomm PCIe root complex driver"); +MODULE_LICENSE("GPL v2"); diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/Kconfig b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/Kconfig new file mode 100644 index 000000000..f6480336a --- /dev/null +++ b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/Kconfig @@ -0,0 +1,9 @@ +menu "SIPA modules" + +config SPRD_SIPA + bool "sipa ipa" + default n + help + sipa is a module for spreadtrum ip packet accelator driver. + +endmenu diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/Makefile b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/Makefile new file mode 100644 index 000000000..1d5101bf8 --- /dev/null +++ b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/Makefile @@ -0,0 +1,6 @@ +EXTRA_CFLAGS += -Wno-error -Wno-packed-bitfield-compat +ccflags-y += -DCONFIG_SPRD_SIPA +obj-y += sipa_core.o sipa_skb_send.o sipa_skb_recv.o \ + sipa_nic.o sipa_debugfs.o sipa_dele_cmn.o \ + sipa_eth.o sipa_dummy.o +obj-y += sipa_phy_v0/ diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_core.c b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_core.c new file mode 100644 index 000000000..bcde7e519 --- /dev/null +++ b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_core.c @@ -0,0 +1,333 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note. + * + * UNISOC 'virt sipa' driver + * + * Qingsheng.Li + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License v2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../include/sipa.h" +#include "../include/sprd_pcie_ep_device.h" +#include "../include/sipc.h" +#include "sipa_core.h" + +#define DRV_NAME "virt_sipa" + +struct sipa_core *sipa_ctrl; + +struct sipa_core *sipa_get_ctrl_pointer(void) +{ + return sipa_ctrl; +} +EXPORT_SYMBOL(sipa_get_ctrl_pointer); + +static void sipa_notify_sender_flow_ctrl(struct work_struct *work) +{ + struct sipa_core *sipa_ctrl = container_of(work, struct sipa_core, + flow_ctrl_work); + + if (sipa_ctrl->sender && sipa_ctrl->sender->free_notify_net) + wake_up(&sipa_ctrl->sender->free_waitq); +} + +static int sipa_init_cmn_fifo(struct sipa_core *ipa, + enum sipa_cmn_fifo_index id) +{ + size_t size; + dma_addr_t dma_addr; + struct sipa_cmn_fifo_cfg_tag *cmn_fifo; + + cmn_fifo = &ipa->cmn_fifo_cfg[id]; + cmn_fifo->fifo_id = id; + cmn_fifo->dst = SIPA_TERM_VCP; + cmn_fifo->cur = SIPA_TERM_PCIE0; + size = cmn_fifo->tx_fifo.depth * + sizeof(struct sipa_node_description_tag); + cmn_fifo->tx_fifo.virtual_addr = dma_alloc_coherent(ipa->pci_dev, size, + &dma_addr, + GFP_KERNEL); + if (!cmn_fifo->tx_fifo.virtual_addr) + return -ENOMEM; + cmn_fifo->tx_fifo.dma_ptr = dma_addr; + memset(cmn_fifo->tx_fifo.virtual_addr, 0, size); + pr_info("comfifo%d tx_fifo addr-0x%lx\n", id, (long unsigned int)cmn_fifo->tx_fifo.virtual_addr); + + cmn_fifo->tx_fifo.fifo_base_addr_l = lower_32_bits(dma_addr); + cmn_fifo->tx_fifo.fifo_base_addr_h = 0x2; + + size = cmn_fifo->rx_fifo.depth * + sizeof(struct sipa_node_description_tag); + cmn_fifo->rx_fifo.virtual_addr = dma_alloc_coherent(ipa->pci_dev, size, + &dma_addr, + GFP_KERNEL); + if (!cmn_fifo->rx_fifo.virtual_addr) + return -ENOMEM; + cmn_fifo->rx_fifo.dma_ptr = dma_addr; + memset(cmn_fifo->rx_fifo.virtual_addr, 0, size); + pr_info("comfifo%d rx_fifo addr-0x%lx\n", id, (long unsigned int)cmn_fifo->rx_fifo.virtual_addr); + + cmn_fifo->rx_fifo.fifo_base_addr_l = lower_32_bits(dma_addr); + cmn_fifo->rx_fifo.fifo_base_addr_h = 0x2; + + return 0; +} + +static void sipa_free_cmn_fifo(struct sipa_core *ipa, enum sipa_cmn_fifo_index id) +{ + size_t size; + struct sipa_cmn_fifo_cfg_tag *cmn_fifo; + + cmn_fifo = &ipa->cmn_fifo_cfg[id]; + size = cmn_fifo->tx_fifo.depth * sizeof(struct sipa_node_description_tag); + dma_free_coherent(ipa->dev, size, cmn_fifo->tx_fifo.virtual_addr, cmn_fifo->tx_fifo.dma_ptr); + size = cmn_fifo->rx_fifo.depth * sizeof(struct sipa_node_description_tag); + dma_free_coherent(ipa->dev, size, cmn_fifo->rx_fifo.virtual_addr, cmn_fifo->rx_fifo.dma_ptr); +} + +static void sipa_init_ep(struct sipa_core *ipa) +{ + struct sipa_endpoint *ep = &ipa->ep; + + ep->send_fifo = &ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_UL]; + ep->recv_fifo = &ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_DL]; +} + +#ifdef SPRD_PCIE_USE_DTS +static int sipa_parse_dts_configuration(struct platform_device *pdev, + struct sipa_core *ipa) +{ + int ret; + struct sipa_cmn_fifo_cfg_tag *cmn_fifo; + + ipa->reg_res = platform_get_resource_byname(pdev, + IORESOURCE_MEM, "ipa-base"); + if (!ipa->reg_res) { + dev_err(&pdev->dev, "get ipa-base res fail\n"); + return -EINVAL; + } + + cmn_fifo = &ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_DL]; + ret = of_property_read_u32(pdev->dev.of_node, "pcie-dl-tx-fifo-depth", + &cmn_fifo->tx_fifo.depth); + if (ret) { + dev_err(&pdev->dev, + "get pcie-dl-tx-fifo-depth ret = %d\n", ret); + return 
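/*
+	 * Each depth read in this function sizes one coherent DMA ring in
+	 * sipa_init_cmn_fifo(): size = depth * sizeof(struct
+	 * sipa_node_description_tag). A quick check of what that costs with
+	 * the 4096-entry default and the 16-byte packed descriptor defined
+	 * in sipa_core.h (values shown for illustration):
+	 *
+	 *	size_t ring_bytes = 4096 * sizeof(struct sipa_node_description_tag);
+	 *	// 4096 * 16 == 65536 bytes per direction, from dma_alloc_coherent()
+	 */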
ret; + } + + ret = of_property_read_u32(pdev->dev.of_node, "pcie-dl-rx-fifo-depth", + &cmn_fifo->rx_fifo.depth); + if (ret) { + dev_err(&pdev->dev, + "get pcie-dl-rx-fifo-depth ret = %d\n", ret); + return ret; + } + + cmn_fifo = &ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_UL]; + ret = of_property_read_u32(pdev->dev.of_node, "pcie-ul-tx-fifo-depth", + &cmn_fifo->tx_fifo.depth); + if (ret) { + dev_err(&pdev->dev, + "get pcie-ul-tx-fifo-depth ret = %d\n", ret); + return ret; + } + + ret = of_property_read_u32(pdev->dev.of_node, "pcie-ul-rx-fifo-depth", + &cmn_fifo->rx_fifo.depth); + if (ret) { + dev_err(&pdev->dev, + "get pcie-ul-rx-fifo-depth ret = %d\n", ret); + return ret; + } + + return 0; +} +#else +static struct resource ipa_res = { + .start = 0x2e000000, + .end = 0x2e000000 + 0x2000 -1, + .flags = IORESOURCE_MEM, +}; + +static int sipa_parse_dts_configuration(struct platform_device *pdev, + struct sipa_core *ipa) +{ + struct sipa_cmn_fifo_cfg_tag *cmn_fifo; + ipa->reg_res = &ipa_res; + cmn_fifo = &ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_DL]; + cmn_fifo->tx_fifo.depth = 4096; + cmn_fifo->rx_fifo.depth = 4096; + cmn_fifo = &ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_UL]; + cmn_fifo->tx_fifo.depth = 4096; + cmn_fifo->rx_fifo.depth = 4096; + return 0; +} +#endif + +static int sipa_plat_drv_probe(struct platform_device *pdev) +{ + int ret; + struct sipa_core *ipa; + struct device *dev = &pdev->dev; + struct device *pci_dev; + + pci_dev = (struct device *)dev_get_drvdata(dev); + if(!pci_dev) + return -1; + + ipa = devm_kzalloc(dev, sizeof(*ipa), GFP_KERNEL); + if (!ipa) + return -ENOMEM; + + sipa_ctrl = ipa; + ipa->dev = dev; + ipa->pci_dev = pci_dev; + ipa->pcie_mem_offset = SIPA_PCIE_MEM_OFFSET; + dev_set_drvdata(dev, ipa); + ret = sipa_parse_dts_configuration(pdev, ipa); + if (ret) + return ret; + + ret = sipa_init_cmn_fifo(ipa, SIPA_FIFO_PCIE_DL); + if (ret) + return ret; + + ret = sipa_init_cmn_fifo(ipa, SIPA_FIFO_PCIE_UL); + if (ret) + return ret; + + sipa_init_ep(ipa); + + sipa_fifo_ops_init(&ipa->hal_ops); + INIT_WORK(&ipa->flow_ctrl_work, sipa_notify_sender_flow_ctrl); + + create_sipa_skb_receiver(&ipa->ep, &ipa->receiver); + create_sipa_skb_sender(&ipa->ep, &ipa->sender); + device_init_wakeup(dev, true); + + sipa_create_smsg_channel(ipa); + + sprd_ep_dev_register_irq_handler(PCIE_EP_MODEM, PCIE_MSI_IPA, + (irq_handler_t)sipa_int_callback_func, + (void *)ipa); + sipa_init_debugfs(ipa); + + return 0; +} + +extern void destroy_sipa_skb_receiver(struct sipa_skb_receiver *receiver); +extern void destroy_sipa_skb_sender(struct sipa_skb_sender *sender); + +static int sipa_plat_drv_remove(struct platform_device *pdev) +{ + struct sipa_core *ipa; + + ipa = dev_get_drvdata(&pdev->dev); + smsg_ch_close(SIPC_ID_MINIAP, SMSG_CH_COMM_SIPA, 1000); + if(ipa->smsg_thread){ + kthread_stop(ipa->smsg_thread); + ipa->smsg_thread = NULL; + } + destroy_sipa_skb_sender(ipa->sender); + destroy_sipa_skb_receiver(ipa->receiver); + cancel_work_sync(&ipa->flow_ctrl_work); + mdelay(1000); + sipa_free_cmn_fifo(ipa, SIPA_FIFO_PCIE_UL); + sipa_free_cmn_fifo(ipa, SIPA_FIFO_PCIE_DL); + if (!IS_ERR_OR_NULL(ipa->dentry)) + debugfs_remove_recursive(ipa->dentry); + devm_kfree(&pdev->dev, ipa); + platform_set_drvdata(pdev, NULL); + return 0; +} + +#ifdef SPRD_PCIE_USE_DTS +static const struct of_device_id sipa_plat_drv_match[] = { + { .compatible = "sprd,virt-sipa"}, +}; +#endif + +static struct platform_driver sipa_plat_drv = { + .probe = sipa_plat_drv_probe, + .remove = sipa_plat_drv_remove, + .driver = { + .name = DRV_NAME, + .owner = 
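/*
+	 * When SPRD_PCIE_USE_DTS is not defined there is no of_match_table,
+	 * so binding relies purely on the platform-device name. A sketch of
+	 * that contract (see sipa_module_init() below for the real code):
+	 *
+	 *	pdev = platform_device_alloc("virt_sipa", -1);  // must equal DRV_NAME
+	 *	platform_device_add(pdev);
+	 *	platform_driver_register(&sipa_plat_drv);       // probe runs on name match
+	 */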
THIS_MODULE,
+#ifdef SPRD_PCIE_USE_DTS
+		.of_match_table = sipa_plat_drv_match,
+#endif
+	},
+};
+
+#ifndef SPRD_PCIE_USE_DTS
+static struct platform_device *sipa_plat_dev;
+static int sipa_core_platform_device_register(struct device *dev)
+{
+	int retval = -ENOMEM;
+
+	sipa_plat_dev = platform_device_alloc("virt_sipa", -1);
+	if (!sipa_plat_dev)
+		return retval;
+
+	sipa_plat_dev->dev.dma_mask = dev->dma_mask;
+	sipa_plat_dev->dev.coherent_dma_mask = dev->coherent_dma_mask;
+	sipa_plat_dev->dev.archdata = dev->archdata;
+	dev_set_drvdata(&sipa_plat_dev->dev, dev);
+	retval = platform_device_add(sipa_plat_dev);
+	if (retval < 0)
+		platform_device_put(sipa_plat_dev);
+
+	return retval;
+}
+#endif
+
+int sipa_module_init(struct device *dev)
+{
+#ifndef SPRD_PCIE_USE_DTS
+	sipa_core_platform_device_register(dev);
+#endif
+	return platform_driver_register(&sipa_plat_drv);
+}
+EXPORT_SYMBOL(sipa_module_init);
+
+void sipa_module_exit(void)
+{
+	platform_driver_unregister(&sipa_plat_drv);
+#ifndef SPRD_PCIE_USE_DTS
+	platform_device_unregister(sipa_plat_dev);
+#endif
+}
+EXPORT_SYMBOL(sipa_module_exit);
diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_core.h b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_core.h
new file mode 100644
index 000000000..f67853bd1
--- /dev/null
+++ b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_core.h
@@ -0,0 +1,519 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */ + +#ifndef _SIPA_CORE_H_ +#define _SIPA_CORE_H_ + +#include +#include +#include +#include +#include +#include +#include + +enum sipa_cmn_fifo_index { + SIPA_FIFO_PCIE_DL, + SIPA_FIFO_PCIE_UL, + SIPA_FIFO_MAX, +}; + +enum sipa_irq_evt_type { + SIPA_IRQ_TX_FIFO_THRESHOLD_SW = BIT(22), + SIPA_IRQ_EXIT_FLOW_CTRL = BIT(20), + SIPA_IRQ_ENTER_FLOW_CTRL = BIT(19), + SIPA_IRQ_TXFIFO_FULL_INT = BIT(18), + SIPA_IRQ_TXFIFO_OVERFLOW = BIT(17), + SIPA_IRQ_ERRORCODE_IN_TX_FIFO = BIT(16), + SIPA_IRQ_INTR_BIT = BIT(15), + SIPA_IRQ_THRESHOLD = BIT(14), + SIPA_IRQ_DELAY_TIMER = BIT(13), + SIPA_IRQ_DROP_PACKT_OCCUR = BIT(12), + + SIPA_IRQ_ERROR = 0x0, +}; + +#define SIPA_FIFO_THRESHOLD_IRQ_EN BIT(1) +#define SIPA_FIFO_DELAY_TIMER_IRQ_EN BIT(0) + +#define SIPA_PCIE_MEM_OFFSET 0x200000000ULL + +enum sipa_nic_status_e { + NIC_OPEN, + NIC_CLOSE +}; + +#define SIPA_RECV_EVT (SIPA_IRQ_INTR_BIT | SIPA_IRQ_THRESHOLD | \ + SIPA_IRQ_DELAY_TIMER | SIPA_IRQ_TX_FIFO_THRESHOLD_SW) + +#define SIPA_RECV_WARN_EVT (SIPA_IRQ_TXFIFO_FULL_INT | SIPA_IRQ_TXFIFO_OVERFLOW) + +#define SMSG_FLG_DELE_REQUEST 0x1 +#define SMSG_FLG_DELE_RELEASE 0x2 + +typedef void (*sipa_irq_notify_cb)(void *priv, + enum sipa_irq_evt_type evt, + u32 data); + +struct sipa_node_description_tag { + /*soft need to set*/ + u64 address : 40; + /*soft need to set*/ + u32 length : 20; + /*soft need to set*/ + u16 offset : 12; + /*soft need to set*/ + u8 net_id; + /*soft need to set*/ + u8 src : 5; + /*soft need to set*/ + u8 dst : 5; + u8 prio : 3; + u8 bear_id : 7; + /*soft need to set*/ + u8 intr : 1; + /*soft need to set*/ + u8 indx : 1; + u8 err_code : 4; + u32 reserved : 22; +} __attribute__((__packed__)); + +struct sipa_cmn_fifo_params { + u32 tx_intr_delay_us; + u32 tx_intr_threshold; + bool flowctrl_in_tx_full; + u32 flow_ctrl_cfg; + u32 flow_ctrl_irq_mode; + u32 tx_enter_flowctrl_watermark; + u32 tx_leave_flowctrl_watermark; + u32 rx_enter_flowctrl_watermark; + u32 rx_leave_flowctrl_watermark; + + u32 data_ptr_cnt; + u32 buf_size; + dma_addr_t data_ptr; +}; + +struct sipa_skb_dma_addr_node { + struct sk_buff *skb; + u64 dma_addr; + struct list_head list; +}; + +struct sipa_cmn_fifo_tag { + u32 depth; + u32 wr; + u32 rd; + + u32 fifo_base_addr_l; + u32 fifo_base_addr_h; + + void *virtual_addr; + dma_addr_t dma_ptr; +}; + +struct sipa_cmn_fifo_cfg_tag { + const char *fifo_name; + + void *priv; + + enum sipa_cmn_fifo_index fifo_id; + + bool state; + u32 dst; + u32 cur; + + void __iomem *fifo_reg_base; + + struct sipa_cmn_fifo_tag rx_fifo; + struct sipa_cmn_fifo_tag tx_fifo; + + u32 enter_flow_ctrl_cnt; + u32 exit_flow_ctrl_cnt; + + sipa_irq_notify_cb irq_cb; +}; + +struct sipa_endpoint { + /* Centered on CPU/PAM */ + struct sipa_cmn_fifo_cfg_tag *send_fifo; + struct sipa_cmn_fifo_cfg_tag *recv_fifo; + + struct sipa_cmn_fifo_params send_fifo_param; + struct sipa_cmn_fifo_params recv_fifo_param; + + bool inited; + bool connected; + bool suspended; +}; + +struct sipa_nic { + enum sipa_nic_id nic_id; + struct sipa_endpoint *send_ep; + struct sk_buff_head rx_skb_q; + int need_notify; + u32 src_mask; + int netid; + struct list_head list; + sipa_notify_cb cb; + void *cb_priv; + atomic_t status; + bool flow_ctrl_status; + bool continue_notify; + bool rm_flow_ctrl; +}; + +struct sipa_skb_array { + struct sipa_skb_dma_addr_node *array; + u32 rp; + u32 wp; + u32 depth; +}; + +struct sipa_skb_sender { + struct device *dev; + + struct sipa_endpoint *ep; + + atomic_t left_cnt; + /* To be used for add/remove nic device */ + spinlock_t nic_lock; + /* To be used for 
send skb process */ + spinlock_t send_lock; + spinlock_t exit_lock; + struct list_head nic_list; + struct list_head sending_list; + struct list_head pair_free_list; + struct sipa_skb_dma_addr_node *pair_cache; + + bool free_notify_net; + bool ep_cover_net; + bool send_notify_net; + + wait_queue_head_t free_waitq; + + struct task_struct *free_thread; + struct task_struct *send_thread; + + bool init_flag; + u32 no_mem_cnt; + u32 no_free_cnt; + u32 enter_flow_ctrl_cnt; + u32 exit_flow_ctrl_cnt; + u32 run; +}; + +struct sipa_skb_receiver { + struct sipa_endpoint *ep; + u32 rsvd; + struct sipa_skb_array recv_array; + wait_queue_head_t recv_waitq; + wait_queue_head_t fill_recv_waitq; + spinlock_t lock; + spinlock_t exit_lock; + u32 nic_cnt; + atomic_t need_fill_cnt; + struct sipa_nic *nic_array[SIPA_NIC_MAX]; + + struct task_struct *fill_thread; + + u32 tx_danger_cnt; + u32 rx_danger_cnt; + u32 run; +}; + +struct sipa_fifo_hal_ops { + int (*open)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, void *cookie); + int (*close)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base); + int (*set_rx_depth)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, u32 depth); + int (*set_tx_depth)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, u32 depth); + u32 (*get_rx_depth)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base); + int (*hal_set_tx_depth)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 depth); + u32 (*get_tx_depth)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base); + int (*set_intr_drop_packet)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 enable, sipa_irq_notify_cb cb); + int (*set_intr_error_code)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 enable, sipa_irq_notify_cb cb); + int (*set_intr_timeout)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 enable, u32 time, sipa_irq_notify_cb cb); + int (*set_hw_intr_timeout)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 enable, u32 time, sipa_irq_notify_cb cb); + int (*set_intr_threshold)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 enable, u32 cnt, sipa_irq_notify_cb cb); + int (*set_hw_intr_thres)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 enable, u32 cnt, sipa_irq_notify_cb cb); + int (*set_src_dst_term)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 src, u32 dst); + int (*enable_local_flowctrl_intr)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag * + cfg_base, u32 enable, u32 irq_mode, + sipa_irq_notify_cb cb); + int (*enable_remote_flowctrl_intr)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag * + cfg_base, u32 work_mode, + u32 tx_entry_watermark, + u32 tx_exit_watermark, + u32 rx_entry_watermark, + u32 rx_exit_watermark); + int (*set_interrupt_intr)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 enable, sipa_irq_notify_cb cb); + int (*set_intr_txfifo_overflow)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 enable, sipa_irq_notify_cb cb); + int (*set_intr_txfifo_full)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 enable, sipa_irq_notify_cb cb); + int (*put_node_to_rx_fifo)(struct device *dev, + enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + 
struct sipa_node_description_tag *node, + u32 force_intr, u32 num); + u32 (*get_left_cnt)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base); + u32 (*recv_node_from_tx_fifo)(struct device *dev, + enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 num); + void (*get_rx_ptr)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 *wr, u32 *rd); + void (*get_tx_ptr)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 *wr, u32 *rd); + void (*get_filled_depth)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 *rx_filled, u32 *tx_filled); + u32 (*get_tx_full_status)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base); + u32 (*get_tx_empty_status)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base); + u32 (*get_rx_full_status)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base); + u32 (*get_rx_empty_status)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base); + bool (*set_rx_fifo_wptr)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 wptr); + bool (*set_tx_fifo_wptr)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 wptr); + int (*set_rx_tx_fifo_ptr)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 rx_rd, u32 rx_wr, u32 tx_rd, u32 tx_wr); + int (*ctrl_receive)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + bool stop); + struct sipa_node_description_tag * + (*get_tx_fifo_rp)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 index); + struct sipa_node_description_tag * + (*get_rx_fifo_wr)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 index); + int (*set_tx_fifo_rp)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 tx_rd); + int (*set_rx_fifo_wr)(struct device *dev, enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 num); + int (*set_intr_eb)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + bool eb, u32 type); + void (*clr_tout_th_intr)(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base); +}; + +struct sipa_core { + const char *name; + + struct device *dev; + struct device *pci_dev; + struct dentry *dentry; + struct sipa_endpoint ep; + + struct sipa_cmn_fifo_cfg_tag cmn_fifo_cfg[SIPA_FIFO_MAX]; + + struct work_struct flow_ctrl_work; + + /* ipa low power*/ + bool remote_ready; + + struct resource *reg_res; + phys_addr_t reg_mapped; + void __iomem *virt_reg_addr; + /* IPA NIC interface */ + struct sipa_nic *nic[SIPA_NIC_MAX]; + + /* sender & receiver */ + struct sipa_skb_sender *sender; + struct sipa_skb_receiver *receiver; + + atomic_t recv_cnt; + u64 pcie_mem_offset; + + struct sipa_fifo_hal_ops hal_ops; + + struct task_struct *smsg_thread; + + struct dentry *debugfs_root; + const void *debugfs_data; +}; + +void sipa_fifo_ops_init(struct sipa_fifo_hal_ops *ops); +struct sipa_core *sipa_get_ctrl_pointer(void); + +void sipa_receiver_add_nic(struct sipa_skb_receiver *receiver, + struct sipa_nic *nic); +void sipa_receiver_open_cmn_fifo(struct sipa_skb_receiver *receiver); + +void sipa_sender_open_cmn_fifo(struct sipa_skb_sender *sender); +int create_sipa_skb_sender(struct sipa_endpoint *ep, + struct sipa_skb_sender **sender_pp); +void destroy_sipa_skb_sender(struct sipa_skb_sender *sender); +void sipa_skb_sender_add_nic(struct sipa_skb_sender *sender, + 
struct sipa_nic *nic); +void sipa_skb_sender_remove_nic(struct sipa_skb_sender *sender, + struct sipa_nic *nic); +int sipa_skb_sender_send_data(struct sipa_skb_sender *sender, + struct sk_buff *skb, + enum sipa_term_type dst, + u8 netid); +int create_sipa_skb_receiver(struct sipa_endpoint *ep, + struct sipa_skb_receiver **receiver_pp); + +void sipa_nic_notify_evt(struct sipa_nic *nic, enum sipa_evt_type evt); +void sipa_nic_try_notify_recv(struct sipa_nic *nic); +void sipa_nic_push_skb(struct sipa_nic *nic, struct sk_buff *skb); +void sipa_nic_check_flow_ctrl(void); + +int sipa_create_smsg_channel(struct sipa_core *ipa); + +int sipa_init_debugfs(struct sipa_core *ipa); + +int sipa_int_callback_func(int evt, void *cookie); + +#if defined (__BIG_ENDIAN_BITFIELD) +static inline int sipa_get_node_desc(u8 *node_addr, + struct sipa_node_description_tag *node) +{ + if (!node_addr || !node) + return -EINVAL; + + node->address = node_addr[0] + ((u32)node_addr[1] << 8) + + ((u32)node_addr[2] << 16) + ((u32)node_addr[3] << 24) + + ((u64)node_addr[4] << 32); +#if 0 + node->length = node_addr[5] + ((u32)node_addr[6] << 8) + + ((u32)(node_addr[7] & 0xf) << 16); + node->offset = ((node_addr[7] & 0xf0) >> 4) + + ((u16)node_addr[8] << 4); +#endif + node->net_id = node_addr[9]; + node->src = node_addr[10] & 0x1f; +#if 0 + node->dst = ((node_addr[11] & 0x3) << 3) + + ((node_addr[10] & 0xe0) >> 5); +#endif + node->err_code = ((node_addr[12] & 0xc0) >> 6) + + ((node_addr[13] & 0x03) << 2); +#if 0 + node->prio = (node_addr[11] & 0x1c) >> 2; + node->bear_id = ((node_addr[11] & 0xe0) >> 5) + + ((node_addr[12] & 0xf) << 3); + node->intr = !!(node_addr[12] & BIT(4)); + node->indx = !!(node_addr[12] & BIT(5)); + node->reserved = ((node_addr[13] & 0xfc) >> 2) + + ((u32)node_addr[14] << 6) + ((u32)node_addr[15] << 14); +#endif + smp_rmb(); + + return 0; +} + +static inline int sipa_set_node_desc(u8 *dst_addr, u8 *src_addr) +{ + if (!dst_addr || !src_addr) + return -EINVAL; + + /* address */ + dst_addr[0] = src_addr[4]; + dst_addr[1] = src_addr[3]; + dst_addr[2] = src_addr[2]; + dst_addr[3] = src_addr[1]; + dst_addr[4] = src_addr[0]; + + /* length */ + dst_addr[5] = (src_addr[7] >> 4) + ((src_addr[6] & 0x0f) << 4); + dst_addr[6] = (src_addr[6] >> 4) + ((src_addr[5] & 0x0f) << 4); + dst_addr[7] = src_addr[5] >> 4; + + /* offset */ + dst_addr[7] += ((src_addr[8] & 0x0f) << 4); + dst_addr[8] = (src_addr[7] << 4) + (src_addr[8] >> 4); + + /* netid */ + dst_addr[9] = src_addr[9]; + + /* src */ + dst_addr[10] = ((src_addr[10] & 0xf8) >> 3); + + /* dst */ + dst_addr[10] += + ((src_addr[11] >> 6) + ((src_addr[10] & 0x01) << 2)) << 5; + dst_addr[11] = (src_addr[10] & 0x6) >> 1; + + /* prio */ + dst_addr[11] += ((src_addr[11] & 0x38) >> 1); + + /* bear_id */ + dst_addr[11] += ((src_addr[12] & 0x70) << 1); + dst_addr[12] = ((src_addr[11] & 0x7) << 1) + (src_addr[12] >> 7); + + /* intx */ + dst_addr[12] += ((src_addr[12] & 0x8) << 1); + + /* indx */ + dst_addr[12] += ((src_addr[12] & 0x4) << 3); + + /* err code */ + dst_addr[12] += (src_addr[13] & 0xc0); + dst_addr[13] = src_addr[12] & 0x3; + + /* reserved */ + dst_addr[13] += src_addr[15] << 2; + dst_addr[14] = (src_addr[15] & 0x3) + (src_addr[14] << 2); + dst_addr[15] = ((src_addr[13] & 0x3f) << 2) + + ((src_addr[14] & 0xc0) >> 6); + smp_wmb(); + + return 0; +} +#endif +#endif diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_debugfs.c b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_debugfs.c new file mode 100644 index 000000000..13dba6029 --- /dev/null +++ 
b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_debugfs.c @@ -0,0 +1,590 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../include/sipa.h" +#include "sipa_core.h" + +static u32 debug_cmd[5], data_buf[5]; +static struct sipa_node_description_tag ipa_node; + +static int sipa_params_debug_show(struct seq_file *s, void *unused) +{ + int i; + u32 tmp; + struct sipa_core *ipa = (struct sipa_core *)s->private; + struct sipa_cmn_fifo_cfg_tag *fifo_cfg; + + seq_printf(s, "dma_mask = 0x%llx coherent_dma_mask = 0x%llx\n", + (u64)*ipa->pci_dev->dma_mask, (u64)ipa->pci_dev->coherent_dma_mask); + seq_printf(s, "remote ready = %d reg_mapped = 0x%llx virt_reg_addr = 0x%p\n", + ipa->remote_ready, (long long unsigned int)ipa->reg_mapped, ipa->virt_reg_addr); + seq_printf(s, "ipa reg start = 0x%llx size = 0x%llx pcie_mem_offset = %llx\n", + (long long unsigned int)ipa->reg_res->start, (long long unsigned int)resource_size(ipa->reg_res), + (long long unsigned int)ipa->pcie_mem_offset); + for (i = 0; i < SIPA_NIC_MAX; i++) { + if (!ipa->nic[i]) + continue; + + seq_printf(s, "open = %d src_mask = 0x%x netid = %d flow_ctrl_status = %d", + atomic_read(&ipa->nic[i]->status), ipa->nic[i]->src_mask, + ipa->nic[i]->netid, ipa->nic[i]->flow_ctrl_status); + seq_printf(s, " qlen = %d need_notify = %d continue_notify = %d\n", + ipa->nic[i]->rx_skb_q.qlen, ipa->nic[i]->need_notify, + ipa->nic[i]->continue_notify); + } + + seq_printf(s, "sender no_mem_cnt = %d no_free_cnt = %d left_cnt = %d\n", + ipa->sender->no_mem_cnt, ipa->sender->no_free_cnt, + atomic_read(&ipa->sender->left_cnt)); + seq_printf(s, "sender enter_flow_ctrl_cnt=%d, exit_flow_ctrl_cnt=%d, free_notify_net=%d, ep_cover_net=%d\n", + ipa->sender->enter_flow_ctrl_cnt, ipa->sender->exit_flow_ctrl_cnt, + ipa->sender->free_notify_net, ipa->sender->ep_cover_net); + seq_printf(s, "receiver need_fill_cnt = %d", + atomic_read(&ipa->receiver->need_fill_cnt)); + seq_printf(s, " tx_danger_cnt = %d rx_danger_cnt = %d\n", + ipa->receiver->tx_danger_cnt, ipa->receiver->rx_danger_cnt); + + fifo_cfg = &ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_DL]; + seq_printf(s, "[PCIE_DL]state = %d fifo_reg_base = %p\n", + fifo_cfg->state, fifo_cfg->fifo_reg_base); + seq_printf(s, "[PCIE_DL]rx fifo depth = 0x%x wr = 0x%x rd = 0x%x\n", + fifo_cfg->rx_fifo.depth, + fifo_cfg->rx_fifo.wr, + fifo_cfg->rx_fifo.rd); + seq_printf(s, "[PCIE_DL]rx_fifo fifo_addrl = 0x%x fifo_addrh = 0x%x\n", + fifo_cfg->rx_fifo.fifo_base_addr_l, + fifo_cfg->rx_fifo.fifo_base_addr_h); + seq_printf(s, "[PCIE_DL]rx fifo virt addr = %p\n", + fifo_cfg->rx_fifo.virtual_addr); + seq_printf(s, "[PCIE_DL]tx fifo depth = 0x%x wr = 0x%x rd = 0x%x\n", + fifo_cfg->tx_fifo.depth, fifo_cfg->tx_fifo.wr, + fifo_cfg->tx_fifo.rd); + seq_printf(s, "[PCIE_DL]tx_fifo fifo_addrl = 0x%x fifo_addrh = 0x%x\n", + fifo_cfg->tx_fifo.fifo_base_addr_l, + fifo_cfg->tx_fifo.fifo_base_addr_h); + seq_printf(s, "[PCIE_DL]tx fifo virt addr = %p\n", + fifo_cfg->tx_fifo.virtual_addr); + fifo_cfg = &ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_UL]; + seq_printf(s, "[PCIE_UL]state = %d fifo_reg_base = %p\n", + fifo_cfg->state, fifo_cfg->fifo_reg_base); + seq_printf(s, "[PCIE_UL]rx fifo depth = 0x%x wr = 0x%x rd = 0x%x\n", + fifo_cfg->rx_fifo.depth, + fifo_cfg->rx_fifo.wr, + fifo_cfg->rx_fifo.rd); + seq_printf(s, "[PCIE_UL]rx_fifo fifo_addrl = 0x%x fifo_addrh = 0x%x\n", + fifo_cfg->rx_fifo.fifo_base_addr_l, + fifo_cfg->rx_fifo.fifo_base_addr_h); + seq_printf(s, "[PCIE_UL]rx fifo virt addr = %p\n", + 
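/*
+	 * The "params" dump aggregates NIC state, sender/receiver counters
+	 * and both common-FIFO rings in one read. Typical inspection from
+	 * userspace (the directory name comes from dev_name() of the
+	 * "virt_sipa" platform device, so the exact path may differ):
+	 *
+	 *	cat /sys/kernel/debug/virt_sipa/params
+	 */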
fifo_cfg->rx_fifo.virtual_addr); + seq_printf(s, "[PCIE_UL]tx fifo depth = 0x%x wr = 0x%x rd = 0x%x\n", + fifo_cfg->tx_fifo.depth, fifo_cfg->tx_fifo.wr, + fifo_cfg->tx_fifo.rd); + seq_printf(s, "[PCIE_UL]tx_fifo fifo_addrl = 0x%x fifo_addrh = 0x%x\n", + fifo_cfg->tx_fifo.fifo_base_addr_l, + fifo_cfg->tx_fifo.fifo_base_addr_h); + seq_printf(s, "[PCIE_UL]tx fifo virt addr = %p\n", + fifo_cfg->tx_fifo.virtual_addr); + + //ep: IPA_COMMON_TX_FIFO_DEPTH 0x0Cl + tmp = readl_relaxed(ipa->virt_reg_addr + 0xc00 + 0x0C); + seq_printf(s, "neil: read IPA_COMMON_TX_FIFO_DEPTH, value = %x\n", (tmp >> 16)); + + //ep: IPA_COMMON_TX_FIFO_WR 0x10l + tmp = readl_relaxed(ipa->virt_reg_addr + 0xc00 + 0x10); + seq_printf(s, "neil: read IPA_COMMON_TX_FIFO_WR, value = %x\n", (tmp >> 16)); + + //ep: IPA_COMMON_TX_FIFO_RD 0x14l + tmp = readl_relaxed(ipa->virt_reg_addr + 0xc00 + 0x14); + seq_printf(s, "neil: read IPA_COMMON_TX_FIFO_RD, value = %x\n", (tmp >> 16)); + return 0; +} + +static int sipa_params_debug_open(struct inode *inode, + struct file *file) +{ + return single_open(file, sipa_params_debug_show, + inode->i_private); +} + +static ssize_t sipa_endian_debug_write(struct file *f, const char __user *buf, + size_t size, loff_t *l) +{ + ssize_t len; + u32 debug_cmd[24], data_buf[24]; + + len = min(size, sizeof(data_buf) - 1); + if (copy_from_user((char *)data_buf, buf, len)) + return -EFAULT; + + len = sscanf((char *)data_buf, "%x %x %x %x %x %x %x %x %x %x %x %x\n", + &debug_cmd[0], &debug_cmd[1], &debug_cmd[2], + &debug_cmd[3], &debug_cmd[4], &debug_cmd[5], + &debug_cmd[6], &debug_cmd[7], &debug_cmd[8], + &debug_cmd[9], &debug_cmd[10], &debug_cmd[11]); + + ipa_node.address = debug_cmd[0]; + ipa_node.length = debug_cmd[1]; + ipa_node.offset = debug_cmd[2]; + ipa_node.net_id = debug_cmd[3]; + ipa_node.src = debug_cmd[4]; + ipa_node.dst = debug_cmd[5]; + ipa_node.prio = debug_cmd[6]; + ipa_node.bear_id = debug_cmd[7]; + ipa_node.intr = debug_cmd[8]; + ipa_node.indx = debug_cmd[9]; + ipa_node.err_code = debug_cmd[10]; + ipa_node.reserved = debug_cmd[11]; + + return size; +} + +static int sipa_endian_debug_show(struct seq_file *s, void *unused) +{ + int i; + u8 *byte; + + seq_printf(s, "address = 0x%llx length = 0x%x offset = 0x%x net_id = 0x%x\n", + (u64)ipa_node.address, ipa_node.length, ipa_node.offset, + ipa_node.net_id); + seq_printf(s, "src = 0x%x dst = 0x%x prio = 0x%x bear_id = 0x%x\n", + ipa_node.src, ipa_node.dst, ipa_node.prio, ipa_node.bear_id); + seq_printf(s, "intr = 0x%x indx = 0x%x err_code = 0x%x reserved = 0x%x\n", + ipa_node.intr, ipa_node.indx, + ipa_node.err_code, ipa_node.reserved); + + byte = (u8 *)&ipa_node; + for (i = 0; i < sizeof(ipa_node); i++) + seq_printf(s, "0x%x ", *(byte + i)); + + seq_puts(s, "\n"); + + return 0; +} + +static const struct file_operations sipa_params_fops = { + .open = sipa_params_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int sipa_endian_debug_open(struct inode *inode, + struct file *file) +{ + return single_open(file, sipa_endian_debug_show, + inode->i_private); +} + +static const struct file_operations sipa_endian_fops = { + .open = sipa_endian_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = sipa_endian_debug_write, +}; + +static ssize_t sipa_get_node_debug_write(struct file *f, const char __user *buf, + size_t size, loff_t *l) +{ + int i; + ssize_t len; + u8 debug_cmd[16], data_buf[128]; + + len = min(size, sizeof(data_buf) - 1); + if (copy_from_user((char 
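/*
+	 * Note: copy_from_user() does not NUL-terminate data_buf, so the
+	 * sscanf() below may read stale stack bytes past the copied input.
+	 * A hardened variant of this sequence terminates explicitly:
+	 *
+	 *	if (copy_from_user(data_buf, buf, len))
+	 *		return -EFAULT;
+	 *	data_buf[len] = '\0';
+	 */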
*)data_buf, buf, len)) + return -EFAULT; + + len = sscanf((char *)data_buf, "%4hhx %4hhx %4hhx %4hhx %4hhx %4hhx %4hhx %4hhx %4hhx %4hhx %4hhx %4hhx %4hhx %4hhx %4hhx %4hhx\n", + &debug_cmd[0], &debug_cmd[1], &debug_cmd[2], + &debug_cmd[3], &debug_cmd[4], &debug_cmd[5], + &debug_cmd[6], &debug_cmd[7], &debug_cmd[8], + &debug_cmd[9], &debug_cmd[10], &debug_cmd[11], + &debug_cmd[12], &debug_cmd[13], &debug_cmd[14], + &debug_cmd[15]); + + for (i = 0; i < 16; i++) + pr_err("0x%x ", debug_cmd[i]); + pr_err("\n"); + +#if defined (__BIG_ENDIAN_BITFIELD) + sipa_get_node_desc(debug_cmd, &ipa_node); +#else + ipa_node.address = debug_cmd[4] + ((u32)debug_cmd[3] << 8) + + ((u32)debug_cmd[2] << 16) + ((u32)debug_cmd[1] << 24) + + ((u64)debug_cmd[0] << 32); + ipa_node.net_id = debug_cmd[9]; + ipa_node.src = debug_cmd[10] & 0x1f; + ipa_node.err_code = ((debug_cmd[13] & 0xc0) >> 6) + + ((debug_cmd[12] & 0x03) << 2); +#endif + return size; +} + +static int sipa_get_node_debug_show(struct seq_file *s, void *unused) +{ + int i; + u8 *byte; + + seq_printf(s, "address = 0x%llx length = 0x%x offset = 0x%x net_id = 0x%x\n", + (u64)ipa_node.address, ipa_node.length, ipa_node.offset, + ipa_node.net_id); + seq_printf(s, "src = 0x%x dst = 0x%x prio = 0x%x bear_id = 0x%x\n", + ipa_node.src, ipa_node.dst, ipa_node.prio, ipa_node.bear_id); + seq_printf(s, "intr = 0x%x indx = 0x%x err_code = 0x%x reserved = 0x%x\n", + ipa_node.intr, ipa_node.indx, + ipa_node.err_code, ipa_node.reserved); + + byte = (u8 *)&ipa_node; + for (i = 0; i < sizeof(ipa_node); i++) + seq_printf(s, "0x%x ", *(byte + i)); + + seq_puts(s, "\n"); + + return 0; +} + +static int sipa_get_node_debug_open(struct inode *inode, + struct file *file) +{ + return single_open(file, sipa_get_node_debug_show, + inode->i_private); +} + +static const struct file_operations sipa_get_node_fops = { + .open = sipa_get_node_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = sipa_get_node_debug_write, +}; + +static ssize_t sipa_set_node_debug_write(struct file *f, const char __user *buf, + size_t size, loff_t *l) +{ + ssize_t len; + u32 debug_cmd[24], data_buf[24]; + + len = min(size, sizeof(data_buf) - 1); + if (copy_from_user((char *)data_buf, buf, len)) + return -EFAULT; + + len = sscanf((char *)data_buf, "%x %x %x %x %x %x %x %x %x %x %x %x\n", + &debug_cmd[0], &debug_cmd[1], &debug_cmd[2], + &debug_cmd[3], &debug_cmd[4], &debug_cmd[5], + &debug_cmd[6], &debug_cmd[7], &debug_cmd[8], + &debug_cmd[9], &debug_cmd[10], &debug_cmd[11]); + + ipa_node.address = debug_cmd[0]; + ipa_node.length = debug_cmd[1]; + ipa_node.offset = debug_cmd[2]; + ipa_node.net_id = debug_cmd[3]; + ipa_node.src = debug_cmd[4]; + ipa_node.dst = debug_cmd[5]; + ipa_node.prio = debug_cmd[6]; + ipa_node.bear_id = debug_cmd[7]; + ipa_node.intr = debug_cmd[8]; + ipa_node.indx = debug_cmd[9]; + ipa_node.err_code = debug_cmd[10]; + ipa_node.reserved = debug_cmd[11]; + + return size; +} + +static int sipa_set_node_debug_show(struct seq_file *s, void *unused) +{ +#if defined (__BIG_ENDIAN_BITFIELD) + int i; + u8 node_buf[16]; +#endif + + seq_printf(s, "address = 0x%llx length = 0x%x offset = 0x%x net_id = 0x%x\n", + (u64)ipa_node.address, ipa_node.length, ipa_node.offset, + ipa_node.net_id); + seq_printf(s, "src = 0x%x dst = 0x%x prio = 0x%x bear_id = 0x%x\n", + ipa_node.src, ipa_node.dst, ipa_node.prio, ipa_node.bear_id); + seq_printf(s, "intr = 0x%x indx = 0x%x err_code = 0x%x reserved = 0x%x\n", + ipa_node.intr, ipa_node.indx, + ipa_node.err_code, 
ipa_node.reserved); + +#if defined (__BIG_ENDIAN_BITFIELD) + sipa_set_node_desc(node_buf, (u8 *)&ipa_node); + for (i = 0; i < sizeof(node_buf); i++) + seq_printf(s, "0x%x ", node_buf[i]); +#endif + + seq_puts(s, "\n"); + + return 0; +} + +static int sipa_set_node_debug_open(struct inode *inode, + struct file *file) +{ + return single_open(file, sipa_set_node_debug_show, + inode->i_private); +} + +static const struct file_operations sipa_set_node_fops = { + .open = sipa_set_node_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = sipa_set_node_debug_write, +}; + +static ssize_t sipa_reg_debug_write(struct file *f, const char __user *buf, + size_t size, loff_t *l) +{ + ssize_t len; + struct sipa_core *ipa = f->f_inode->i_private; + + len = min(size, sizeof(data_buf) - 1); + if (copy_from_user((char *)data_buf, buf, len)) + return -EFAULT; + + len = sscanf((char *)data_buf, "%x %x %x %x %x\n", + &debug_cmd[0], &debug_cmd[1], &debug_cmd[2], + &debug_cmd[3], &debug_cmd[4]); + if (debug_cmd[2]) + writel_relaxed(debug_cmd[1], ipa->virt_reg_addr + debug_cmd[0]); + + return size; +} + +static int sipa_reg_debug_show(struct seq_file *s, void *unused) +{ + u32 tx_filled, rx_filled; + u32 tx_wr, tx_rd, rx_wr, rx_rd; + struct sipa_core *ipa = (struct sipa_core *)s->private; + + seq_printf(s, "0x%x\n", + readl_relaxed(ipa->virt_reg_addr + debug_cmd[0])); + + seq_printf(s, "pcie dl tx fifo empty = %d full = %d rx fifo empty = %d full = %d\n", + ipa->hal_ops.get_tx_empty_status(SIPA_FIFO_PCIE_DL, + ipa->cmn_fifo_cfg), + ipa->hal_ops.get_tx_full_status(SIPA_FIFO_PCIE_DL, + ipa->cmn_fifo_cfg), + ipa->hal_ops.get_rx_empty_status(SIPA_FIFO_PCIE_DL, + ipa->cmn_fifo_cfg), + ipa->hal_ops.get_rx_full_status(SIPA_FIFO_PCIE_DL, + ipa->cmn_fifo_cfg)); + seq_printf(s, "pcie ul tx fifo empty = %d full = %d rx fifo empty = %d full = %d\n", + ipa->hal_ops.get_tx_empty_status(SIPA_FIFO_PCIE_UL, + ipa->cmn_fifo_cfg), + ipa->hal_ops.get_tx_full_status(SIPA_FIFO_PCIE_UL, + ipa->cmn_fifo_cfg), + ipa->hal_ops.get_rx_empty_status(SIPA_FIFO_PCIE_UL, + ipa->cmn_fifo_cfg), + ipa->hal_ops.get_rx_full_status(SIPA_FIFO_PCIE_UL, + ipa->cmn_fifo_cfg)); + ipa->hal_ops.get_filled_depth(SIPA_FIFO_PCIE_DL, ipa->cmn_fifo_cfg, + &rx_filled, &tx_filled); + seq_printf(s, "pcie dl tx filled = 0x%x rx filled = 0x%x\n", + tx_filled, rx_filled); + ipa->hal_ops.get_filled_depth(SIPA_FIFO_PCIE_UL, ipa->cmn_fifo_cfg, + &rx_filled, &tx_filled); + seq_printf(s, "pcie ul tx filled = 0x%x rx filled = 0x%x\n", + tx_filled, rx_filled); + + ipa->hal_ops.get_rx_ptr(SIPA_FIFO_PCIE_UL, ipa->cmn_fifo_cfg, &rx_wr, &rx_rd); + ipa->hal_ops.get_tx_ptr(SIPA_FIFO_PCIE_UL, ipa->cmn_fifo_cfg, &tx_wr, &tx_rd); + seq_printf(s, "pcie ul rx_wr = 0x%x, rx_rd = 0x%x, tx_wr = 0x%x, tx_rd = 0x%x\n", + rx_wr, rx_rd, tx_wr, tx_rd); + + ipa->hal_ops.get_rx_ptr(SIPA_FIFO_PCIE_DL, ipa->cmn_fifo_cfg, &rx_wr, &rx_rd); + ipa->hal_ops.get_tx_ptr(SIPA_FIFO_PCIE_DL, ipa->cmn_fifo_cfg, &tx_wr, &tx_rd); + seq_printf(s, "pcie dl rx_wr = 0x%x, rx_rd = 0x%x, tx_wr = 0x%x, tx_rd = 0x%x\n", + rx_wr, rx_rd, tx_wr, tx_rd); + + sipa_int_callback_func(0, NULL); + + return 0; +} + +static int sipa_reg_debug_open(struct inode *inode, + struct file *file) +{ + return single_open(file, sipa_reg_debug_show, + inode->i_private); +} + +static const struct file_operations sipa_reg_debug_fops = { + .open = sipa_reg_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = sipa_reg_debug_write, +}; + +static int 
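/*
+ * The "reg" file above is a peek/poke helper: the first hex word written
+ * is a register offset, the second a value, and a non-zero third word
+ * triggers the writel_relaxed(); reading the file then dumps that
+ * register together with FIFO empty/full state and ring pointers. Shell
+ * usage from the same debugfs directory (offset is illustrative):
+ *
+ *	echo "0xc0c 0x0 0x0 0x0 0x0" > reg	# latch offset 0xc0c, no write
+ *	cat reg					# read it back plus FIFO status
+ */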
sipa_send_test_show(struct seq_file *s, void *unused) +{ + struct sk_buff *skb = NULL; + struct sipa_core *ipa = (struct sipa_core *)s->private; + + if (!skb) { + skb = __dev_alloc_skb(256, GFP_KERNEL | GFP_NOWAIT); + if (!skb) { + dev_err(ipa->dev, "failed to alloc skb!\n"); + return 0; + } + skb_put(skb, 128); + memset(skb->data, 0xE7, skb->len); + + sipa_skb_sender_send_data(ipa->sender, skb, 0x19, 0); + } + + return 0; +} + +static int sipa_send_test_open(struct inode *inode, struct file *file) +{ + return single_open(file, sipa_send_test_show, inode->i_private); +} + +static const struct file_operations sipa_send_test_fops = { + .open = sipa_send_test_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static ssize_t sipa_nic_debug_write(struct file *f, const char __user *buf, + size_t size, loff_t *l) +{ + ssize_t len; + u8 debug_cmd[24], data_buf[24]; + + len = min(size, sizeof(data_buf) - 1); + if (copy_from_user((char *)data_buf, buf, len)) + return -EFAULT; + + len = sscanf((char *)data_buf, "%4hhx %4hhx\n", + &debug_cmd[0], &debug_cmd[1]); + if (debug_cmd[1]) + sipa_nic_open(debug_cmd[0], 0, NULL, NULL); + else + sipa_nic_close(debug_cmd[0]); + + return size; +} + +static int sipa_nic_debug_show(struct seq_file *s, void *unused) +{ + //struct sk_buff *skb = NULL; + struct sipa_core *ipa = (struct sipa_core *)s->private; + struct sipa_cmn_fifo_cfg_tag *pcie_dl = &ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_DL]; + //struct sipa_cmn_fifo_cfg_tag *pcie_ul = &ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_UL]; + //struct sipa_cmn_fifo_tag *dl_tx_fifo = &pcie_dl->tx_fifo; + struct sipa_cmn_fifo_tag *dl_rx_fifo = &pcie_dl->rx_fifo; + //struct sipa_cmn_fifo_tag *ul_tx_fifo = &pcie_ul->tx_fifo; + //struct sipa_cmn_fifo_tag *ul_rx_fifo = &pcie_ul->rx_fifo; + struct sipa_node_description_tag *node; + int i = 0; + + pr_info("dl rx_fifo addr: 0x%lx wp-%d rp-%d\n", (long unsigned int)dl_rx_fifo->virtual_addr, + dl_rx_fifo->wr, dl_rx_fifo->rd); + node = (struct sipa_node_description_tag *)dl_rx_fifo->virtual_addr; + for (i = 0; i < dl_rx_fifo->depth; i++, node++) { + pr_info("node addr 0x%lx\n", (long unsigned int)node); + pr_info("node info i-%d, addr-0x%llx len-%u off-%u netid-%u src-%u dst-%u pro-%u bearid-%u intr-%u indx-%u err-%u resd-%u\n", + i, (long long unsigned int)node->address, node->length, node->offset, node->net_id, + node->src, node->dst, node->prio, node->bear_id, node->intr, + node->indx, node->err_code, node->reserved); + } + + + return 0; +} + +static int sipa_nic_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, sipa_nic_debug_show, inode->i_private); +} + +static const struct file_operations sipa_nic_debug_fops = { + .open = sipa_nic_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = sipa_nic_debug_write, +}; + +int sipa_init_debugfs(struct sipa_core *ipa) +{ + struct dentry *root; + struct dentry *file; + + root = debugfs_create_dir(dev_name(ipa->dev), NULL); + if (!root) { + dev_err(ipa->dev, "sipa create debugfs fail\n"); + return -ENOMEM; + } + + file = debugfs_create_file("params", 0444, root, ipa, + &sipa_params_fops); + if (!file) { + dev_err(ipa->dev, "sipa create params file debugfs fail\n"); + debugfs_remove_recursive(root); + return -ENOMEM; + } + + file = debugfs_create_file("endian", 0444, root, ipa, + &sipa_endian_fops); + if (!file) { + dev_err(ipa->dev, "sipa create endian file debugfs fail\n"); + debugfs_remove_recursive(root); + return -ENOMEM; + } + + file = 
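/*
+	 * The same create-or-unwind pattern repeats for every file below.
+	 * On newer kernels debugfs_create_file()/debugfs_create_dir()
+	 * report failure with an ERR_PTR rather than NULL, so a stricter
+	 * guard would be:
+	 *
+	 *	if (IS_ERR_OR_NULL(file)) {
+	 *		debugfs_remove_recursive(root);
+	 *		return -ENOMEM;
+	 *	}
+	 */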
debugfs_create_file("get_node", 0444, root, ipa, + &sipa_get_node_fops); + if (!file) { + dev_err(ipa->dev, "sipa create endian file debugfs fail\n"); + debugfs_remove_recursive(root); + return -ENOMEM; + } + + file = debugfs_create_file("set_node", 0444, root, ipa, + &sipa_set_node_fops); + if (!file) { + dev_err(ipa->dev, "sipa create set node file debugfs fail\n"); + debugfs_remove_recursive(root); + return -ENOMEM; + } + + file = debugfs_create_file("reg", 0444, root, ipa, + &sipa_reg_debug_fops); + if (!file) { + dev_err(ipa->dev, "sipa create reg debug file debugfs fail\n"); + debugfs_remove_recursive(root); + return -ENOMEM; + } + + file = debugfs_create_file("send_test", 0444, root, ipa, + &sipa_send_test_fops); + if (!file) { + dev_err(ipa->dev, "sipa create send_test debug file debugfs fail\n"); + debugfs_remove_recursive(root); + return -ENOMEM; + } + + file = debugfs_create_file("nic", 0444, root, ipa, + &sipa_nic_debug_fops); + if (!file) { + dev_err(ipa->dev, "sipa create nic debug file debugfs fail\n"); + debugfs_remove_recursive(root); + return -ENOMEM; + } + ipa->dentry = root; + + return 0; +} +EXPORT_SYMBOL(sipa_init_debugfs); diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_dele_cmn.c b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_dele_cmn.c new file mode 100644 index 000000000..e90895b88 --- /dev/null +++ b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_dele_cmn.c @@ -0,0 +1,156 @@ +/* + * Copyright (C) 2018-2019 Unisoc Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "../include/sipa.h"
+#include "../include/sipc.h"
+#include "../include/sprd_pcie_ep_device.h"
+#include "sipa_core.h"
+
+#define SIPA_PCIE_DL_CMN_FIFO_REG_OFFSET	0x980
+#define SIPA_PCIE_UL_CMN_FIFO_REG_OFFSET	0x200
+
+static int sipa_dele_start_req_work(void)
+{
+	struct smsg msg;
+
+	msg.channel = SMSG_CH_COMM_SIPA;
+	msg.type = SMSG_TYPE_CMD;
+	msg.flag = SMSG_FLG_DELE_REQUEST;
+	msg.value = 0;
+
+	return smsg_send(SIPC_ID_MINIAP, &msg, -1);
+}
+
+static int sipa_init_cmn_fifo_reg_addr(struct sipa_core *ipa)
+{
+	ipa->reg_mapped = sprd_ep_ipa_map(PCIE_IPA_TYPE_REG,
+					  ipa->reg_res->start,
+					  resource_size(ipa->reg_res));
+#ifndef devm_ioremap_nocache
+#define devm_ioremap_nocache devm_ioremap
+#endif
+	ipa->virt_reg_addr = devm_ioremap_nocache(ipa->dev,
+						  (resource_size_t)ipa->reg_mapped,
+						  (resource_size_t)(resource_size(ipa->reg_res)));
+	if (!ipa->virt_reg_addr) {
+		dev_err(ipa->dev, "ipa reg base remap fail\n");
+		return -ENOMEM;
+	}
+
+	ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_DL].fifo_reg_base =
+		ipa->virt_reg_addr + SIPA_PCIE_DL_CMN_FIFO_REG_OFFSET;
+	ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_UL].fifo_reg_base =
+		ipa->virt_reg_addr + SIPA_PCIE_UL_CMN_FIFO_REG_OFFSET;
+
+	return 0;
+}
+
+static int conn_thread(void *data)
+{
+	struct smsg mrecv;
+	int ret, timeout = 500;
+	struct sipa_core *ipa = data;
+
+	/* since the channel open may hang, we call it in the thread context */
+	ret = smsg_ch_open(SIPC_ID_MINIAP, SMSG_CH_COMM_SIPA, -1);
+	if (ret != 0) {
+		dev_err(ipa->dev, "sipa_delegator failed to open dst %d channel %d\n",
+			SIPC_ID_MINIAP, SMSG_CH_COMM_SIPA);
+		/* exit the thread, since the channel could not be opened */
+		return ret;
+	}
+
+	while (sipa_dele_start_req_work() && timeout--)
+		usleep_range(5000, 10000);
+
+	/* start listening for smsg events */
+	while (!kthread_should_stop()) {
+		/* monitor seblock recv smsg */
+		smsg_set(&mrecv, SMSG_CH_COMM_SIPA, 0, 0, 0);
+		ret = smsg_recv(SIPC_ID_MINIAP, &mrecv, -1);
+		if (ret == -EIO || ret == -ENODEV) {
+			/* channel state is FREE */
+			usleep_range(5000, 10000);
+			continue;
+		}
+
+		dev_dbg(ipa->dev, "sipa type=%d, flag=0x%x, value=0x%08x\n",
+			mrecv.type, mrecv.flag, mrecv.value);
+
+		switch (mrecv.type) {
+		case SMSG_TYPE_OPEN:
+			/* just ack open */
+			smsg_open_ack(SIPC_ID_AP, SMSG_CH_COMM_SIPA);
+			break;
+		case SMSG_TYPE_CLOSE:
+			/* handle channel close */
+			smsg_close_ack(SIPC_ID_AP, SMSG_CH_COMM_SIPA);
+			break;
+		case SMSG_TYPE_CMD:
+			/* handle commands */
+			break;
+		case SMSG_TYPE_DONE:
+			sipa_init_cmn_fifo_reg_addr(ipa);
+			dev_info(ipa->dev, "remote ipa ready reg_mapped = 0x%llx\n",
+				 (long long unsigned int)ipa->reg_mapped);
+			sipa_receiver_open_cmn_fifo(ipa->receiver);
+			sipa_sender_open_cmn_fifo(ipa->sender);
+			sipa_nic_check_flow_ctrl();
+			ipa->remote_ready = true;
+			/* handle cmd done */
+			break;
+		case SMSG_TYPE_EVENT:
+			/* handle events */
+			break;
+		default:
+			ret = 1;
+			break;
+		}
+
+		if (ret) {
+			dev_info(ipa->dev, "unknown msg in conn_thrd: %d, %d, %d\n",
+				 mrecv.type, mrecv.flag, mrecv.value);
+			ret = 0;
+		}
+	}
+
+	return ret;
+}
+
+int sipa_create_smsg_channel(struct sipa_core *ipa)
+{
+	/* create a channel thread for this seblock channel */
+	ipa->smsg_thread = kthread_create(conn_thread, ipa, "sipa-dele");
+	if (IS_ERR(ipa->smsg_thread)) {
+		dev_err(ipa->dev, "Failed to create monitor smsg kthread\n");
+		return PTR_ERR(ipa->smsg_thread);
+	}
+
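+	/*
+	 * kthread_create() + wake_up_process() below is equivalent to
+	 * kthread_run(); the split form keeps storing the thread pointer
+	 * separate from the first wake-up (the thread is later stopped via
+	 * kthread_stop() in sipa_plat_drv_remove()). Shorter equivalent:
+	 *
+	 *	ipa->smsg_thread = kthread_run(conn_thread, ipa, "sipa-dele");
+	 */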
wake_up_process(ipa->smsg_thread); + + return 0; +} +EXPORT_SYMBOL(sipa_create_smsg_channel); diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_dummy.c b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_dummy.c new file mode 100644 index 000000000..6d3c8f91b --- /dev/null +++ b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_dummy.c @@ -0,0 +1,583 @@ +/* + * Copyright (C) 2020 Spreadtrum Communications Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) "sipa_dummy: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sipa_eth.h" +#include "sipa_core.h" +#include "../include/sipa.h" + + +/* Device status */ +#define DEV_ON 1 +#define DEV_OFF 0 + +#define SIPA_DUMMY_NAPI_WEIGHT 64 + +extern struct sipa_eth_netid_device * dev_list[]; +static struct net_device *dummy_dev; +static struct dentry *dummy_root; +static int sipa_dummy_debugfs_mknod(void *data); + +#ifndef CONFIG_SPRD_ETHERNET +static int sipa_arp_reply(struct net_device *net, struct sk_buff *skb) { + struct arphdr *parp; + u8 *arpptr, *sha; + u8 sip[4], tip[4]; + struct sk_buff *reply = NULL; + + parp = arp_hdr(skb); + + if (parp->ar_hrd == htons(ARPHRD_ETHER) && parp->ar_pro == htons(ETH_P_IP) + && parp->ar_op == htons(ARPOP_REQUEST) && parp->ar_hln == 6 && parp->ar_pln == 4) { + arpptr = (u8 *)parp + sizeof(struct arphdr); + sha = arpptr; + arpptr += net->addr_len; /* sha */ + memcpy(sip, arpptr, sizeof(sip)); + arpptr += sizeof(sip); + arpptr += net->addr_len; /* tha */ + memcpy(tip, arpptr, sizeof(tip)); + + pr_info("%s sip = %d.%d.%d.%d, tip=%d.%d.%d.%d\n", netdev_name(net), sip[0], sip[1], sip[2], sip[3], tip[0], tip[1], tip[2], tip[3]); + reply = arp_create(ARPOP_REPLY, ETH_P_ARP, *((__be32 *)sip), skb->dev, *((__be32 *)tip), sha, net->dev_addr, sha); + if (reply) { + dev_queue_xmit(reply); + } + return 1; + } + + return 0; +} + +static void sipa_get_modem_mac(struct sk_buff *skb, struct SIPA_ETH *sipa_eth) +{ + struct ethhdr *ehdr; + struct iphdr *iph; + struct udphdr *udph; + struct sipa_eth_init_data *pdata = sipa_eth->pdata; + + ehdr = (struct ethhdr *)(skb->data - ETH_HLEN); + iph = ip_hdr(skb); + udph = (struct udphdr *)(skb->data + iph->ihl*4); + + if (ehdr->h_proto == htons(ETH_P_ARP)) { + sipa_arp_reply(skb->dev, skb); + return; + } + + //printk("%s skb=%p, h_proto=%x, protocol=%x, saddr=%x, daddr=%x dest=%x\n", __func__, skb, ehdr->h_proto, iph->protocol, iph->saddr, iph->daddr, udph->dest); + if (ehdr->h_proto == htons(ETH_P_IP) && iph->protocol == IPPROTO_UDP && iph->saddr != 0x00000000 && iph->daddr == 0xFFFFFFFF) { + if (udph->dest == htons(68)) //DHCP offer/ACK + { + memcpy(pdata->modem_mac, ehdr->h_source, ETH_ALEN); + pr_info("Modem Mac Address: %02x:%02x:%02x:%02x:%02x:%02x\n", + pdata->modem_mac[0], pdata->modem_mac[1], pdata->modem_mac[2], pdata->modem_mac[3], pdata->modem_mac[4], pdata->modem_mac[5]); + } + } +} +#endif + +/* Term type 0x6 means we are in 
direct mode; packets then arrive
+ * with a dummy MAC header, so skb->pkt_type and skb->protocol cannot be
+ * taken from it directly. This helper repairs both for the normal path.
+ */
+static void sipa_dummy_prepare_skb(struct sk_buff *skb)
+{
+	struct iphdr *iph;
+	struct ipv6hdr *ipv6h;
+	struct net_device *dev;
+	unsigned int real_len = 0, payload_len = 0;
+	bool ip_arp = true;
+
+	dev = skb->dev;
+
+	skb->protocol = eth_type_trans(skb, dev);
+	skb_reset_network_header(skb);
+
+	switch (ntohs(skb->protocol)) {
+	case ETH_P_IP:
+		iph = ip_hdr(skb);
+		real_len = ntohs(iph->tot_len);
+		break;
+	case ETH_P_IPV6:
+		ipv6h = ipv6_hdr(skb);
+		payload_len = ntohs(ipv6h->payload_len);
+		real_len = payload_len + sizeof(struct ipv6hdr);
+		break;
+	case ETH_P_ARP:
+		real_len = arp_hdr_len(dev);
+		break;
+	default:
+		ip_arp = false;
+		break;
+	}
+
+	if (ip_arp)
+		skb_trim(skb, real_len);
+
+	/* TODO: checksum offload is not handled yet */
+	skb->ip_summed = CHECKSUM_NONE;
+	skb->pkt_type = PACKET_HOST;
+}
+
+/* Term type 0x6 means we are in direct mode; packets then arrive with a
+ * dummy MAC header, so skb->pkt_type and skb->protocol cannot be taken
+ * from it directly.
+ */
+static void sipa_dummy_direct_mode_prepare_skb(struct sk_buff *skb)
+{
+	struct iphdr *iph;
+	struct ipv6hdr *ipv6h;
+	unsigned int real_len = 0, payload_len = 0;
+
+	skb_pull_inline(skb, ETH_HLEN);
+	skb_reset_network_header(skb);
+	iph = ip_hdr(skb);
+	if (iph->version == 4) {
+		skb->protocol = htons(ETH_P_IP);
+		iph = ip_hdr(skb);
+		real_len = ntohs(iph->tot_len);
+		skb_trim(skb, real_len);
+	} else if (iph->version == 6) {
+		skb->protocol = htons(ETH_P_IPV6);
+		ipv6h = ipv6_hdr(skb);
+		payload_len = ntohs(ipv6h->payload_len);
+		real_len = payload_len + sizeof(struct ipv6hdr);
+		skb_trim(skb, real_len);
+	} else {
+		pr_err("unrecognized ip version %d\n", iph->version);
+	}
+
+	skb->ip_summed = CHECKSUM_NONE;
+	skb->pkt_type = PACKET_HOST;
+}
+
+static int sipa_dummy_rx(struct SIPA_DUMMY *sipa_dummy, int budget)
+{
+	struct sk_buff *skb;
+	struct sipa_eth_netid_device *netid_dev_info;
+	struct SIPA_ETH *sipa_eth;
+	int real_netid = 0;
+	int skb_cnt = 0;
+	int ret;
+
+	if (!sipa_dummy) {
+		pr_err("no sipa_dummy device\n");
+		return -EINVAL;
+	}
+
+	atomic_set(&sipa_dummy->rx_evt, 0);
+	while (skb_cnt < budget) {
+		ret = sipa_nic_rx(&real_netid, &skb, skb_cnt);
+		if (ret) {
+			switch (ret) {
+			case -ENODEV:
+				pr_err("sipa fail to find dev\n");
+				sipa_dummy->stats.rx_errors++;
+				sipa_dummy->netdev->stats.rx_errors++;
+				break;
+			case -ENODATA:
+				pr_err("sipa no data\n");
+				atomic_set(&sipa_dummy->rx_busy, 0);
+				break;
+			}
+			break;
+		}
+
+		skb_cnt++;
+		sipa_dummy->stats.rx_packets++;
+		sipa_dummy->stats.rx_bytes += skb->len;
+		/*
+		 * We must determine the real device before eth_type_trans();
+		 * a netid outside [0, SIPA_DUMMY_IFACE_NUM) is invalid.
+		 */
+		if (real_netid < 0 || real_netid >= SIPA_DUMMY_IFACE_NUM) {
+			pr_err("illegal real_netid %d\n", real_netid);
+			dev_kfree_skb_any(skb);
+			break;
+		}
+		netid_dev_info = dev_list[real_netid];
+		if (!netid_dev_info || netid_dev_info->state == DEV_OFF) {
+			pr_info("netid= %d net is not DEV_ON\n", real_netid);
+			dev_kfree_skb_any(skb);
+			break;
+		}
+
+		skb->dev = netid_dev_info->ndev;
+		sipa_eth = netdev_priv(skb->dev);
+		sipa_eth->stats.rx_packets++;
+		sipa_eth->stats.rx_bytes += skb->len;
+		if (sipa_eth->pdata->term_type == 0x6) {
+			sipa_dummy_direct_mode_prepare_skb(skb);
+		} else {
+			sipa_dummy_prepare_skb(skb);
+#ifndef CONFIG_SPRD_ETHERNET
+			sipa_get_modem_mac(skb, sipa_eth);
+#endif
+		}
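+		/*
+		 * napi_gro_receive() lets the stack coalesce consecutive TCP
+		 * segments before IP processing; netif_receive_skb(skb) would
+		 * be the conservative, GRO-less equivalent here.
+		 */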
napi_gro_receive(&sipa_dummy->napi, skb); + } + + return skb_cnt; +} + +static int sipa_dummy_rx_poll_handler(struct napi_struct *napi, int budget) +{ + int pkts = 0, num, tmp = 0; + + struct SIPA_DUMMY *sipa_dummy = container_of(napi, struct SIPA_DUMMY, napi); + +READ_AGAIN: + num = sipa_nic_get_filled_num(); + if (!num) + goto check; + if (num > budget) + num = budget; + + pkts = sipa_dummy_rx(sipa_dummy, num); + if (pkts > 0) + sipa_nic_set_tx_fifo_rp(pkts); + tmp += pkts; + + budget -= pkts; + if (!budget) + goto out; + +check: + if (!sipa_check_recv_tx_fifo_empty() || + atomic_read(&sipa_dummy->rx_evt)) { + atomic_set(&sipa_dummy->rx_evt, 0); + goto READ_AGAIN; + } + + atomic_set(&sipa_dummy->rx_busy, 0); + napi_complete(napi); + sipa_nic_restore_irq(); + if (atomic_read(&sipa_dummy->rx_evt) || + atomic_read(&sipa_dummy->rx_busy) || + !sipa_check_recv_tx_fifo_empty()) { + atomic_set(&sipa_dummy->rx_evt, 0); + napi_schedule(&sipa_dummy->napi); + } + +out: + return tmp; +} + +static void sipa_dummy_rx_handler (void *priv) +{ + struct SIPA_DUMMY *sipa_dummy = (struct SIPA_DUMMY *)priv; + + if (!sipa_dummy) { + pr_err("data is NULL\n"); + return; + } + + if (!atomic_cmpxchg(&sipa_dummy->rx_busy, 0, 1)) { + atomic_set(&sipa_dummy->rx_evt, 0); + napi_schedule(&sipa_dummy->napi); + } +} + +/* for sipa to invoke */ +void sipa_dummy_recv_trigger(void) +{ + struct SIPA_DUMMY *sipa_dummy; + + if (!dummy_dev) + return; + + sipa_dummy = netdev_priv(dummy_dev); + + atomic_set(&sipa_dummy->rx_evt, 1); + sipa_dummy_rx_handler(sipa_dummy); +} + +static int sipa_dummy_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct SIPA_DUMMY *sipa_dummy = netdev_priv(dev); + + /* update netdev statistics */ + sipa_dummy->stats.tx_packets++; + sipa_dummy->stats.tx_bytes += skb->len; + + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +/* Open interface */ +static int sipa_dummy_open(struct net_device *dev) +{ + struct SIPA_DUMMY *sipa_dummy = netdev_priv(dev); + struct sipa_core *ctrl = sipa_get_ctrl_pointer(); + + if (!ctrl) { + return -EINVAL; + } + + if(!ctrl->remote_ready) + return -EINVAL; + + pr_info("dummy open\n"); + if (!netif_carrier_ok(sipa_dummy->netdev)) { + netif_carrier_on(sipa_dummy->netdev); + } + + netif_start_queue(dev); + //napi_enable(&sipa_dummy->napi); + + napi_schedule(&sipa_dummy->napi); + return 0; +} + +/* Close interface */ +static int sipa_dummy_close(struct net_device *dev) +{ + //struct SIPA_DUMMY *sipa_dummy = netdev_priv(dev); + pr_info("close dummy!\n"); + + //napi_disable(&sipa_dummy->napi); + netif_stop_queue(dev); + netif_carrier_off(dev); + + return 0; +} + +static struct net_device_stats *sipa_dummy_get_stats(struct net_device *dev) +{ + struct SIPA_DUMMY *sipa_dummy = netdev_priv(dev); + + return &sipa_dummy->stats; +} + +static const struct net_device_ops sipa_dummy_ops = { + .ndo_open = sipa_dummy_open, + .ndo_stop = sipa_dummy_close, + .ndo_start_xmit = sipa_dummy_start_xmit, + .ndo_get_stats = sipa_dummy_get_stats, +}; + +static void s_setup(struct net_device *dev) +{ + ether_setup(dev); +} + +static int sipa_dummy_probe(struct platform_device *pdev) +{ + struct SIPA_DUMMY *sipa_dummy; + struct net_device *netdev; + int ret; + +#ifdef NET_NAME_PREDICTABLE + netdev = alloc_netdev( + sizeof(struct SIPA_DUMMY), + "sipa_dummy0", + NET_NAME_PREDICTABLE, + s_setup); +#else + netdev = alloc_netdev( + sizeof(struct SIPA_DUMMY), + "sipa_dummy0", + s_setup); +#endif + + if (!netdev) { + pr_err("alloc_netdev() failed.\n"); + return -ENOMEM; + } + + dummy_dev = 
netdev; + netdev->type = ARPHRD_ETHER; + sipa_dummy = netdev_priv(netdev); + sipa_dummy->netdev = netdev; + netdev->netdev_ops = &sipa_dummy_ops; + netdev->watchdog_timeo = 1 * HZ; + netdev->irq = 0; + netdev->dma = 0; + netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM); + random_ether_addr(netdev->dev_addr); + + netif_napi_add(netdev, + &sipa_dummy->napi, + sipa_dummy_rx_poll_handler, + SIPA_DUMMY_NAPI_WEIGHT); + + /* Register new Ethernet interface */ + ret = register_netdev(netdev); + if (ret) { + pr_err("register_netdev() failed (%d)\n", ret); + netif_napi_del(&sipa_dummy->napi); + free_netdev(netdev); + return ret; + } + + /* Set link as disconnected */ + netif_carrier_off(netdev); + platform_set_drvdata(pdev, sipa_dummy); + sipa_dummy_debugfs_mknod((void *)sipa_dummy); + napi_enable(&sipa_dummy->napi); + return 0; +} + +/* Cleanup Ethernet device driver. */ +static int sipa_dummy_remove(struct platform_device *pdev) +{ + struct SIPA_DUMMY *sipa_dummy= platform_get_drvdata(pdev); + netif_stop_queue(sipa_dummy->netdev); + napi_disable(&sipa_dummy->napi); + netif_napi_del(&sipa_dummy->napi); + unregister_netdev(sipa_dummy->netdev); + free_netdev(sipa_dummy->netdev); + platform_set_drvdata(pdev, NULL); + if (!IS_ERR_OR_NULL(dummy_root)) + debugfs_remove_recursive(dummy_root); + + return 0; +} + +#ifdef SPRD_PCIE_USE_DTS +static const struct of_device_id sipa_dummy_match_table[] = { + { .compatible = "sprd,sipa_dummy"}, + { } +}; +#endif + +static struct platform_driver sipa_dummy_driver = { + .probe = sipa_dummy_probe, + .remove = sipa_dummy_remove, + .driver = { + .owner = THIS_MODULE, + .name = "sipa_dummy", +#ifdef SPRD_PCIE_USE_DTS + .of_match_table = sipa_dummy_match_table +#endif + } +}; + +#ifndef SPRD_PCIE_USE_DTS +static struct platform_device *sipa_dummy_device; + +static int sipa_dummy_platform_device_reigster(void) +{ + int retval = -ENOMEM; + + sipa_dummy_device = platform_device_alloc("sipa_dummy", -1); + if (!sipa_dummy_device) + return retval; + + retval = platform_device_add(sipa_dummy_device); + if (retval < 0) + platform_device_put(sipa_dummy_device); + + return retval; +} +#endif + + +static int sipa_dummy_debug_show(struct seq_file *m, void *v) +{ + struct SIPA_DUMMY *sipa_dummy = (struct SIPA_DUMMY *)(m->private); + + if (!sipa_dummy) { + pr_err("invalid data, sipa_dummy is NULL\n"); + return -EINVAL; + } + + seq_puts(m, "*************************************************\n"); + seq_printf(m, "DEVICE: %s rx_busy=%d rx_evt=%d\n", + sipa_dummy->netdev->name, atomic_read(&sipa_dummy->rx_busy), + atomic_read(&sipa_dummy->rx_evt)); + seq_puts(m, "*************************************************\n"); + + return 0; +} + +static int sipa_dummy_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, sipa_dummy_debug_show, inode->i_private); +} + +static const struct file_operations sipa_dummy_debug_fops = { + .open = sipa_dummy_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int sipa_dummy_debugfs_mknod(void *data) +{ + if (!dummy_root) { + pr_err("dummy dir is NULL\n"); + return -ENXIO; + } + debugfs_create_file("stats", + 0444, + dummy_root, + data, + &sipa_dummy_debug_fops); + + return 0; +} + +static void __init sipa_dummy_debugfs_init(void) +{ + dummy_root = debugfs_create_dir("sipa_dummy", NULL); + if (!dummy_root) + pr_err("failed to create sipa_dummy debugfs dir\n"); +} + +int sipa_dummy_init(void) +{ + sipa_dummy_debugfs_init(); +#ifndef SPRD_PCIE_USE_DTS + 
sipa_dummy_platform_device_reigster(); +#endif + return platform_driver_register(&sipa_dummy_driver); +} +EXPORT_SYMBOL(sipa_dummy_init); + +void sipa_dummy_exit(void) +{ + platform_driver_unregister(&sipa_dummy_driver); +#ifndef SPRD_PCIE_USE_DTS + platform_device_unregister(sipa_dummy_device); +#endif +} +EXPORT_SYMBOL(sipa_dummy_exit); diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_eth.c b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_eth.c new file mode 100644 index 000000000..c790725fe --- /dev/null +++ b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_eth.c @@ -0,0 +1,1087 @@ +/* + * Copyright (C) 2019 Spreadtrum Communications Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) "sipa_eth: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sipa_eth.h" +#include "../include/sipa.h" + +#ifndef ARPHRD_RAWIP +#define ARPHRD_RAWIP ARPHRD_NONE +#endif + +#ifdef CONFIG_PINCTRL_IPQ807x +//#define CONFIG_QCA_NSS_DRV +#endif + +#if 1//def CONFIG_QCA_NSS_DRV +#define _RMNET_NSS_H_ +#define _RMENT_NSS_H_ +struct rmnet_nss_cb { + int (*nss_create)(struct net_device *dev); + int (*nss_free)(struct net_device *dev); + int (*nss_tx)(struct sk_buff *skb); +}; +static struct rmnet_nss_cb *rmnet_nss_callbacks __rcu __read_mostly; +#ifdef CONFIG_QCA_NSS_DRV +static uint __read_mostly qca_nss_enabled = 1; +module_param( qca_nss_enabled, uint, S_IRUGO); +#define rmnet_nss_dereference(nss_cb) do { \ + rcu_read_lock(); \ + nss_cb = rcu_dereference(rmnet_nss_callbacks); \ + rcu_read_unlock(); \ +} while(0) +#else +#define rmnet_nss_dereference(nss_cb) do { nss_cb = NULL; } while(0) +#endif +#endif + +/* Device status */ +#define DEV_ON 1 +#define DEV_OFF 0 + +#define SIPA_ETH_NAPI_WEIGHT 64 +#define SIPA_ETH_IFACE_PREF "seth" +#define SIPA_ETH_VPCIE_PREF "pcie" +#define SIPA_ETH_VPCIE_IDX 8 + +#define SIPA_DUMMY_IFACE_NUM 4 + +static struct dentry *root; +static int sipa_eth_debugfs_mknod(void *root, void *data); +static void sipa_eth_poll_rx_handler (void *priv); +static u64 gro_enable; + +struct sipa_eth_netid_device * dev_list[SIPA_DUMMY_IFACE_NUM]; + + +static const unsigned char dhcp_dst_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + +//static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3}; +//static const u8 default_dev_addr[ETH_ALEN] = {0x02, 0x50, 0xf4}; + +static inline void sipa_eth_dt_stats_init(struct sipa_eth_dtrans_stats *stats) +{ + memset(stats, 0, sizeof(*stats)); +} + +static inline void sipa_eth_rx_stats_update( + struct sipa_eth_dtrans_stats *stats, u32 len) +{ + stats->rx_sum += len; + stats->rx_cnt++; +} + +static inline void sipa_eth_tx_stats_update( + struct sipa_eth_dtrans_stats *stats, u32 len) +{ + stats->tx_sum += len; + stats->tx_cnt++; +} + +static void sipa_eth_prepare_skb(struct SIPA_ETH *sipa_eth, struct sk_buff *skb) +{ + struct iphdr *iph; + struct ipv6hdr *ipv6h; + struct net_device *dev; 
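+	/* Note: real_len is re-derived from the L3 header below because
+	 * the skb handed up by sipa may carry tail padding; the skb is
+	 * then trimmed back to that length with skb_trim().
+	 */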
+	unsigned int real_len = 0, payload_len = 0;
+	bool ip_arp = true;
+
+	dev = sipa_eth->netdev;
+
+	skb->protocol = eth_type_trans(skb, dev);
+	skb_reset_network_header(skb);
+
+	switch (ntohs(skb->protocol)) {
+	case ETH_P_IP:
+		iph = ip_hdr(skb);
+		real_len = ntohs(iph->tot_len);
+		break;
+	case ETH_P_IPV6:
+		ipv6h = ipv6_hdr(skb);
+		payload_len = ntohs(ipv6h->payload_len);
+		real_len = payload_len + sizeof(struct ipv6hdr);
+		break;
+	case ETH_P_ARP:
+		real_len = arp_hdr_len(dev);
+		break;
+	default:
+		ip_arp = false;
+		break;
+	}
+
+	if (ip_arp)
+		skb_trim(skb, real_len);
+
+	/* TODO: checksum ... */
+	skb->ip_summed = CHECKSUM_NONE;
+	skb->dev = dev;
+}
+
+static int sipa_eth_rx(struct SIPA_ETH *sipa_eth, int budget)
+{
+	struct sk_buff *skb;
+	struct net_device *dev;
+	struct sipa_eth_dtrans_stats *dt_stats;
+	int netid;
+	int skb_cnt = 0;
+	int ret;
+
+	if (!sipa_eth) {
+		pr_err("no sipa_eth device\n");
+		return -EINVAL;
+	}
+
+	dt_stats = &sipa_eth->dt_stats;
+	dev = sipa_eth->netdev;
+	atomic_set(&sipa_eth->rx_evt, 0);
+	while (skb_cnt < budget) {
+		ret = sipa_nic_rx(&netid, &skb, skb_cnt);
+		if (ret) {
+			switch (ret) {
+			case -ENODEV:
+				pr_err("fail to find dev\n");
+				sipa_eth->stats.rx_errors++;
+				dt_stats->rx_fail++;
+				dev->stats.rx_errors++;
+				break;
+			case -ENODATA:
+				atomic_set(&sipa_eth->rx_busy, 0);
+				break;
+			}
+			break;
+		}
+
+		if (!skb) {
+			pr_err("recv skb is null\n");
+			return -EINVAL;
+		}
+
+		sipa_eth_prepare_skb(sipa_eth, skb);
+
+		sipa_eth->stats.rx_packets++;
+		sipa_eth->stats.rx_bytes += skb->len;
+		sipa_eth_rx_stats_update(dt_stats, skb->len);
+		napi_gro_receive(&sipa_eth->napi, skb);
+		//netif_receive_skb(skb);
+		skb_cnt++;
+	}
+
+	return skb_cnt;
+}
+
+static int sipa_eth_rx_poll_handler(struct napi_struct *napi, int budget)
+{
+	struct SIPA_ETH *sipa_eth = container_of(napi, struct SIPA_ETH, napi);
+	int tmp = 0, pkts;
+
+	/* If the number of pkts is more than the napi weight (64),
+	 * we cannot read them all with a single poll.
+	 * When the return value of the poll func equals the weight (64),
+	 * the napi core invokes the poll func one more time via
+	 * __raise_softirq_irqoff (see napi_poll for details),
+	 * so we must not do napi_complete in that case.
+	 */
+READ_AGAIN:
+	/* For example:
+	 * pkts = 60, tmp = 60, budget = 4.
+	 * If rx_evt is true, we goto READ_AGAIN, then
+	 * pkts = 4, tmp = 64, budget = 0,
+	 * and we goto out, returning 64 to napi.
+	 * In that case, we force napi to do polling again.
+	 */
+	pkts = sipa_eth_rx(sipa_eth, budget);
+	tmp += pkts;
+	budget -= pkts;
+	/*
+	 * If budget is 0 here, it means we have not finished reading yet,
+	 * so we should return the weight number (64) to napi to ask it
+	 * to do another polling.
+	 */
+	if (!budget)
+		goto out;
+
+	/* Due to a concurrency issue, we have to do napi_complete
+	 * cautiously. If a socket is in the process of napi polling,
+	 * a SIPA_RECEIVE may arrive to trigger another socket to do
+	 * receiving; we must record it because it will be blocked by
+	 * rx_busy at the first beginning.
+	 * Since this SIPA_RECEIVE notification is a one-shot behaviour
+	 * in sipa_nic, if we chose to ignore this event, we might lose
+	 * the chance to receive forever.
+	 */
+	if (atomic_read(&sipa_eth->rx_evt))
+		goto READ_AGAIN;
+
+	/* If the budget is still more than 0, it means the pkts
+	 * we received are fewer than the napi weight (64).
+	 * Then we are okay to do napi_complete.
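+	 * (Returning tmp < weight together with napi_complete tells the
+	 * napi core not to re-poll; any rx_evt that races in is
+	 * re-checked right after napi_complete below.)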
+	 */
+	if (budget) {
+		napi_complete(napi);
+		/* Tested in a lab with ten threads of TCP streams and
+		 * TPUT reaching 1Gbps, another edge case occurs:
+		 * rx_busy might be 0 and rx_evt might be 1
+		 * after we do napi_complete.
+		 * So do the rx handling manually to prevent
+		 * sipa_eth from stopping receiving pkts.
+		 */
+		if (atomic_read(&sipa_eth->rx_evt) ||
+		    atomic_read(&sipa_eth->rx_busy)) {
+			pr_debug("rx evt recv after napi complete");
+			atomic_set(&sipa_eth->rx_evt, 0);
+			napi_schedule(&sipa_eth->napi);
+		}
+	}
+
+out:
+	return tmp;
+}
+
+/* Attention: this whole RX path is deprecated; we use sipa_dummy to rx. */
+static void sipa_eth_poll_rx_handler(void *priv)
+{
+	struct SIPA_ETH *sipa_eth = (struct SIPA_ETH *)priv;
+
+	if (!sipa_eth) {
+		pr_err("data is NULL\n");
+		return;
+	}
+
+	if (!atomic_cmpxchg(&sipa_eth->rx_busy, 0, 1)) {
+		atomic_set(&sipa_eth->rx_evt, 0);
+		napi_schedule(&sipa_eth->napi);
+		/* Trigger a NET_RX_SOFTIRQ softirq directly,
+		 * or there will be a delay
+		 */
+		//raise_softirq(NET_RX_SOFTIRQ);
+	}
+}
+
+static void sipa_eth_flowctrl_handler(void *priv, int flowctrl)
+{
+	struct SIPA_ETH *sipa_eth = (struct SIPA_ETH *)priv;
+	struct net_device *dev = sipa_eth->netdev;
+
+	if (flowctrl) {
+		netif_stop_queue(dev);
+	} else if (netif_queue_stopped(dev)) {
+		netif_wake_queue(dev);
+	}
+}
+
+static void sipa_eth_notify_cb(void *priv, enum sipa_evt_type evt,
+			       unsigned int data)
+{
+	struct SIPA_ETH *sipa_eth = (struct SIPA_ETH *)priv;
+
+	switch (evt) {
+	case SIPA_RECEIVE:
+		atomic_set(&sipa_eth->rx_evt, 1);
+		sipa_eth_poll_rx_handler(priv);
+		break;
+	case SIPA_LEAVE_FLOWCTRL:
+		pr_info("SIPA LEAVE FLOWCTRL\n");
+		sipa_eth_flowctrl_handler(priv, 0);
+		break;
+	case SIPA_ENTER_FLOWCTRL:
+		pr_info("SIPA ENTER FLOWCTRL\n");
+		sipa_eth_flowctrl_handler(priv, 1);
+		break;
+	default:
+		break;
+	}
+}
+
+static int sipa_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct SIPA_ETH *sipa_eth = netdev_priv(dev);
+	struct sipa_eth_init_data *pdata = sipa_eth->pdata;
+	struct sipa_eth_dtrans_stats *dt_stats;
+	int ret = 0, dhcp = 0;
+	int netid;
+
+	dt_stats = &sipa_eth->dt_stats;
+	if (sipa_eth->state != DEV_ON) {
+		pr_err("called when %s is down\n", dev->name);
+		dt_stats->tx_fail++;
+		netif_carrier_off(dev);
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	netid = pdata->netid;
+
+	if (dev->type != ARPHRD_ETHER) {
+		switch (skb->data[0] & 0xf0) {
+		case 0x40:
+		{
+			struct iphdr *ip4h = (struct iphdr *)(&skb->data[0]);
+
+			/* DHCP goes out to the broadcast mac */
+			if (ip4h->protocol == IPPROTO_UDP &&
+			    ip4h->saddr == 0x00000000 &&
+			    ip4h->daddr == 0xFFFFFFFF)
+				dhcp = 1;
+			skb->protocol = htons(ETH_P_IP);
+		}
+			break;
+		case 0x60:
+			skb->protocol = htons(ETH_P_IPV6);
+			break;
+		default:
+			if (skb->protocol != htons(ETH_P_ARP)) {
+				pr_err("unknown skb->protocol %02x\n", skb->data[0]);
+				goto err;
+			}
+			skb->protocol = htons(ETH_P_ARP);
+			arp_hdr(skb)->ar_hrd = htons(ARPHRD_ETHER);
+			break;
+		}
+		skb_push(skb, ETH_HLEN);
+		skb_reset_mac_header(skb);
+
+		memcpy(eth_hdr(skb)->h_source, dev->dev_addr, ETH_ALEN);
+		if (dhcp)
+			memcpy(eth_hdr(skb)->h_dest, dhcp_dst_addr, ETH_ALEN);
+		else
+			memcpy(eth_hdr(skb)->h_dest, pdata->modem_mac, ETH_ALEN);
+		eth_hdr(skb)->h_proto = skb->protocol;
+	}
+
+	ret = sipa_nic_tx(sipa_eth->nic_id, pdata->term_type, netid, skb);
+	if (unlikely(ret != 0)) {
+		if (ret == -EAGAIN) {
+			/*
+			 * restore the skb, otherwise
+			 * we may pull this skb ETH_HLEN bytes twice
+			 */
+			if (!pdata->mac_h)
+				skb_push(skb, ETH_HLEN);
+			dt_stats->tx_fail++;
+			sipa_eth->stats.tx_errors++;
+			/*
+			if (ret 
== -EAGAIN) {
+				netif_stop_queue(dev);
+				sipa_nic_trigger_flow_ctrl_work(sipa_eth->nic_id, ret);
+			}
+			*/
+			return NETDEV_TX_BUSY;
+		}
+		pr_err("fail to send skb, dev 0x%p eth 0x%p nic_id %d, ret %d\n",
+		       dev, sipa_eth, sipa_eth->nic_id, ret);
+		goto err;
+	}
+
+	/* update netdev statistics */
+	sipa_eth->stats.tx_packets++;
+	sipa_eth->stats.tx_bytes += skb->len;
+	sipa_eth_tx_stats_update(dt_stats, skb->len);
+
+	return NETDEV_TX_OK;
+
+err:
+	sipa_eth->netdev->stats.tx_dropped++;
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+}
+
+static int sipa_eth_change_dev_list_state(struct SIPA_ETH *sipa_eth, int state)
+{
+	struct sipa_eth_netid_device *netid_dev_info;
+	int netid = sipa_eth->pdata->netid;
+
+	if (strncmp(sipa_eth->netdev->name,
+		    SIPA_ETH_VPCIE_PREF,
+		    strlen(SIPA_ETH_VPCIE_PREF)))
+		return 0;
+
+	if (netid < 0 || netid >= SIPA_DUMMY_IFACE_NUM) {
+		pr_info("illegal netid %d\n", netid);
+		return -EINVAL;
+	}
+
+	netid_dev_info = dev_list[netid];
+	if (!netid_dev_info)
+		return -EINVAL;
+	netid_dev_info->state = state;
+
+	pr_info("set %s netid %d %s for dummy\n",
+		sipa_eth->netdev->name, netid, state ? "DEV_ON" : "DEV_OFF");
+	return 0;
+}
+
+/* Open interface */
+static int sipa_eth_open(struct net_device *dev)
+{
+	struct SIPA_ETH *sipa_eth = netdev_priv(dev);
+	struct sipa_eth_init_data *pdata = sipa_eth->pdata;
+	int ret = 0;
+
+	pr_info("dev 0x%p eth 0x%p open %s netid %d term %d mac_h %d\n",
+		dev, sipa_eth, dev->name, pdata->netid, pdata->term_type,
+		pdata->mac_h);
+	ret = sipa_nic_open(
+		pdata->term_type,
+		pdata->netid,
+		sipa_eth_notify_cb,
+		(void *)sipa_eth);
+
+	if (ret < 0)
+		return -EINVAL;
+
+	sipa_eth->nic_id = ret;
+	sipa_eth->state = DEV_ON;
+	sipa_eth_dt_stats_init(&sipa_eth->dt_stats);
+	memset(&sipa_eth->stats, 0, sizeof(sipa_eth->stats));
+
+	if (!netif_carrier_ok(sipa_eth->netdev)) {
+		pr_info("set netif_carrier_on\n");
+		netif_carrier_on(sipa_eth->netdev);
+	}
+
+	atomic_set(&sipa_eth->rx_busy, 0);
+	sipa_eth_change_dev_list_state(sipa_eth, DEV_ON);
+	netif_start_queue(dev);
+	napi_enable(&sipa_eth->napi);
+
+	return 0;
+}
+
+/* Close interface */
+static int sipa_eth_close(struct net_device *dev)
+{
+	struct SIPA_ETH *sipa_eth = netdev_priv(dev);
+
+	pr_info("close %s!\n", dev->name);
+
+	sipa_nic_close(sipa_eth->nic_id);
+	sipa_eth->state = DEV_OFF;
+
+	napi_disable(&sipa_eth->napi);
+	netif_stop_queue(dev);
+
+	sipa_eth_change_dev_list_state(sipa_eth, DEV_OFF);
+	return 0;
+}
+
+static struct net_device_stats *sipa_eth_get_stats(struct net_device *dev)
+{
+	struct SIPA_ETH *sipa_eth = netdev_priv(dev);
+
+	return &sipa_eth->stats;
+}
+
+/*
+ * For example, if an application starts a tcp connection,
+ * it finally invokes the tcp_connect func to send out a TCP SYN.
+ * In the func tcp_init_nondata_skb, skb->ip_summed is set
+ * to CHECKSUM_PARTIAL, because only the pseudo header checksum
+ * is calculated and stored. It expects the netdevice
+ * to calculate the checksum over the TCP header & TCP payload and
+ * store the final checksum into tcphdr->check.
+ * Then __dev_queue_xmit -> validate_xmit_skb checks the features
+ * of the current network card; only if it lacks
+ * NETIF_F_IP_CSUM/NETIF_F_IPV6_CSUM/NETIF_F_HW_CSUM will the
+ * skb_checksum_help func be invoked to do that calculation in software.
+ *
+ * So we have to implement the ndo_features_check func, since we
+ * don't have any ability to calculate a checksum for a pkt.
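+ *
+ * Masking the csum bits in ndo_features_check, as
+ * sipa_eth_features_check() below does:
+ *
+ *	features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ *		      NETIF_F_HW_CSUM);
+ *
+ * makes the core fall back to skb_checksum_help() for every skb
+ * marked CHECKSUM_PARTIAL before it reaches our start_xmit.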
+ */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION( 3,11,0 )) +netdev_features_t sipa_eth_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM); + + return features; +} +#endif + +static const struct net_device_ops sipa_eth_ops = { + .ndo_open = sipa_eth_open, + .ndo_stop = sipa_eth_close, + .ndo_start_xmit = sipa_eth_start_xmit, + .ndo_get_stats = sipa_eth_get_stats, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION( 3,11,0 )) + .ndo_features_check = sipa_eth_features_check, +#endif +}; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION( 3,11,0 )) +static int sipa_eth_parse_dt( + struct sipa_eth_init_data **init, + struct device *dev) +{ + struct sipa_eth_init_data *pdata = NULL; + struct device_node *np = dev->of_node; + int ret; + u32 udata, id; + s32 sdata; + + if (!np) + pr_err("dev of_node np is null\n"); + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + id = of_alias_get_id(np, "eth"); + switch (id) { + case 0 ... 7: + snprintf(pdata->name, IFNAMSIZ, "%s%d", + SIPA_ETH_IFACE_PREF, id); + break; + case 8 ... 11: + snprintf(pdata->name, IFNAMSIZ, "%s%d", + SIPA_ETH_VPCIE_PREF, id - SIPA_ETH_VPCIE_IDX); + break; + default: + pr_err("wrong alias id from dts, id %d\n", id); + return -EINVAL; + } + + ret = of_property_read_u32(np, "sprd,netid", &sdata); + if (ret) { + pr_err("read sprd,netid ret %d\n", ret); + return ret; + } + /* dts reflect */ + pdata->netid = sdata - 1; + + ret = of_property_read_u32(np, "sprd,term-type", &udata); + if (ret) { + pr_err("read sprd,term-type ret %d\n", ret); + return ret; + } + + pdata->term_type = udata; + + pdata->mac_h = of_property_read_bool(np, "sprd,mac-header"); + + *init = pdata; + pr_debug("after dt parse, name %s netid %d term-type %d mac_h %d\n", + pdata->name, pdata->netid, pdata->term_type, pdata->mac_h); + return 0; +} +#endif + +static void s_setup(struct net_device *dev) +{ +#ifndef CONFIG_SPRD_ETHERNET + ether_setup(dev); + dev->needed_headroom = 16; + + /* Raw IP mode */ + dev->header_ops = NULL; /* No header */ + dev->type = ARPHRD_RAWIP; + dev->hard_header_len = 0; + dev->flags |= IFF_NOARP | IFF_PROMISC; + dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); + dev->addr_len = ETH_ALEN; + eth_hw_addr_random(dev); +#else + ether_setup(dev); + dev->flags |= IFF_PROMISC; +#endif +} + +static ssize_t sipa_eth_get_direct_mode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_dev(dev); + struct SIPA_ETH *sipa_eth = netdev_priv(netdev); + + + if (sipa_eth->pdata->term_type == 0x0) + return sprintf(buf, "\n %s in mode: normal\n", sipa_eth->netdev->name); + else if (sipa_eth->pdata->term_type == 0x6) + return sprintf(buf, "\n %s in mode: direct\n", sipa_eth->netdev->name); + else + return sprintf(buf, "\n %s in mode: illegal\n", sipa_eth->netdev->name); +} + +static ssize_t sipa_eth_set_direct_mode(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_dev(dev); + struct SIPA_ETH *sipa_eth = netdev_priv(netdev); + + if (!strncmp(buf, "normal", count - 1)) { + sipa_eth->pdata->term_type = 0x0; + } else if (!strncmp(buf, "direct", count - 1)) { +#ifndef CONFIG_SPRD_ETHERNET + sipa_eth->pdata->term_type = 0x6; +#else + pr_info("mode only can be set as normal when CONFIG_SPRD_ETHERNET is enabled!\n"); + return -EINVAL; +#endif + } else { + return -EINVAL; + } + + return count; +} + +static 
DEVICE_ATTR(mode, 0664, sipa_eth_get_direct_mode, sipa_eth_set_direct_mode); + +static struct attribute *sipa_eth_attributes[] = { + &dev_attr_mode.attr, + NULL, +}; + +static struct attribute_group sipa_eth_attribute_group = { + .attrs = sipa_eth_attributes, +}; + +static rx_handler_result_t sipa_eth_rx_handler(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + struct rmnet_nss_cb *nss_cb; + + if (!skb) + return RX_HANDLER_CONSUMED; + + //printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + + /* Check this so that we dont loop around netif_receive_skb */ + + if (skb->cb[0] == 1) { + skb->cb[0] = 0; + + return RX_HANDLER_PASS; + } + + rmnet_nss_dereference(nss_cb); + if (nss_cb) { + nss_cb->nss_tx(skb); + return RX_HANDLER_CONSUMED; + } + + return RX_HANDLER_PASS; +} + +static int sipa_eth_probe(struct platform_device *pdev) +{ + struct sipa_eth_netid_device *netid_dev_info = NULL; + struct sipa_eth_init_data *pdata = pdev->dev.platform_data; + struct net_device *netdev; + struct SIPA_ETH *sipa_eth; + char ifname[IFNAMSIZ]; + struct rmnet_nss_cb *nss_cb; + int ret; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION( 3,11,0 )) + if (pdev->dev.of_node && !pdata) { + ret = sipa_eth_parse_dt(&pdata, &pdev->dev); + if (ret) { + pr_err("failed to parse device tree, ret=%d\n", ret); + return ret; + } + pdev->dev.platform_data = pdata; + } +#endif + + rmnet_nss_dereference(nss_cb); + strlcpy(ifname, pdata->name, IFNAMSIZ); +#ifdef NET_NAME_PREDICTABLE + netdev = alloc_netdev( + sizeof(struct SIPA_ETH), + ifname, + NET_NAME_PREDICTABLE, + s_setup); +#else + netdev = alloc_netdev( + sizeof(struct SIPA_ETH), + ifname, + s_setup); +#endif + + if (!netdev) { + pr_err("alloc_netdev() failed.\n"); + return -ENOMEM; + } + + sipa_eth = netdev_priv(netdev); + sipa_eth_dt_stats_init(&sipa_eth->dt_stats); + sipa_eth->netdev = netdev; + sipa_eth->pdata = pdata; + atomic_set(&sipa_eth->rx_busy, 0); + atomic_set(&sipa_eth->rx_evt, 0); + netdev->netdev_ops = &sipa_eth_ops; + netdev->watchdog_timeo = 1 * HZ; + netdev->irq = 0; + netdev->dma = 0; + netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM); + random_ether_addr(netdev->dev_addr); + netdev->sysfs_groups[0] = &sipa_eth_attribute_group; + + netif_napi_add(netdev, + &sipa_eth->napi, + sipa_eth_rx_poll_handler, + SIPA_ETH_NAPI_WEIGHT); + + /* Register new Ethernet interface */ + ret = register_netdev(netdev); + if (ret) { + pr_err("register_netdev() failed (%d)\n", ret); + netif_napi_del(&sipa_eth->napi); + free_netdev(netdev); + return ret; + } + + sipa_eth->state = DEV_OFF; + /* Set link as disconnected */ + netif_carrier_off(netdev); + platform_set_drvdata(pdev, sipa_eth); + sipa_eth_debugfs_mknod(root, (void *)sipa_eth); + + if (!strncmp(netdev->name, SIPA_ETH_VPCIE_PREF, strlen(SIPA_ETH_VPCIE_PREF))) { + netid_dev_info = kzalloc(sizeof(*netid_dev_info), GFP_ATOMIC); + if (!netid_dev_info) + return -ENOMEM; + + if (nss_cb) { + int rc = nss_cb->nss_create(netdev); + if (rc) { + /* Log, but don't fail the device creation */ + netdev_err(netdev, "Device will not use NSS path: %d\n", rc); + } else { + netdev_info(netdev, "NSS context created\n"); + rtnl_lock(); + netdev_rx_handler_register(netdev, sipa_eth_rx_handler, NULL); + rtnl_unlock(); + } + } + + netid_dev_info->ndev = netdev; + netid_dev_info->napi = sipa_eth->napi; + netid_dev_info->state = DEV_OFF; + netid_dev_info->netid = pdata->netid; + dev_list[pdata->netid] = netid_dev_info; + } + 
//sysfs_create_group(&pdev->dev.kobj, &sipa_eth_attribute_group); + return 0; +} + +/* Cleanup Ethernet device driver. */ +static int sipa_eth_remove(struct platform_device *pdev) +{ + struct SIPA_ETH *sipa_eth = platform_get_drvdata(pdev); + struct rmnet_nss_cb *nss_cb; + rx_handler_func_t *rx_handler; + struct sipa_eth_init_data *pdata = pdev->dev.platform_data; + + + rmnet_nss_dereference(nss_cb); + rcu_read_lock(); + rx_handler = rcu_dereference(sipa_eth->netdev->rx_handler); + rcu_read_unlock(); + if (nss_cb && rx_handler == sipa_eth_rx_handler) { + rtnl_lock(); + netdev_rx_handler_unregister(sipa_eth->netdev); + rtnl_unlock(); + nss_cb->nss_free(sipa_eth->netdev); + } + + netif_napi_del(&sipa_eth->napi); + unregister_netdev(sipa_eth->netdev); + + if(dev_list[pdata->netid]) { + kfree(dev_list[pdata->netid]); + dev_list[pdata->netid] = NULL; + } + + if (!IS_ERR_OR_NULL(sipa_eth->subroot)) + debugfs_remove_recursive(sipa_eth->subroot); + free_netdev(sipa_eth->netdev); + return 0; +} + +#ifdef SPRD_PCIE_USE_DTS +static const struct of_device_id sipa_eth_match_table[] = { + { .compatible = "sprd,sipa_eth"}, + { } +}; +#endif + +static struct platform_driver sipa_eth_driver = { + .probe = sipa_eth_probe, + .remove = sipa_eth_remove, + .driver = { + .owner = THIS_MODULE, + .name = SIPA_ETH_IFACE_PREF, +#ifdef SPRD_PCIE_USE_DTS + .of_match_table = sipa_eth_match_table +#endif + } +}; + +#ifndef SPRD_PCIE_USE_DTS +static struct platform_device *sipa_eth_pdev[SIPA_ETH_NUM]; +static struct sipa_eth_init_data *sipa_eth_data[SIPA_ETH_NUM]; + +static int sipa_eth_platform_device_reigster(void) +{ + int retval = -ENOMEM; + int i; + + for(i = 0; i < SIPA_ETH_NUM; i++) { + sipa_eth_pdev[i] = platform_device_alloc(SIPA_ETH_IFACE_PREF, i); + if (!sipa_eth_pdev[i]) { + i--; + while (i >= 0) + platform_device_put(sipa_eth_pdev[i--]); + return retval; + } + } + + for (i = 0; i < SIPA_ETH_NUM; i++) { + sipa_eth_data[i] = kzalloc(sizeof(struct sipa_eth_init_data), GFP_KERNEL); + if (!sipa_eth_data[i]) { + retval = -ENOMEM; + goto err_add_pdata; + } + + snprintf(sipa_eth_data[i]->name, IFNAMSIZ, "%s%d", + SIPA_ETH_VPCIE_PREF, i); +#ifndef CONFIG_SPRD_ETHERNET + sipa_eth_data[i]->term_type = 0x6; +#else + sipa_eth_data[i]->term_type = 0x0; +#endif + sipa_eth_data[i]->netid = i; + sipa_eth_data[i]->mac_h = true; + + retval = platform_device_add_data(sipa_eth_pdev[i], sipa_eth_data[i], + sizeof(struct sipa_eth_init_data)); + if (retval) + goto err_add_pdata; + } + + for (i = 0; i < SIPA_ETH_NUM; i++) { + retval = platform_device_add(sipa_eth_pdev[i]); + if (retval < 0) { + i--; + while (i >= 0) + platform_device_del(sipa_eth_pdev[i]); + goto err_add_pdata; + } + } + + return retval; +err_add_pdata: + for (i = 0; i < SIPA_ETH_NUM; i++) { + if(sipa_eth_data[i]) + kfree(sipa_eth_data[i]); + } + for (i = 0; i < SIPA_ETH_NUM; i++) + platform_device_put(sipa_eth_pdev[i]); + return retval; +} + +static void sipa_eth_platform_device_unreigster(void) +{ + int i; + + for (i = 0; i < SIPA_ETH_NUM; i++) { + struct sipa_eth_init_data *init_data; + + init_data = dev_get_platdata(&sipa_eth_pdev[i]->dev); + platform_device_unregister(sipa_eth_pdev[i]); + } + + if (!IS_ERR_OR_NULL(root)) + debugfs_remove_recursive(root); + +} +#endif + +static int sipa_eth_debug_show(struct seq_file *m, void *v) +{ + struct SIPA_ETH *sipa_eth = (struct SIPA_ETH *)(m->private); + struct sipa_eth_dtrans_stats *stats; + struct sipa_eth_init_data *pdata; + int i; + + if (!sipa_eth) { + pr_err("invalid data, sipa_eth is NULL\n"); + return -EINVAL; + 
} + pdata = sipa_eth->pdata; + stats = &sipa_eth->dt_stats; + + seq_puts(m, "*************************************************\n"); + seq_printf(m, "DEVICE: %s, term_type %d, netid %d, state %s mac_h %d\n", + pdata->name, pdata->term_type, pdata->netid, + sipa_eth->state == DEV_ON ? "UP" : "DOWN", pdata->mac_h); + seq_puts(m, "\nRX statistics:\n"); + seq_printf(m, "rx_sum=%u, rx_cnt=%u\n", + stats->rx_sum, + stats->rx_cnt); + seq_printf(m, "rx_fail=%u\n", + stats->rx_fail); + + seq_printf(m, "rx_busy=%d\n", atomic_read(&sipa_eth->rx_busy)); + seq_printf(m, "rx_evt=%d\n", atomic_read(&sipa_eth->rx_evt)); + + seq_puts(m, "\nTX statistics:\n"); + seq_printf(m, "tx_sum=%u, tx_cnt=%u\n", + stats->tx_sum, + stats->tx_cnt); + seq_printf(m, "tx_fail=%u\n", + stats->tx_fail); + + for (i = 0; i < SIPA_DUMMY_IFACE_NUM; i++) { + if(dev_list[i] == NULL) { + break; + } + seq_printf(m, "dev %s, netid %d state %d\n", + dev_list[i]->ndev->name, dev_list[i]->netid, dev_list[i]->state); + } + + seq_puts(m, "*************************************************\n"); + + return 0; +} + +static int sipa_eth_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, sipa_eth_debug_show, inode->i_private); +} + +static const struct file_operations sipa_eth_debug_fops = { + .open = sipa_eth_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int debugfs_gro_enable_get(void *data, u64 *val) +{ + *val = *(u64 *)data; + return 0; +} + +static int debugfs_gro_enable_set(void *data, u64 val) +{ + *(u64 *)data = val; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_gro_enable, + debugfs_gro_enable_get, + debugfs_gro_enable_set, + "%llu\n"); + +static int sipa_eth_debugfs_mknod(void *root, void *data) +{ + struct SIPA_ETH *sipa_eth = (struct SIPA_ETH *)data; + struct dentry *subroot; + + if (!sipa_eth) + return -ENODEV; + + if (!root) + return -ENXIO; + subroot = debugfs_create_dir(sipa_eth->netdev->name, (struct dentry *)root); + if (!subroot) + return -ENOMEM; + + sipa_eth->subroot = subroot; + debugfs_create_file("stats", + 0444, + subroot, + data, + &sipa_eth_debug_fops); + + debugfs_create_file("gro_enable", + 0600, + (struct dentry *)root, + &gro_enable, + &fops_gro_enable); + + return 0; +} + +static void __init sipa_eth_debugfs_init(void) +{ + root = debugfs_create_dir(SIPA_ETH_IFACE_PREF, NULL); + if (!root) + pr_err("failed to create sipa_eth debugfs dir\n"); +} + +#ifdef CONFIG_QCA_NSS_DRV +/* + EXTRA_CFLAGS="-I$(STAGING_DIR)/usr/include/qca-nss-drv $(EXTRA_CFLAGS)" + qsdk/qca/src/data-kernel/drivers/rmnet-nss/rmnet_nss.c +*/ +#include "rmnet_nss.c" +#endif + +int sipa_eth_init(void) +{ + sipa_eth_debugfs_init(); + + RCU_INIT_POINTER(rmnet_nss_callbacks, NULL); +#ifdef CONFIG_QCA_NSS_DRV + if (qca_nss_enabled) + rmnet_nss_init(); +#endif + +#ifndef SPRD_PCIE_USE_DTS + sipa_eth_platform_device_reigster(); +#endif + return platform_driver_register(&sipa_eth_driver); +} +EXPORT_SYMBOL(sipa_eth_init); + +void sipa_eth_exit(void) +{ + int i; +#ifdef CONFIG_QCA_NSS_DRV + if (qca_nss_enabled) + rmnet_nss_exit(); +#endif + platform_driver_unregister(&sipa_eth_driver); +#ifndef SPRD_PCIE_USE_DTS + sipa_eth_platform_device_unreigster(); +#endif + for (i = 0; i < SIPA_ETH_NUM; i++) { + if(sipa_eth_data[i]){ + kfree(sipa_eth_data[i]); + } + } +} +EXPORT_SYMBOL(sipa_eth_exit); diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_eth.h b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_eth.h new file mode 100644 index 000000000..0519225fd --- 
/dev/null +++ b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_eth.h @@ -0,0 +1,65 @@ +#ifndef _SIPA_ETH_H_ +#define _SIPA_ETH_H_ + +#include "../include/sipa.h" +#include + +#define SIPA_ETH_NUM 2 +#define SIPA_DUMMY_IFACE_NUM 4 + +/* Struct of data transfer statistics */ +struct sipa_eth_dtrans_stats { + u32 rx_sum; + u32 rx_cnt; + u32 rx_fail; + + u32 tx_sum; + u32 tx_cnt; + u32 tx_fail; +}; + +/* Device instance data. */ +struct SIPA_ETH { + int state; + atomic_t rx_busy; + atomic_t rx_evt; + struct net_device *netdev;/* Linux net device */ + enum sipa_nic_id nic_id; + struct napi_struct napi;/* Napi instance */ + /* Record data_transfer statistics */ + struct sipa_eth_dtrans_stats dt_stats; + struct net_device_stats stats;/* Net statistics */ + struct sipa_eth_init_data *pdata;/* Platform data */ + struct dentry *subroot; +}; + +struct sipa_eth_init_data { + char name[IFNAMSIZ]; + unsigned char modem_mac[ETH_ALEN]; + u32 term_type; + s32 netid; + bool mac_h; +}; + +struct sipa_eth_netid_device { + int state; + int netid; + struct net_device *ndev; + struct napi_struct napi;/* Napi instance */ + /* Record data_transfer statistics */ + struct net_device_stats stats;/* Net statistics */ +}; + +/* Device instance data. */ +struct SIPA_DUMMY { + atomic_t rx_busy; + atomic_t rx_evt; + struct net_device *netdev;/* Linux net device */ + struct napi_struct napi;/* Napi instance */ + struct net_device_stats stats;/* Net statistics */ +}; + +void sipa_dummy_recv_trigger(void); + +#endif + diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_nic.c b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_nic.c new file mode 100644 index 000000000..6a69b0de1 --- /dev/null +++ b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_nic.c @@ -0,0 +1,332 @@ +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "../include/sipa.h"
+#include "sipa_core.h"
+
+#define SIPA_CP_SRC ((1 << SIPA_TERM_CP0) | \
+	(1 << SIPA_TERM_CP1) | (1 << SIPA_TERM_VCP) | \
+	(1 << 0x19) | (1 << 0x18))
+
+struct sipa_nic_statics_info {
+	u32 src_mask;
+	int netid;
+};
+
+static struct sipa_nic_statics_info s_sipa_nic_statics[SIPA_NIC_MAX] = {
+	{ .src_mask = SIPA_CP_SRC, .netid = 0, },
+	{ .src_mask = SIPA_CP_SRC, .netid = 1, },
+	{ .src_mask = SIPA_CP_SRC, .netid = 2, },
+	{ .src_mask = SIPA_CP_SRC, .netid = 3, },
+	{ .src_mask = SIPA_CP_SRC, .netid = 4, },
+	{ .src_mask = SIPA_CP_SRC, .netid = 5, },
+	{ .src_mask = SIPA_CP_SRC, .netid = 6, },
+	{ .src_mask = SIPA_CP_SRC, .netid = 7, },
+	{ .src_mask = SIPA_CP_SRC, .netid = 8, },
+	{ .src_mask = SIPA_CP_SRC, .netid = 9, },
+};
+
+int sipa_nic_open(enum sipa_term_type src, int netid,
+		  sipa_notify_cb cb, void *priv)
+{
+	struct sipa_nic *nic = NULL;
+	struct sk_buff *skb;
+	enum sipa_nic_id nic_id = SIPA_NIC_MAX;
+	struct sipa_core *ctrl = sipa_get_ctrl_pointer();
+
+	if (!ctrl)
+		return -EINVAL;
+
+	if (!ctrl->remote_ready)
+		return -EINVAL;
+
+	nic_id = netid;
+	dev_info(ctrl->dev, "open nic_id = %d\n", nic_id);
+	if (nic_id == SIPA_NIC_MAX)
+		return -EINVAL;
+
+	if (ctrl->nic[nic_id]) {
+		nic = ctrl->nic[nic_id];
+		if (atomic_read(&nic->status) == NIC_OPEN)
+			return -EBUSY;
+		while ((skb = skb_dequeue(&nic->rx_skb_q)) != NULL)
+			dev_kfree_skb_any(skb);
+	} else {
+		nic = kzalloc(sizeof(*nic), GFP_KERNEL);
+		if (!nic)
+			return -ENOMEM;
+		ctrl->nic[nic_id] = nic;
+		skb_queue_head_init(&nic->rx_skb_q);
+	}
+
+	atomic_set(&nic->status, NIC_OPEN);
+	nic->nic_id = nic_id;
+	nic->send_ep = &ctrl->ep;
+	nic->need_notify = 0;
+	nic->src_mask = s_sipa_nic_statics[nic_id].src_mask;
+	nic->netid = netid;
+	nic->cb = cb;
+	nic->cb_priv = priv;
+	nic->continue_notify = true;
+
+	/* every receiver may receive cp packets */
+	//sipa_receiver_add_nic(ctrl->receiver, nic);
+	sipa_skb_sender_add_nic(ctrl->sender, nic);
+
+	return nic_id;
+}
+EXPORT_SYMBOL(sipa_nic_open);
+
+void sipa_nic_close(enum sipa_nic_id nic_id)
+{
+	struct sipa_nic *nic = NULL;
+	struct sk_buff *skb;
+	struct sipa_core *ctrl = sipa_get_ctrl_pointer();
+
+	if (!ctrl) {
+		pr_err("sipa driver may not be registered\n");
+		return;
+	}
+
+	if (nic_id == SIPA_NIC_MAX || !ctrl->nic[nic_id])
+		return;
+
+	nic = ctrl->nic[nic_id];
+	nic->continue_notify = false;
+	atomic_set(&nic->status, NIC_CLOSE);
+	/* free all pending skbs */
+	while ((skb = skb_dequeue(&nic->rx_skb_q)) != NULL)
+		dev_kfree_skb_any(skb);
+
+	sipa_skb_sender_remove_nic(ctrl->sender, nic);
+	dev_info(ctrl->dev, "close nic_id = %d\n", nic_id);
+}
+EXPORT_SYMBOL(sipa_nic_close);
+
+void sipa_nic_notify_evt(struct sipa_nic *nic, enum sipa_evt_type evt)
+{
+	struct sipa_core *ipa = sipa_get_ctrl_pointer();
+
+	if (!ipa->remote_ready)
+		return;
+
+	if (nic->cb)
+		nic->cb(nic->cb_priv, evt, 0);
+}
+EXPORT_SYMBOL(sipa_nic_notify_evt);
+
+void sipa_nic_check_flow_ctrl(void)
+{
+	int i;
+	struct sipa_nic *nic;
+	struct sipa_core *ipa = sipa_get_ctrl_pointer();
+
+	for (i = 0; i < SIPA_NIC_MAX; i++) {
+		nic = ipa->nic[i];
+		if (nic && nic->rm_flow_ctrl) {
+			nic->rm_flow_ctrl = false;
+			nic->cb(nic->cb_priv, SIPA_LEAVE_FLOWCTRL, 0);
+		}
+	}
+}
+EXPORT_SYMBOL(sipa_nic_check_flow_ctrl);
+
+void sipa_nic_try_notify_recv(struct sipa_nic *nic)
+{
+	if (atomic_read(&nic->status) == 
NIC_CLOSE)
+		return;
+
+	if (nic->cb)
+		nic->cb(nic->cb_priv, SIPA_RECEIVE, 0);
+}
+EXPORT_SYMBOL(sipa_nic_try_notify_recv);
+
+void sipa_nic_push_skb(struct sipa_nic *nic, struct sk_buff *skb)
+{
+	struct sipa_core *ctrl = sipa_get_ctrl_pointer();
+
+	atomic_inc(&ctrl->recv_cnt);
+	skb_queue_tail(&nic->rx_skb_q, skb);
+	if (nic->rx_skb_q.qlen == 1 || nic->continue_notify)
+		nic->need_notify = 1;
+}
+EXPORT_SYMBOL(sipa_nic_push_skb);
+
+int sipa_nic_tx(enum sipa_nic_id nic_id, enum sipa_term_type dst,
+		int netid, struct sk_buff *skb)
+{
+	int ret;
+	struct sipa_core *ctrl = sipa_get_ctrl_pointer();
+
+	if (!ctrl || !ctrl->sender) {
+		pr_err("sipa driver may not be registered\n");
+		return -EINVAL;
+	}
+
+	if (!ctrl->remote_ready) {
+		ctrl->nic[nic_id]->rm_flow_ctrl = true;
+//		dev_err(ctrl->dev, "remote ipa not ready\n");
+		return -EINPROGRESS;
+	}
+
+	ret = sipa_skb_sender_send_data(ctrl->sender, skb, dst, netid);
+//	if (ret == -EAGAIN)
+//		ctrl->nic[nic_id]->flow_ctrl_status = true;
+
+	return ret;
+}
+EXPORT_SYMBOL(sipa_nic_tx);
+
+int sipa_nic_rx(int *netid, struct sk_buff **out_skb, int index)
+{
+	struct sk_buff *skb;
+
+	skb = sipa_recv_skb(netid, index);
+	*out_skb = skb;
+
+	return (skb) ? 0 : -ENODATA;
+}
+EXPORT_SYMBOL(sipa_nic_rx);
+
+int sipa_nic_rx_has_data(enum sipa_nic_id nic_id)
+{
+	struct sipa_nic *nic;
+	struct sipa_core *ctrl = sipa_get_ctrl_pointer();
+
+	if (!ctrl) {
+		pr_err("sipa driver may not be registered\n");
+		return -EINVAL;
+	}
+	if (!ctrl->nic[nic_id] ||
+	    atomic_read(&ctrl->nic[nic_id]->status) == NIC_CLOSE)
+		return 0;
+
+	nic = ctrl->nic[nic_id];
+
+	return (!!nic->rx_skb_q.qlen);
+}
+EXPORT_SYMBOL(sipa_nic_rx_has_data);
+
+int sipa_nic_trigger_flow_ctrl_work(enum sipa_nic_id nic_id, int err)
+{
+	struct sipa_core *ctrl = sipa_get_ctrl_pointer();
+
+	if (!ctrl) {
+		pr_err("sipa driver may not be registered\n");
+		return -EINVAL;
+	}
+
+	if (!ctrl->sender)
+		return -ENODEV;
+
+	switch (err) {
+	case -EAGAIN:
+		//ctrl->sender->free_notify_net = true;
+		schedule_work(&ctrl->flow_ctrl_work);
+		break;
+	default:
+		dev_warn(ctrl->dev,
+			 "unknown flow ctrl err type\n");
+		break;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(sipa_nic_trigger_flow_ctrl_work);
+
+u32 sipa_nic_get_filled_num(void)
+{
+	struct sipa_core *ctrl = sipa_get_ctrl_pointer();
+	enum sipa_cmn_fifo_index id = ctrl->receiver->ep->recv_fifo->fifo_id;
+
+	if (!ctrl->remote_ready) {
+		dev_err(ctrl->dev, "remote sipa not ready %d\n",
+			ctrl->remote_ready);
+		return 0;
+	}
+
+	return ctrl->hal_ops.recv_node_from_tx_fifo(ctrl->dev, id,
+						    ctrl->cmn_fifo_cfg, -1);
+}
+EXPORT_SYMBOL(sipa_nic_get_filled_num);
+
+void sipa_nic_restore_irq(void)
+{
+	struct sipa_core *ctrl = sipa_get_ctrl_pointer();
+	enum sipa_cmn_fifo_index id = ctrl->receiver->ep->recv_fifo->fifo_id;
+
+	if (!ctrl->remote_ready) {
+		dev_err(ctrl->dev, "remote sipa not ready %d\n",
+			ctrl->remote_ready);
+		return;
+	}
+
+	ctrl->hal_ops.clr_tout_th_intr(id, ctrl->cmn_fifo_cfg);
+	ctrl->hal_ops.set_intr_eb(id, ctrl->cmn_fifo_cfg, true,
+				  SIPA_FIFO_THRESHOLD_IRQ_EN |
+				  SIPA_FIFO_DELAY_TIMER_IRQ_EN);
+}
+EXPORT_SYMBOL(sipa_nic_restore_irq);
+
+void sipa_nic_set_tx_fifo_rp(u32 rptr)
+{
+	struct sipa_core *ctrl = sipa_get_ctrl_pointer();
+	enum sipa_cmn_fifo_index id = ctrl->receiver->ep->recv_fifo->fifo_id;
+
+	if (!ctrl->remote_ready) {
+		dev_err(ctrl->dev, "remote sipa not ready %d\n",
+			ctrl->remote_ready);
+		return;
+	}
+
+	ctrl->hal_ops.set_tx_fifo_rp(id, ctrl->cmn_fifo_cfg, rptr);
+}
+EXPORT_SYMBOL(sipa_nic_set_tx_fifo_rp);
diff --git 
a/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_phy_v0/Makefile b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_phy_v0/Makefile new file mode 100644 index 000000000..dd44ed63a --- /dev/null +++ b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_phy_v0/Makefile @@ -0,0 +1 @@ +obj-y += sipa_common_fifo_hal.o sipa_fifo_irq_hal.o diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_phy_v0/sipa_common_fifo_hal.c b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_phy_v0/sipa_common_fifo_hal.c new file mode 100644 index 000000000..512196825 --- /dev/null +++ b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_phy_v0/sipa_common_fifo_hal.c @@ -0,0 +1,1234 @@ +#include +#include + +#include "../../include/sipa.h" +#include "../sipa_core.h" +#include "sipa_fifo_phy.h" + +#define PTR_MASK(depth) (depth | (depth - 1)) + +static inline int +ipa_put_pkt_to_cache_rx_fifo(struct device *dev, + struct sipa_cmn_fifo_cfg_tag *fifo_cfg, + struct sipa_node_description_tag *desc, + u32 num) +{ + dma_addr_t dma_addr; + u32 tmp = 0, tmp1 = 0, ret = 0, index = 0, left_cnt = 0; + ssize_t node_size = sizeof(struct sipa_node_description_tag); + struct sipa_node_description_tag *node = + (struct sipa_node_description_tag *) + fifo_cfg->rx_fifo.virtual_addr; + + dma_addr = fifo_cfg->rx_fifo.fifo_base_addr_l; + + left_cnt = fifo_cfg->rx_fifo.depth - + ipa_phy_get_rx_fifo_filled_depth(fifo_cfg->fifo_reg_base); + + if (!left_cnt) + return -ENOSPC; + + if (left_cnt < num) + num = left_cnt; + + index = fifo_cfg->rx_fifo.wr & (fifo_cfg->rx_fifo.depth - 1); + if (index + num <= fifo_cfg->rx_fifo.depth) { + memcpy(node + index, desc, node_size * num); + } else { + tmp = fifo_cfg->rx_fifo.depth - index; + memcpy(node + index, desc, tmp * node_size); + tmp1 = num - tmp; + memcpy(node, desc + tmp, tmp1 * node_size); + } + + fifo_cfg->rx_fifo.wr = (fifo_cfg->rx_fifo.wr + num) & + PTR_MASK(fifo_cfg->rx_fifo.depth); + smp_wmb(); + ret = ipa_phy_update_rx_fifo_wptr(fifo_cfg->fifo_reg_base, + fifo_cfg->rx_fifo.wr); + + if (!ret) + pr_err("sipa_phy_update_rx_fifo_rptr fail\n"); + + return num; +} + +static inline u32 +ipa_recv_pkts_from_tx_fifo(struct device *dev, + struct sipa_cmn_fifo_cfg_tag *fifo_cfg, + u32 num) +{ + return ipa_phy_get_tx_fifo_filled_depth(fifo_cfg->fifo_reg_base); +} + +static int ipa_common_fifo_hal_open(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + void *cookie) +{ + struct sipa_cmn_fifo_cfg_tag *ipa_term_fifo = NULL; + + if (likely(id < SIPA_FIFO_MAX)) { + ipa_term_fifo = cfg_base + id; + } else { + pr_err("sipa don't have this id %d\n", id); + return -EINVAL; + } + + if (ipa_term_fifo->state) { + pr_err("sipa fifo_id = %d has already opened state = %d\n", + ipa_term_fifo->fifo_id, ipa_term_fifo->state); + return -EBUSY; + } + + ipa_phy_set_rx_fifo_total_depth(ipa_term_fifo->fifo_reg_base, + ipa_term_fifo->rx_fifo.depth); + ipa_phy_set_rx_fifo_addr(ipa_term_fifo->fifo_reg_base, + ipa_term_fifo->rx_fifo.fifo_base_addr_l, + ipa_term_fifo->rx_fifo.fifo_base_addr_h); + + ipa_phy_set_tx_fifo_total_depth(ipa_term_fifo->fifo_reg_base, + ipa_term_fifo->tx_fifo.depth); + ipa_phy_set_tx_fifo_addr(ipa_term_fifo->fifo_reg_base, + ipa_term_fifo->tx_fifo.fifo_base_addr_l, + ipa_term_fifo->tx_fifo.fifo_base_addr_h); + + ipa_phy_set_cur_term_num(ipa_term_fifo->fifo_reg_base, + ipa_term_fifo->cur); + ipa_phy_set_dst_term_num(ipa_term_fifo->fifo_reg_base, + ipa_term_fifo->dst); + + ipa_phy_update_rx_fifo_rptr(ipa_term_fifo->fifo_reg_base, 0); + 
ipa_phy_update_rx_fifo_wptr(ipa_term_fifo->fifo_reg_base, 0); + ipa_phy_update_tx_fifo_rptr(ipa_term_fifo->fifo_reg_base, 0); + ipa_phy_update_tx_fifo_wptr(ipa_term_fifo->fifo_reg_base, 0); + ipa_phy_clear_stop_receive(ipa_term_fifo->fifo_reg_base); + + ipa_term_fifo->rx_fifo.rd = 0; + ipa_term_fifo->rx_fifo.wr = 0; + ipa_term_fifo->tx_fifo.rd = 0; + ipa_term_fifo->tx_fifo.wr = 0; + + ipa_term_fifo->fifo_name = cookie; + ipa_term_fifo->state = true; + + return 0; +} + +static int ipa_common_fifo_hal_close(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base) +{ + struct sipa_cmn_fifo_cfg_tag *ipa_term_fifo; + + if (likely(id < SIPA_FIFO_MAX)) { + ipa_term_fifo = cfg_base + id; + } else { + pr_err("sipa don't have this id %d\n", id); + return -EINVAL; + } + + ipa_phy_set_rx_fifo_total_depth(ipa_term_fifo->fifo_reg_base, 0); + ipa_phy_set_tx_fifo_total_depth(ipa_term_fifo->fifo_reg_base, 0); + + ipa_phy_update_rx_fifo_rptr(ipa_term_fifo->fifo_reg_base, 0); + ipa_phy_update_rx_fifo_wptr(ipa_term_fifo->fifo_reg_base, 0); + ipa_phy_update_tx_fifo_rptr(ipa_term_fifo->fifo_reg_base, 0); + ipa_phy_update_tx_fifo_wptr(ipa_term_fifo->fifo_reg_base, 0); + + ipa_term_fifo->rx_fifo.rd = 0; + ipa_term_fifo->rx_fifo.wr = 0; + ipa_term_fifo->tx_fifo.rd = 0; + ipa_term_fifo->tx_fifo.wr = 0; + + ipa_term_fifo->state = 0; + + return 0; +} + +static int +ipa_common_fifo_hal_set_rx_depth(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 depth) +{ + struct sipa_cmn_fifo_cfg_tag *ipa_term_fifo; + + if (likely(id < SIPA_FIFO_MAX)) { + ipa_term_fifo = cfg_base + id; + } else { + pr_err("sipa don't have this id %d\n", id); + return -EINVAL; + } + + return ipa_phy_set_rx_fifo_total_depth(ipa_term_fifo->fifo_reg_base, + depth); +} + +static u32 +ipa_common_fifo_hal_get_rx_depth(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base) +{ + struct sipa_cmn_fifo_cfg_tag *ipa_term_fifo; + + if (likely(id < SIPA_FIFO_MAX)) { + ipa_term_fifo = cfg_base + id; + } else { + pr_err("sipa don't have this id %d\n", id); + return 0; + } + + return ipa_phy_get_rx_fifo_total_depth(ipa_term_fifo->fifo_reg_base); +} + +static int +ipa_common_fifo_hal_set_tx_depth(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 depth) +{ + struct sipa_cmn_fifo_cfg_tag *ipa_term_fifo; + + if (likely(id < SIPA_FIFO_MAX)) { + ipa_term_fifo = cfg_base + id; + } else { + pr_err("sipa don't have this id %d\n", id); + return 0; + } + + return ipa_phy_set_tx_fifo_total_depth(ipa_term_fifo->fifo_reg_base, + depth); +} + +static u32 +ipa_common_fifo_hal_get_tx_depth(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base) +{ + struct sipa_cmn_fifo_cfg_tag *ipa_term_fifo; + + if (likely(id < SIPA_FIFO_MAX)) { + ipa_term_fifo = cfg_base + id; + } else { + pr_err("sipa don't have this id %d\n", id); + return 0; + } + + return ipa_phy_get_tx_fifo_total_depth(ipa_term_fifo->fifo_reg_base); +} + +static int +ipa_common_fifo_hal_set_intr_drop_packet(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 enable, sipa_irq_notify_cb cb) +{ + struct sipa_cmn_fifo_cfg_tag *ipa_term_fifo; + + if (likely(id < SIPA_FIFO_MAX)) { + ipa_term_fifo = cfg_base + id; + } else { + pr_err("sipa don't have this id %d\n", id); + return -EINVAL; + } + + if (enable) + return ipa_phy_enable_int_bit(ipa_term_fifo->fifo_reg_base, + IPA_DROP_PACKET_OCCUR_INT_EN); + else + return ipa_phy_disable_int_bit(ipa_term_fifo->fifo_reg_base, + IPA_DROP_PACKET_OCCUR_INT_EN); +} + 
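+/* The interrupt-enable helpers below all follow one pattern: resolve
+ * the per-fifo config by id, then set or clear a single enable bit in
+ * that fifo's interrupt control register, e.g.
+ *
+ *	if (enable)
+ *		ret = ipa_phy_enable_int_bit(fifo->fifo_reg_base,
+ *					     IPA_ERRORCODE_IN_TX_FIFO_EN);
+ *	else
+ *		ret = ipa_phy_disable_int_bit(fifo->fifo_reg_base,
+ *					      IPA_ERRORCODE_IN_TX_FIFO_EN);
+ *
+ * The cb argument is accepted for interface symmetry but not used here.
+ */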
+static int +ipa_common_fifo_hal_set_intr_error_code(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 enable, sipa_irq_notify_cb cb) +{ + struct sipa_cmn_fifo_cfg_tag *ipa_term_fifo; + + if (likely(id < SIPA_FIFO_MAX)) { + ipa_term_fifo = cfg_base + id; + } else { + pr_err("sipa don't have this id %d\n", id); + return -EINVAL; + } + + if (enable) + return ipa_phy_enable_int_bit(ipa_term_fifo->fifo_reg_base, + IPA_ERRORCODE_IN_TX_FIFO_EN); + else + return ipa_phy_disable_int_bit(ipa_term_fifo->fifo_reg_base, + IPA_ERRORCODE_IN_TX_FIFO_EN); +} + +static int +ipa_common_fifo_hal_set_intr_timeout(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 enable, u32 time, + sipa_irq_notify_cb cb) +{ + int ret; + struct sipa_cmn_fifo_cfg_tag *ipa_term_fifo; + + if (likely(id < SIPA_FIFO_MAX)) { + ipa_term_fifo = cfg_base + id; + } else { + pr_err("sipa don't have this id %d\n", id); + return -EINVAL; + } + + if (enable) { + ret = ipa_phy_set_tx_fifo_interrupt_delay_timer( + ipa_term_fifo->fifo_reg_base, + time); + if (ret) + ret = ipa_phy_enable_int_bit( + ipa_term_fifo->fifo_reg_base, + IPA_TXFIFO_INT_DELAY_TIMER_SW_EN); + else + pr_err("fifo(%d) set timeout threshold fail\n", id); + } else { + ret = ipa_phy_disable_int_bit(ipa_term_fifo->fifo_reg_base, + IPA_TXFIFO_INT_DELAY_TIMER_SW_EN); + } + + return ret; +} + +static int +ipa_common_fifo_hal_set_hw_intr_timeout(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 enable, u32 time, + sipa_irq_notify_cb cb) +{ + int ret; + struct sipa_cmn_fifo_cfg_tag *ipa_term_fifo; + + if (likely(id < SIPA_FIFO_MAX)) { + ipa_term_fifo = cfg_base + id; + } else { + pr_err("don't have this id %d\n", id); + return -EINVAL; + } + + if (enable) { + ret = ipa_phy_set_tx_fifo_interrupt_delay_timer( + ipa_term_fifo->fifo_reg_base, time); + if (!ret) + ret = ipa_phy_enable_int_bit( + ipa_term_fifo->fifo_reg_base, + IPA_TX_FIFO_DELAY_TIMER_EN); + else + pr_err("fifo(%d) set timeout threshold fail\n", id); + } else { + ret = ipa_phy_disable_int_bit(ipa_term_fifo->fifo_reg_base, + IPA_TX_FIFO_DELAY_TIMER_EN); + } + + return ret; +} + +static int +ipa_common_fifo_hal_set_intr_threshold(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 enable, u32 cnt, + sipa_irq_notify_cb cb) +{ + int ret; + struct sipa_cmn_fifo_cfg_tag *ipa_term_fifo; + + if (likely(id < SIPA_FIFO_MAX)) { + ipa_term_fifo = cfg_base + id; + } else { + pr_err("don't have this id %d\n", id); + return -EINVAL; + } + + if (enable) { + ret = ipa_phy_set_tx_fifo_interrupt_threshold( + ipa_term_fifo->fifo_reg_base, cnt); + if (!ret) { + ret = ipa_phy_enable_int_bit( + ipa_term_fifo->fifo_reg_base, + IPA_TXFIFO_INT_THRESHOLD_ONESHOT_EN); + } else { + pr_err("fifo(%d) set threshold fail\n", id); + } + } else { + ret = + ipa_phy_disable_int_bit(ipa_term_fifo->fifo_reg_base, + IPA_TXFIFO_INT_THRESHOLD_ONESHOT_EN); + } + + return ret; +} + +static int +ipa_common_fifo_hal_set_hw_intr_thres(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 enable, u32 cnt, + sipa_irq_notify_cb cb) +{ + int ret; + struct sipa_cmn_fifo_cfg_tag *ipa_term_fifo; + + if (likely(id < SIPA_FIFO_MAX)) { + ipa_term_fifo = cfg_base + id; + } else { + pr_err("don't have this id %d\n", id); + return -EINVAL; + } + + if (enable) { + ret = ipa_phy_set_tx_fifo_interrupt_threshold( + ipa_term_fifo->fifo_reg_base, cnt); + if (!ret) + ret = ipa_phy_enable_int_bit( + ipa_term_fifo->fifo_reg_base, + IPA_TX_FIFO_THRESHOLD_EN); + 
else + pr_err("fifo(%d) set threshold fail\n", id); + } else { + ret = ipa_phy_disable_int_bit(ipa_term_fifo->fifo_reg_base, + IPA_TX_FIFO_THRESHOLD_EN); + } + + return ret; +} + +static int +ipa_common_fifo_hal_set_src_dst_term(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 src, u32 dst) +{ + int ret; + struct sipa_cmn_fifo_cfg_tag *ipa_term_fifo; + + if (likely(id < SIPA_FIFO_MAX)) { + ipa_term_fifo = cfg_base + id; + } else { + pr_err("don't have this id %d\n", id); + return -EINVAL; + } + + ret = ipa_phy_set_cur_term_num(ipa_term_fifo->fifo_reg_base, src); + if (ret) { + pr_err("fifo %d set cur failed\n", id); + return ret; + } + + ret = ipa_phy_set_dst_term_num(ipa_term_fifo->fifo_reg_base, dst); + if (ret) { + pr_err("fifo %d set dst failed\n", id); + return ret; + } + + return 0; +} + +static int +ipa_common_fifo_hal_enable_local_flowctrl_intr(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag * + cfg_base, u32 enable, + u32 irq_mode, + sipa_irq_notify_cb cb) +{ + int ret; + u32 irq; + struct sipa_cmn_fifo_cfg_tag *ipa_term_fifo; + + if (likely(id < SIPA_FIFO_MAX)) { + ipa_term_fifo = cfg_base + id; + } else { + pr_err("don't have this id %d\n", id); + return -EINVAL; + } + + switch (irq_mode) { + case 0: + irq = IPA_RX_FIFO_INT_EXIT_FLOW_CTRL_EN; + break; + case 1: + irq = IPA_RX_FIFO_INT_ENTER_FLOW_CTRL_EN; + break; + case 2: + irq = IPA_RX_FIFO_INT_EXIT_FLOW_CTRL_EN | + IPA_RX_FIFO_INT_ENTER_FLOW_CTRL_EN; + break; + default: + pr_err("don't have this %d irq type\n", irq_mode); + return -EINVAL; + } + + if (enable) { + ret = ipa_phy_enable_int_bit(ipa_term_fifo->fifo_reg_base, irq); + if (ret) { + pr_err("fifo_id = %d irq_mode = %d set failed\n", + id, irq); + return ret; + } + } else { + ret = ipa_phy_disable_int_bit(ipa_term_fifo->fifo_reg_base, + irq); + } + + return ret; +} + +static int +ipa_common_fifo_hal_enable_remote_flowctrl_intr(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag * + cfg_base, u32 work_mode, + u32 tx_entry_watermark, + u32 tx_exit_watermark, + u32 rx_entry_watermark, + u32 rx_exit_watermark) +{ + int ret; + struct sipa_cmn_fifo_cfg_tag *ipa_term_fifo; + + if (likely(id < SIPA_FIFO_MAX)) { + ipa_term_fifo = cfg_base + id; + } else { + pr_err("don't have this id %d\n", id); + return -EINVAL; + } + + ret = ipa_phy_set_tx_fifo_exit_flow_ctrl_watermark( + ipa_term_fifo->fifo_reg_base, tx_exit_watermark); + if (unlikely(!ret)) { + pr_err("fifo_id = %d tx_exit_watermark(0x%x) failed\n", + id, tx_exit_watermark); + return ret; + } + + ret = ipa_phy_set_tx_fifo_entry_flow_ctrl_watermark( + ipa_term_fifo->fifo_reg_base, tx_entry_watermark); + if (unlikely(!ret)) { + pr_err("fifo_id = %d tx_entry_watermark(0x%x) failed\n", + id, tx_entry_watermark); + return ret; + } + + ret = ipa_phy_set_rx_fifo_exit_flow_ctrl_watermark( + ipa_term_fifo->fifo_reg_base, rx_exit_watermark); + if (unlikely(!ret)) { + pr_err("fifo_id = %d rx_exit_watermark(0x%x) failed\n", + id, rx_exit_watermark); + return ret; + } + + ret = ipa_phy_set_rx_fifo_entry_flow_ctrl_watermark( + ipa_term_fifo->fifo_reg_base, rx_entry_watermark); + if (unlikely(!ret)) { + pr_err("fifo_id = %d rx_entry_watermark(0x%x) failed\n", + id, rx_entry_watermark); + return ret; + } + + ret = ipa_phy_set_flow_ctrl_config(ipa_term_fifo->fifo_reg_base, + work_mode); + + return ret; +} + +static int +ipa_common_fifo_hal_set_interrupt_intr(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 enable, + sipa_irq_notify_cb cb) +{ + int ret; + struct 
sipa_cmn_fifo_cfg_tag *ipa_term_fifo; + + if (likely(id < SIPA_FIFO_MAX)) { + ipa_term_fifo = cfg_base + id; + } else { + pr_err("don't have this id %d\n", id); + return -EINVAL; + } + + if (enable) { + ret = ipa_phy_enable_int_bit(ipa_term_fifo->fifo_reg_base, + IPA_TX_FIFO_INTR_SW_BIT_EN); + } else { + ret = ipa_phy_disable_int_bit(ipa_term_fifo->fifo_reg_base, + IPA_TX_FIFO_INTR_SW_BIT_EN); + } + + return ret; +} + +static int +ipa_common_fifo_hal_set_intr_txfifo_overflow(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag * + cfg_base, u32 enable, + sipa_irq_notify_cb cb) +{ + int ret; + struct sipa_cmn_fifo_cfg_tag *ipa_term_fifo; + + if (likely(id < SIPA_FIFO_MAX)) { + ipa_term_fifo = cfg_base + id; + } else { + pr_err("don't have this id %d\n", id); + return -EINVAL; + } + + if (enable) { + ret = ipa_phy_enable_int_bit(ipa_term_fifo->fifo_reg_base, + IPA_TXFIFO_OVERFLOW_EN); + } else { + ret = ipa_phy_disable_int_bit(ipa_term_fifo->fifo_reg_base, + IPA_TXFIFO_OVERFLOW_EN); + } + + return ret; +} + +static int +ipa_common_fifo_hal_set_intr_txfifo_full(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 enable, sipa_irq_notify_cb cb) +{ + int ret; + struct sipa_cmn_fifo_cfg_tag *ipa_term_fifo; + + if (likely(id < SIPA_FIFO_MAX)) { + ipa_term_fifo = cfg_base + id; + } else { + pr_err("don't have this id %d\n", id); + return -EINVAL; + } + + if (enable) { + ret = ipa_phy_enable_int_bit(ipa_term_fifo->fifo_reg_base, + IPA_TXFIFO_FULL_INT_EN); + } else { + ret = ipa_phy_disable_int_bit(ipa_term_fifo->fifo_reg_base, + IPA_TXFIFO_FULL_INT_EN); + } + + return ret; +} + +/** + * Description: Receive Node from tx fifo. + * Input: + * @id: The FIFO id that need to be operated. + * @pkt: The node that need to be stored address. + * @num: The num of receive. + * OUTPUT: + * @The num that has be received from tx fifo successful. + * Note: + */ +static int +ipa_common_fifo_hal_put_node_to_rx_fifo(struct device *dev, + enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + struct sipa_node_description_tag *node, + u32 force_intr, u32 num) +{ + struct sipa_cmn_fifo_cfg_tag *ipa_term_fifo; + + if (likely(id < SIPA_FIFO_MAX)) { + ipa_term_fifo = cfg_base + id; + } else { + pr_err("don't have this id %d\n", id); + return -EINVAL; + } + + return ipa_put_pkt_to_cache_rx_fifo(dev, ipa_term_fifo, + node, num); +} + +static u32 +ipa_common_fifo_hal_get_left_cnt(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base) +{ + u32 left_cnt; + struct sipa_cmn_fifo_cfg_tag *ipa_term_fifo = NULL; + + if (likely(id < SIPA_FIFO_MAX)) { + ipa_term_fifo = cfg_base + id; + } else { + pr_err("don't have this id %d\n", id); + return 0; + } + + left_cnt = + ipa_phy_get_tx_fifo_total_depth(ipa_term_fifo->fifo_reg_base) - + ipa_phy_get_tx_fifo_filled_depth(ipa_term_fifo->fifo_reg_base); + + return left_cnt; +} + +/* + * Description: Send Node to rx fifo. + * Input: + * id: The FIFO id that need to be operated. + * pkt: The node address that need send to rx fifo. + * num: The number of need to send. + * OUTPUT: + * The number that has get from tx fifo successful. 
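+ * (In this implementation the hal simply returns the tx fifo filled
+ * depth, i.e. how many node descriptions are ready to be read back.)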
+ * Note: + */ +static u32 +ipa_common_fifo_hal_recv_node_from_tx_fifo(struct device *dev, + enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag * + cfg_base, u32 num) +{ + struct sipa_cmn_fifo_cfg_tag *ipa_term_fifo; + + if (likely(id < SIPA_FIFO_MAX)) { + ipa_term_fifo = cfg_base + id; + } else { + pr_err("don't have this id %d\n", id); + return 0; + } + + return ipa_recv_pkts_from_tx_fifo(dev, ipa_term_fifo, num); +} + +static void +ipa_common_fifo_hal_get_rx_ptr(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 *wr, u32 *rd) +{ + struct sipa_cmn_fifo_cfg_tag *ipa_term_fifo; + + if (likely(id < SIPA_FIFO_MAX)) { + ipa_term_fifo = cfg_base + id; + } else { + pr_err("don't have this id %d\n", id); + return; + } + + if (rd) + *rd = ipa_phy_get_rx_fifo_rptr(ipa_term_fifo->fifo_reg_base); + if (wr) + *wr = ipa_phy_get_rx_fifo_wptr(ipa_term_fifo->fifo_reg_base); +} + +static void +ipa_common_fifo_hal_get_tx_ptr(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 *wr, u32 *rd) +{ + struct sipa_cmn_fifo_cfg_tag *ipa_term_fifo; + + if (likely(id < SIPA_FIFO_MAX)) { + ipa_term_fifo = cfg_base + id; + } else { + pr_err("don't have this id %d\n", id); + return; + } + + if (rd) + *rd = ipa_phy_get_tx_fifo_rptr(ipa_term_fifo->fifo_reg_base); + if (wr) + *wr = ipa_phy_get_tx_fifo_wptr(ipa_term_fifo->fifo_reg_base); +} + +static void +ipa_common_fifo_hal_get_filled_depth(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base, + u32 *rx_filled, u32 *tx_filled) +{ + struct sipa_cmn_fifo_cfg_tag *ipa_term_fifo; + void __iomem *reg_base; + + if (likely(id < SIPA_FIFO_MAX)) { + ipa_term_fifo = cfg_base + id; + } else { + pr_err("don't have this id %d\n", id); + return; + } + + reg_base = ipa_term_fifo->fifo_reg_base; + if (tx_filled) + *tx_filled = ipa_phy_get_tx_fifo_filled_depth(reg_base); + if (rx_filled) + *rx_filled = ipa_phy_get_rx_fifo_filled_depth(reg_base); +} + +static u32 +ipa_common_fifo_hal_get_tx_full_status(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base) +{ + struct sipa_cmn_fifo_cfg_tag *ipa_term_fifo; + + if (likely(id < SIPA_FIFO_MAX)) { + ipa_term_fifo = cfg_base + id; + } else { + pr_err("don't have this id %d\n", id); + return 0; + } + + return ipa_phy_get_tx_fifo_full_status(ipa_term_fifo->fifo_reg_base); +} + +static u32 +ipa_common_fifo_hal_get_tx_empty_status(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base) +{ + struct sipa_cmn_fifo_cfg_tag *ipa_term_fifo; + + if (likely(id < SIPA_FIFO_MAX)) { + ipa_term_fifo = cfg_base + id; + } else { + pr_err("don't have this id %d\n", id); + return 0; + } + + return ipa_phy_get_tx_fifo_empty_status(ipa_term_fifo->fifo_reg_base); +} + +static u32 +ipa_common_fifo_hal_get_rx_full_status(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base) +{ + struct sipa_cmn_fifo_cfg_tag *ipa_term_fifo; + + if (likely(id < SIPA_FIFO_MAX)) { + ipa_term_fifo = cfg_base + id; + } else { + pr_err("don't have this id %d\n", id); + return 0; + } + + return ipa_phy_get_rx_fifo_full_status(ipa_term_fifo->fifo_reg_base); +} + +static u32 +ipa_common_fifo_hal_get_rx_empty_status(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *cfg_base) +{ + struct sipa_cmn_fifo_cfg_tag *ipa_term_fifo; + + if (likely(id < SIPA_FIFO_MAX)) { + ipa_term_fifo = cfg_base + id; + } else { + pr_err("don't have this id %d\n", id); + return 0; + } + + return ipa_phy_get_rx_fifo_empty_status(ipa_term_fifo->fifo_reg_base); +} + +static bool 
+ipa_common_fifo_hal_set_rx_fifo_wptr(enum sipa_cmn_fifo_index id,
+				     struct sipa_cmn_fifo_cfg_tag *cfg_base,
+				     u32 wptr)
+{
+	int ret;
+	u32 rx_wptr;
+	struct sipa_cmn_fifo_cfg_tag *ipa_term_fifo;
+
+	if (likely(id < SIPA_FIFO_MAX)) {
+		ipa_term_fifo = cfg_base + id;
+	} else {
+		pr_err("don't have this id %d\n", id);
+		return false;
+	}
+
+	rx_wptr = ipa_phy_get_rx_fifo_wptr(ipa_term_fifo->fifo_reg_base);
+
+	if (wptr != rx_wptr) {
+		wptr = wptr & PTR_MASK(ipa_term_fifo->rx_fifo.depth);
+		ipa_term_fifo->rx_fifo.wr = wptr;
+		ret = ipa_phy_update_rx_fifo_wptr(ipa_term_fifo->fifo_reg_base,
+						  wptr);
+		/* the phy update helpers return 0 on success */
+		if (ret) {
+			pr_err("fifo id = %d update rx fifo wptr = 0x%x failed !!!\n",
+			       id, wptr);
+			return false;
+		}
+	}
+
+	return true;
+}
+
+static bool
+ipa_common_fifo_hal_set_tx_fifo_wptr(enum sipa_cmn_fifo_index id,
+				     struct sipa_cmn_fifo_cfg_tag *cfg_base,
+				     u32 wptr)
+{
+	int ret;
+	u32 tx_wptr;
+	struct sipa_cmn_fifo_cfg_tag *ipa_term_fifo;
+
+	if (likely(id < SIPA_FIFO_MAX)) {
+		ipa_term_fifo = cfg_base + id;
+	} else {
+		pr_err("don't have this id %d\n", id);
+		return false;
+	}
+
+	tx_wptr = ipa_phy_get_tx_fifo_wptr(ipa_term_fifo->fifo_reg_base);
+
+	if (wptr != tx_wptr) {
+		wptr = wptr & PTR_MASK(ipa_term_fifo->tx_fifo.depth);
+		ipa_term_fifo->tx_fifo.wr = wptr;
+		ret = ipa_phy_update_tx_fifo_wptr(ipa_term_fifo->fifo_reg_base,
+						  wptr);
+		if (ret) {
+			pr_err("fifo id = %d update tx fifo wptr = 0x%x failed !!!\n",
+			       id, wptr);
+			return false;
+		}
+	}
+
+	return true;
+}
+
+static int
+ipa_common_fifo_hal_set_rx_tx_fifo_ptr(enum sipa_cmn_fifo_index id,
+				       struct sipa_cmn_fifo_cfg_tag *cfg_base,
+				       u32 rx_rd, u32 rx_wr,
+				       u32 tx_rd, u32 tx_wr)
+{
+	int ret = 0, ret1 = 0;
+	u32 rx_rptr = 0, rx_wptr = 0;
+	u32 tx_rptr = 0, tx_wptr = 0;
+	struct sipa_cmn_fifo_cfg_tag *ipa_term_fifo;
+
+	if (likely(id < SIPA_FIFO_MAX)) {
+		ipa_term_fifo = cfg_base + id;
+	} else {
+		pr_err("don't have this id %d\n", id);
+		return -EINVAL;
+	}
+
+	tx_wptr = ipa_phy_get_tx_fifo_wptr(ipa_term_fifo->fifo_reg_base);
+	tx_rptr = ipa_phy_get_tx_fifo_rptr(ipa_term_fifo->fifo_reg_base);
+	rx_wptr = ipa_phy_get_rx_fifo_wptr(ipa_term_fifo->fifo_reg_base);
+	rx_rptr = ipa_phy_get_rx_fifo_rptr(ipa_term_fifo->fifo_reg_base);
+
+	if (rx_rd != rx_rptr) {
+		rx_rd = rx_rd & PTR_MASK(ipa_term_fifo->rx_fifo.depth);
+		ipa_term_fifo->rx_fifo.rd = rx_rd;
+		ret = ipa_phy_update_rx_fifo_rptr(ipa_term_fifo->fifo_reg_base,
+						  rx_rd);
+		if (ret) {
+			ret1 = -EIO;
+			pr_err("update rx fifo rptr = 0x%x failed !!!\n", rx_rd);
+		}
+	}
+
+	if (rx_wr != rx_wptr) {
+		rx_wr = rx_wr & PTR_MASK(ipa_term_fifo->rx_fifo.depth);
+		ipa_term_fifo->rx_fifo.wr = rx_wr;
+		ret = ipa_phy_update_rx_fifo_wptr(ipa_term_fifo->fifo_reg_base,
+						  rx_wr);
+		if (ret) {
+			ret1 = -EIO;
+			pr_err("update rx fifo wptr = 0x%x failed !!!\n", rx_wr);
+		}
+	}
+
+	if (tx_rd != tx_rptr) {
+		tx_rd = tx_rd & PTR_MASK(ipa_term_fifo->tx_fifo.depth);
+		ipa_term_fifo->tx_fifo.rd = tx_rd;
+		ret = ipa_phy_update_tx_fifo_rptr(ipa_term_fifo->fifo_reg_base,
+						  tx_rd);
+		if (ret) {
+			ret1 = -EIO;
+			pr_err("update tx fifo rptr = 0x%x failed !!!\n", tx_rd);
+		}
+	}
+
+	if (tx_wr != tx_wptr) {
+		tx_wr = tx_wr & PTR_MASK(ipa_term_fifo->tx_fifo.depth);
+		ipa_term_fifo->tx_fifo.wr = tx_wr;
+		ret = ipa_phy_update_tx_fifo_wptr(ipa_term_fifo->fifo_reg_base,
+						  tx_wr);
+		if (ret) {
+			ret1 = -EIO;
+			pr_err("update tx fifo wptr = 0x%x failed !!!\n", tx_wr);
+		}
+	}
+
+	return ret1;
+}
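All four pointer writers above normalize the caller's value with PTR_MASK() before touching the hardware, and only update a pointer that actually differs from what the register reports. A hedged sketch of one way a caller might re-sync every ring pointer in a single call, for example after a link reset; the fifo id and the saved_* values are hypothetical and would have been captured earlier through get_rx_ptr()/get_tx_ptr():

/* Hedged sketch: restore previously saved ring pointers in one call. */
static int example_resync_ring_ptrs(struct sipa_fifo_hal_ops *ops,
				    struct sipa_cmn_fifo_cfg_tag *cfg_base,
				    u32 saved_rx_rd, u32 saved_rx_wr,
				    u32 saved_tx_rd, u32 saved_tx_wr)
{
	int ret;

	ret = ops->set_rx_tx_fifo_ptr(SIPA_FIFO_PCIE_UL, cfg_base,
				      saved_rx_rd, saved_rx_wr,
				      saved_tx_rd, saved_tx_wr);
	if (ret)
		pr_err("ring pointer re-sync failed %d\n", ret);

	return ret;
}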
+static int
+ipa_common_fifo_hal_ctrl_receive(enum sipa_cmn_fifo_index id,
+				 struct sipa_cmn_fifo_cfg_tag *cfg_base,
+				 bool stop)
+{
+	struct sipa_cmn_fifo_cfg_tag *ipa_term_fifo;
+
+	if (likely(id < SIPA_FIFO_MAX)) {
+		ipa_term_fifo = cfg_base + id;
+	} else {
+		pr_err("don't have this id %d\n", id);
+		return -EINVAL;
+	}
+
+	if (stop)
+		return ipa_phy_stop_receive(ipa_term_fifo->fifo_reg_base);
+	else
+		return ipa_phy_clear_stop_receive(ipa_term_fifo->fifo_reg_base);
+}
+
+static struct sipa_node_description_tag *
+ipa_common_fifo_hal_get_tx_fifo_rp(enum sipa_cmn_fifo_index id,
+				   struct sipa_cmn_fifo_cfg_tag *cfg_base,
+				   u32 index)
+{
+	u32 tmp;
+	struct sipa_cmn_fifo_cfg_tag *fifo_cfg;
+	struct sipa_node_description_tag *node;
+
+	if (unlikely(id >= SIPA_FIFO_MAX))
+		return NULL;
+
+	fifo_cfg = cfg_base + id;
+	node = (struct sipa_node_description_tag *)
+		fifo_cfg->tx_fifo.virtual_addr;
+
+	if (unlikely(!node))
+		return NULL;
+
+	tmp = (fifo_cfg->tx_fifo.rd + index) & (fifo_cfg->tx_fifo.depth - 1);
+
+	return node + tmp;
+}
+
+static struct sipa_node_description_tag *
+ipa_common_fifo_hal_get_rx_fifo_wr(enum sipa_cmn_fifo_index id,
+				   struct sipa_cmn_fifo_cfg_tag *cfg_base,
+				   u32 index)
+{
+	u32 tmp;
+	struct sipa_cmn_fifo_cfg_tag *fifo_cfg;
+	struct sipa_node_description_tag *node;
+
+	if (unlikely(id >= SIPA_FIFO_MAX))
+		return NULL;
+
+	fifo_cfg = cfg_base + id;
+	node = (struct sipa_node_description_tag *)
+		fifo_cfg->rx_fifo.virtual_addr;
+
+	if (unlikely(!node))
+		return NULL;
+
+	if (index >= fifo_cfg->rx_fifo.depth)
+		return NULL;
+
+	tmp = (fifo_cfg->rx_fifo.wr + index) & (fifo_cfg->rx_fifo.depth - 1);
+
+	return node + tmp;
+}
+
+static int ipa_common_fifo_hal_set_tx_fifo_rp(enum sipa_cmn_fifo_index id,
+					      struct sipa_cmn_fifo_cfg_tag *cfg_base,
+					      u32 tx_rd)
+{
+	int ret;
+	struct sipa_cmn_fifo_cfg_tag *fifo_cfg;
+
+	if (unlikely(id >= SIPA_FIFO_MAX))
+		return -EINVAL;
+
+	fifo_cfg = cfg_base + id;
+	fifo_cfg->tx_fifo.rd = (fifo_cfg->tx_fifo.rd + tx_rd) &
+		PTR_MASK(fifo_cfg->tx_fifo.depth);
+	ret = ipa_phy_update_tx_fifo_rptr(fifo_cfg->fifo_reg_base,
+					  fifo_cfg->tx_fifo.rd);
+
+	if (ret) {
+		pr_err("update tx fifo rptr fail !!!\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ipa_common_fifo_hal_set_rx_fifo_wr(struct device *dev,
+					      enum sipa_cmn_fifo_index id,
+					      struct sipa_cmn_fifo_cfg_tag *cfg_base,
+					      u32 num)
+{
+	int ret;
+	dma_addr_t dma_addr;
+	struct sipa_cmn_fifo_cfg_tag *fifo_cfg;
+	u32 tmp = 0, tmp1 = 0, index = 0, left_cnt = 0;
+	ssize_t node_size = sizeof(struct sipa_node_description_tag);
+
+	if (unlikely(id >= SIPA_FIFO_MAX))
+		return -EINVAL;
+
+	fifo_cfg = cfg_base + id;
+	dma_addr = fifo_cfg->rx_fifo.fifo_base_addr_l;
+
+	left_cnt = fifo_cfg->rx_fifo.depth -
+		ipa_phy_get_rx_fifo_filled_depth(fifo_cfg->fifo_reg_base);
+
+	if (!left_cnt)
+		return -ENOSPC;
+
+	if (left_cnt < num)
+		num = left_cnt;
+
+	index = fifo_cfg->rx_fifo.wr & (fifo_cfg->rx_fifo.depth - 1);
+	if (index + num <= fifo_cfg->rx_fifo.depth) {
+		dma_sync_single_for_device(dev, dma_addr + index * node_size,
+					   node_size * num, DMA_TO_DEVICE);
+	} else {
+		tmp = fifo_cfg->rx_fifo.depth - index;
+		dma_sync_single_for_device(dev, dma_addr + index * node_size,
+					   node_size * tmp, DMA_TO_DEVICE);
+		tmp1 = num - tmp;
+		dma_sync_single_for_device(dev, dma_addr,
+					   node_size * tmp1, DMA_TO_DEVICE);
+	}
+
+	fifo_cfg->rx_fifo.wr = (fifo_cfg->rx_fifo.wr + num) &
+		PTR_MASK(fifo_cfg->rx_fifo.depth);
+	ret = ipa_phy_update_rx_fifo_wptr(fifo_cfg->fifo_reg_base,
+					  fifo_cfg->rx_fifo.wr);
+
+	if (ret) {
+		pr_err("update rx fifo wptr fail !!!\n");
+		return -EIO;
+	}
+
+	return 0;
+}
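set_rx_fifo_wr above has to sync the descriptor ring for the device in at most two chunks, because the write window may wrap past the end of the ring. A condensed sketch of that same split for a generic power-of-two ring (all parameters hypothetical, standard DMA API only):

/* Hedged sketch: sync [index, index + num) of a ring that wraps. */
static void example_sync_ring_range(struct device *dev, dma_addr_t base,
				    u32 index, u32 num, u32 depth,
				    size_t item_size)
{
	if (index + num <= depth) {
		dma_sync_single_for_device(dev, base + index * item_size,
					   num * item_size, DMA_TO_DEVICE);
	} else {
		u32 first = depth - index;	/* entries until the wrap */

		dma_sync_single_for_device(dev, base + index * item_size,
					   first * item_size, DMA_TO_DEVICE);
		dma_sync_single_for_device(dev, base,
					   (num - first) * item_size,
					   DMA_TO_DEVICE);
	}
}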
+static int ipa_common_fifo_set_intr_eb(enum sipa_cmn_fifo_index id,
+				       struct sipa_cmn_fifo_cfg_tag *cfg_base,
+				       bool eb, u32 type)
+{
+	struct sipa_cmn_fifo_cfg_tag *fifo_cfg;
+
+	if (unlikely(id >= SIPA_FIFO_MAX))
+		return -EINVAL;
+
+	fifo_cfg = cfg_base + id;
+
+	if (eb)
+		return ipa_phy_enable_int_bit(fifo_cfg->fifo_reg_base, type);
+	else
+		return ipa_phy_disable_int_bit(fifo_cfg->fifo_reg_base, type);
+}
+
+static void ipa_common_fifo_clr_tout_th_intr(enum sipa_cmn_fifo_index id,
+					     struct sipa_cmn_fifo_cfg_tag *cfg_base)
+{
+	struct sipa_cmn_fifo_cfg_tag *fifo_cfg;
+
+	if (unlikely(id >= SIPA_FIFO_MAX))
+		return;
+
+	fifo_cfg = cfg_base + id;
+
+	ipa_phy_clear_int(fifo_cfg->fifo_reg_base,
+			  IPA_TX_FIFO_TIMER_CLR_BIT |
+			  IPA_TX_FIFO_THRESHOLD_CLR_BIT);
+}
+
+void sipa_fifo_ops_init(struct sipa_fifo_hal_ops *ops)
+{
+	ops->open = ipa_common_fifo_hal_open;
+	ops->close = ipa_common_fifo_hal_close;
+	ops->enable_remote_flowctrl_intr =
+		ipa_common_fifo_hal_enable_remote_flowctrl_intr;
+	ops->enable_local_flowctrl_intr =
+		ipa_common_fifo_hal_enable_local_flowctrl_intr;
+	ops->get_left_cnt = ipa_common_fifo_hal_get_left_cnt;
+	ops->put_node_to_rx_fifo = ipa_common_fifo_hal_put_node_to_rx_fifo;
+	ops->recv_node_from_tx_fifo =
+		ipa_common_fifo_hal_recv_node_from_tx_fifo;
+	ops->set_intr_drop_packet = ipa_common_fifo_hal_set_intr_drop_packet;
+	ops->set_intr_error_code = ipa_common_fifo_hal_set_intr_error_code;
+	ops->set_intr_threshold = ipa_common_fifo_hal_set_intr_threshold;
+	ops->set_intr_timeout = ipa_common_fifo_hal_set_intr_timeout;
+	ops->set_hw_intr_thres = ipa_common_fifo_hal_set_hw_intr_thres;
+	ops->set_hw_intr_timeout = ipa_common_fifo_hal_set_hw_intr_timeout;
+	ops->set_interrupt_intr = ipa_common_fifo_hal_set_interrupt_intr;
+	ops->set_intr_txfifo_full = ipa_common_fifo_hal_set_intr_txfifo_full;
+	ops->set_intr_txfifo_overflow =
+		ipa_common_fifo_hal_set_intr_txfifo_overflow;
+	ops->set_rx_depth = ipa_common_fifo_hal_set_rx_depth;
+	ops->set_tx_depth = ipa_common_fifo_hal_set_tx_depth;
+	ops->get_rx_depth = ipa_common_fifo_hal_get_rx_depth;
+	ops->get_tx_depth = ipa_common_fifo_hal_get_tx_depth;
+	ops->get_tx_ptr = ipa_common_fifo_hal_get_tx_ptr;
+	ops->get_rx_ptr = ipa_common_fifo_hal_get_rx_ptr;
+	ops->get_filled_depth = ipa_common_fifo_hal_get_filled_depth;
+	ops->get_tx_empty_status = ipa_common_fifo_hal_get_tx_empty_status;
+	ops->get_tx_full_status = ipa_common_fifo_hal_get_tx_full_status;
+	ops->get_rx_empty_status = ipa_common_fifo_hal_get_rx_empty_status;
+	ops->get_rx_full_status = ipa_common_fifo_hal_get_rx_full_status;
+	ops->set_rx_tx_fifo_ptr = ipa_common_fifo_hal_set_rx_tx_fifo_ptr;
+	ops->set_tx_fifo_wptr = ipa_common_fifo_hal_set_tx_fifo_wptr;
+	ops->set_rx_fifo_wptr = ipa_common_fifo_hal_set_rx_fifo_wptr;
+	ops->set_src_dst_term = ipa_common_fifo_hal_set_src_dst_term;
+	ops->ctrl_receive = ipa_common_fifo_hal_ctrl_receive;
+	ops->set_tx_fifo_rp = ipa_common_fifo_hal_set_tx_fifo_rp;
+	ops->get_tx_fifo_rp = ipa_common_fifo_hal_get_tx_fifo_rp;
+	ops->get_rx_fifo_wr = ipa_common_fifo_hal_get_rx_fifo_wr;
+	ops->set_rx_fifo_wr = ipa_common_fifo_hal_set_rx_fifo_wr;
+	ops->set_intr_eb = ipa_common_fifo_set_intr_eb;
+	ops->clr_tout_th_intr = ipa_common_fifo_clr_tout_th_intr;
+}
+EXPORT_SYMBOL(sipa_fifo_ops_init);
diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_phy_v0/sipa_fifo_irq_hal.c b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_phy_v0/sipa_fifo_irq_hal.c
new file
mode 100644 index 000000000..36fa1dcc0 --- /dev/null +++ b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_phy_v0/sipa_fifo_irq_hal.c @@ -0,0 +1,74 @@ +#include "../../include/sipa.h" + +#include "../sipa_core.h" +#include "sipa_fifo_phy.h" + +static void ipa_fifo_traverse_int_bit(enum sipa_cmn_fifo_index id, + struct sipa_cmn_fifo_cfg_tag *ipa_cfg) +{ + void __iomem *fifo_base; + u32 clr_sts = 0; + u32 int_status = 0; + + fifo_base = ipa_cfg->fifo_reg_base; + int_status = ipa_phy_get_fifo_all_int_sts(fifo_base); + + if (!(int_status & IPA_INT_STS_GROUP)) + return; + + if (int_status & IPA_INT_EXIT_FLOW_CTRL_STS) { + ipa_cfg->exit_flow_ctrl_cnt++; + clr_sts |= IPA_EXIT_FLOW_CONTROL_CLR_BIT; + } + + if (int_status & IPA_INT_ERRORCODE_IN_TX_FIFO_STS) + clr_sts |= IPA_ERROR_CODE_INTR_CLR_BIT; + + if (int_status & IPA_INT_ENTER_FLOW_CTRL_STS) { + ipa_cfg->enter_flow_ctrl_cnt++; + clr_sts |= IPA_ENTRY_FLOW_CONTROL_CLR_BIT; + } + + if (int_status & IPA_INT_INTR_BIT_STS) + clr_sts |= IPA_TX_FIFO_INTR_CLR_BIT; + + if (int_status & IPA_INT_THRESHOLD_STS || + int_status & IPA_INT_DELAY_TIMER_STS) { + ipa_phy_disable_int_bit(ipa_cfg->fifo_reg_base, + IPA_TX_FIFO_THRESHOLD_EN | + IPA_TX_FIFO_DELAY_TIMER_EN); + clr_sts |= IPA_TX_FIFO_THRESHOLD_CLR_BIT | + IPA_TX_FIFO_TIMER_CLR_BIT; + } + + if (int_status & IPA_INT_DROP_PACKT_OCCUR) + clr_sts |= IPA_DROP_PACKET_INTR_CLR_BIT; + + if (int_status & IPA_INT_TXFIFO_OVERFLOW_STS) + clr_sts |= IPA_TX_FIFO_OVERFLOW_CLR_BIT; + + if (int_status & IPA_INT_TXFIFO_FULL_INT_STS) + clr_sts |= IPA_TX_FIFO_FULL_INT_CLR_BIT; + + if (ipa_cfg->irq_cb) + ipa_cfg->irq_cb(ipa_cfg->priv, int_status, id); + else + pr_err("Don't register this fifo(%d) irq callback\n", id); + + ipa_phy_clear_int(ipa_cfg->fifo_reg_base, clr_sts); +} + +int sipa_int_callback_func(int evt, void *cookie) +{ + struct sipa_core *ipa = sipa_get_ctrl_pointer(); + + if (ipa->remote_ready) { + ipa_fifo_traverse_int_bit(SIPA_FIFO_PCIE_DL, + &ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_DL]); + ipa_fifo_traverse_int_bit(SIPA_FIFO_PCIE_UL, + &ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_UL]); + } + + return 0; +} +EXPORT_SYMBOL(sipa_int_callback_func); diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_phy_v0/sipa_fifo_phy.h b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_phy_v0/sipa_fifo_phy.h new file mode 100644 index 000000000..d578585c2 --- /dev/null +++ b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_phy_v0/sipa_fifo_phy.h @@ -0,0 +1,1413 @@ +#ifndef _IPA_CP0_FIFO_PHY_H_ +#define _IPA_CP0_FIFO_PHY_H_ + +#include + +/* Common fifo reg */ +#define IPA_COMMON_RX_FIFO_DEPTH 0x00UL +#define IPA_COMMON_RX_FIFO_WR 0x04UL +#define IPA_COMMON_RX_FIFO_RD 0x08UL +#define IPA_COMMON_TX_FIFO_DEPTH 0x0CUL +#define IPA_COMMON_TX_FIFO_WR 0x10UL +#define IPA_COMMON_TX_FIFO_RD 0x14UL +#define IPA_COMMON_RX_FIFO_ADDRL 0x18UL +#define IPA_COMMON_RX_FIFO_ADDRH 0x1CUL +#define IPA_COMMON_TX_FIFO_ADDRL 0x20UL +#define IPA_COMMON_TX_FIFO_ADDRH 0x24UL +#define IPA_PERFETCH_FIFO_CTL 0x28UL +#define IPA_INT_GEN_CTL_TX_FIFO_VALUE 0x2CUL +#define IPA_INT_GEN_CTL_EN 0x30UL +#define IPA_DROP_PACKET_CNT 0x34UL +#define IPA_FLOW_CTRL_CONFIG 0x38UL +#define IPA_TX_FIFO_FLOW_CTRL 0x3CUL +#define IPA_RX_FIFO_FLOW_CTRL 0x40UL +#define IPA_RX_FIFO_FULL_NEG_PULSE_NUM 0x44UL +#define IPA_INT_GEN_CTL_CLR 0x48UL +#define IPA_INTR_RX_FIFO_FULL_ADDR_HIGH 0x4CUL +#define IPA_INTR_MEM_WR_ADDR_LOW 0x50UL +#define IPA_RXFIFO_FULL_MEM_WR_ADDR_LOW 0x54UL +#define IPA_INTR_MEM_WR_PATTERN 0x58UL +#define IPA_RX_FIFO_FULL_MEM_WR_PATTERN 0x5CUL 
+#define IPA_TX_FIFO_WR_INIT			0x60UL
+#define IPA_COMMON_RX_FIFO_AXI_STS		0x64UL
+#define IPA_ERRCODE_INT_ADDR_LOW		0x68UL
+#define IPA_ERRCODE_INT_PATTERN			0x6CUL
+
+/* Fifo interrupt enable bit */
+#define IPA_TXFIFO_INT_THRESHOLD_ONESHOT_EN	BIT(11)
+#define IPA_TXFIFO_INT_THRESHOLD_SW_EN		BIT(10)
+#define IPA_TXFIFO_INT_DELAY_TIMER_SW_EN	BIT(9)
+#define IPA_TXFIFO_FULL_INT_EN			BIT(8)
+#define IPA_TXFIFO_OVERFLOW_EN			BIT(7)
+#define IPA_ERRORCODE_IN_TX_FIFO_EN		BIT(6)
+#define IPA_DROP_PACKET_OCCUR_INT_EN		BIT(5)
+#define IPA_RX_FIFO_INT_EXIT_FLOW_CTRL_EN	BIT(4)
+#define IPA_RX_FIFO_INT_ENTER_FLOW_CTRL_EN	BIT(3)
+#define IPA_TX_FIFO_INTR_SW_BIT_EN		BIT(2)
+#define IPA_TX_FIFO_THRESHOLD_EN		BIT(1)
+#define IPA_TX_FIFO_DELAY_TIMER_EN		BIT(0)
+#define IPA_INT_EN_BIT_GROUP			0x00000FFFUL
+
+/* Fifo interrupt status bit */
+#define IPA_INT_TX_FIFO_THRESHOLD_SW_STS	BIT(22)
+#define IPA_INT_EXIT_FLOW_CTRL_STS		BIT(20)
+#define IPA_INT_ENTER_FLOW_CTRL_STS		BIT(19)
+#define IPA_INT_TXFIFO_FULL_INT_STS		BIT(18)
+#define IPA_INT_TXFIFO_OVERFLOW_STS		BIT(17)
+#define IPA_INT_ERRORCODE_IN_TX_FIFO_STS	BIT(16)
+#define IPA_INT_INTR_BIT_STS			BIT(15)
+#define IPA_INT_THRESHOLD_STS			BIT(14)
+#define IPA_INT_DELAY_TIMER_STS			BIT(13)
+#define IPA_INT_DROP_PACKT_OCCUR		BIT(12)
+#define IPA_INT_STS_GROUP			0x5FF000UL
+
+/* Fifo interrupt clear bit */
+#define IPA_TX_FIFO_TIMER_CLR_BIT		BIT(0)
+#define IPA_TX_FIFO_THRESHOLD_CLR_BIT		BIT(1)
+#define IPA_TX_FIFO_INTR_CLR_BIT		BIT(2)
+#define IPA_ENTRY_FLOW_CONTROL_CLR_BIT		BIT(3)
+#define IPA_EXIT_FLOW_CONTROL_CLR_BIT		BIT(4)
+#define IPA_DROP_PACKET_INTR_CLR_BIT		BIT(5)
+#define IPA_ERROR_CODE_INTR_CLR_BIT		BIT(6)
+#define IPA_TX_FIFO_OVERFLOW_CLR_BIT		BIT(7)
+#define IPA_TX_FIFO_FULL_INT_CLR_BIT		BIT(8)
+#define IPA_INT_STS_CLR_GROUP			0x000001FFUL
+
+#define NODE_DESCRIPTION_SIZE 128UL
+
+/**
+ * Description: set rx fifo total depth.
+ * Input:
+ * @fifo_base: Need to set total depth of the fifo,
+ * the base address of the FIFO.
+ * @depth: the size of depth.
+ * return:
+ * 0: set successfully.
+ * non-zero: set failed.
+ * Note:
+ */
+static inline int ipa_phy_set_rx_fifo_total_depth(void __iomem *fifo_base,
+						  u32 depth)
+{
+	u32 tmp;
+
+	if (depth > 0xFFFF)
+		return -EINVAL;
+
+	tmp = readl_relaxed(fifo_base + IPA_COMMON_RX_FIFO_DEPTH);
+	tmp &= 0x0000FFFFUL;
+	tmp |= (depth << 16);
+	writel_relaxed(tmp, fifo_base + IPA_COMMON_RX_FIFO_DEPTH);
+
+	/* read back and verify that the depth field took the new value */
+	tmp = readl_relaxed(fifo_base + IPA_COMMON_RX_FIFO_DEPTH);
+	if ((tmp >> 16) == depth)
+		return 0;
+	else
+		return -EINVAL;
+}
+
+/**
+ * Description: get rx fifo total depth.
+ * Input:
+ * @fifo_base: Need to get total depth of the fifo, the base address of the
+ * FIFO.
+ * return: The size of total depth.
+ * Note:
+ */
+static inline u32 ipa_phy_get_rx_fifo_total_depth(void __iomem *fifo_base)
+{
+	u32 tmp;
+
+	tmp = readl_relaxed(fifo_base + IPA_COMMON_RX_FIFO_DEPTH);
+
+	return (tmp >> 16) & 0x0000FFFFUL;
+}
+
+/**
+ * Description: get rx fifo filled depth.
+ * Input:
+ * @fifo_base: Need to get filled depth of the FIFO, the base address of the
+ * FIFO.
+ * return:
+ * The rx fifo filled depth.
+ * Note:
+ */
+static inline u32 ipa_phy_get_rx_fifo_filled_depth(void __iomem *fifo_base)
+{
+	u32 tmp;
+
+	tmp = readl_relaxed(fifo_base + IPA_COMMON_RX_FIFO_DEPTH);
+
+	return tmp & 0x0000FFFFUL;
+}
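Every setter in this header follows the same read-modify-write-then-verify pattern on a 16-bit register field. A condensed sketch of that pattern, assuming (as in the helpers above) that the field of interest occupies the upper half of a 32-bit register; the helper name is illustrative, not part of the driver:

static inline int example_set_upper16(void __iomem *reg, u32 val)
{
	u32 tmp;

	if (val > 0xFFFFUL)
		return -EINVAL;

	/* read-modify-write the upper 16-bit field */
	tmp = readl_relaxed(reg);
	tmp = (tmp & 0x0000FFFFUL) | (val << 16);
	writel_relaxed(tmp, reg);

	/* read back to confirm the write landed, as the helpers here do */
	return ((readl_relaxed(reg) >> 16) == val) ? 0 : -EIO;
}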
+/**
+ * Description: get rx fifo full status.
+ * Input:
+ * @fifo_base: Need to get rx fifo full status of the FIFO, the base address
+ * of the FIFO.
+ * return:
+ * 1: rx fifo full.
+ * 0: rx fifo not full.
+ * Note:
+ */
+static inline u32 ipa_phy_get_rx_fifo_full_status(void __iomem *fifo_base)
+{
+	u32 tmp;
+
+	tmp = readl_relaxed(fifo_base + IPA_COMMON_RX_FIFO_WR);
+
+	return tmp & 0x1UL;
+}
+
+/**
+ * Description: update rx fifo write pointer.
+ * Input:
+ * @fifo_base: Need to update rx fifo write pointer of the FIFO, the base
+ * address of the FIFO.
+ * return:
+ * 0: update rx fifo write pointer successfully,
+ * non-zero: update rx fifo write pointer failed.
+ * Note:
+ */
+static inline int ipa_phy_update_rx_fifo_wptr(void __iomem *fifo_base,
+					      u32 wptr)
+{
+	u32 tmp = 0;
+	void __iomem *fifo_reg_addr;
+
+	fifo_reg_addr = fifo_base + IPA_COMMON_RX_FIFO_WR;
+
+	if (wptr > 0xFFFFUL)
+		return -EINVAL;
+
+	tmp = readl_relaxed(fifo_reg_addr);
+	tmp &= 0x0000FFFFUL;
+	tmp |= (wptr << 16);
+	writel_relaxed(tmp, fifo_reg_addr);
+
+	tmp = readl_relaxed(fifo_reg_addr);
+	if ((tmp >> 16) == wptr)
+		return 0;
+	else
+		return -EIO;
+}
+
+/**
+ * Description: get rx fifo write pointer.
+ * Input:
+ * @fifo_base: Need to get rx fifo write pointer of the FIFO, the base
+ * address of the FIFO.
+ * return:
+ * The write pointer of rx fifo.
+ * Note:
+ */
+static inline u32 ipa_phy_get_rx_fifo_wptr(void __iomem *fifo_base)
+{
+	u32 tmp;
+
+	tmp = readl_relaxed(fifo_base + IPA_COMMON_RX_FIFO_WR);
+
+	return (tmp >> 16);
+}
+
+/**
+ * Description: update rx fifo read pointer.
+ * Input:
+ * @fifo_base: Need to update rx fifo read pointer of the FIFO, the base
+ * address of the FIFO.
+ * return:
+ * 0: update rx fifo read pointer successfully,
+ * non-zero: update rx fifo read pointer failed.
+ * Note:
+ */
+static inline int ipa_phy_update_rx_fifo_rptr(void __iomem *fifo_base,
+					      u32 rptr)
+{
+	u32 tmp = 0;
+	void __iomem *fifo_reg_addr;
+
+	fifo_reg_addr = fifo_base + IPA_COMMON_RX_FIFO_RD;
+
+	if (rptr > 0xFFFFUL)
+		return -EINVAL;
+
+	tmp = readl_relaxed(fifo_reg_addr);
+	tmp &= 0x0000FFFFUL;
+	tmp |= (rptr << 16);
+	writel_relaxed(tmp, fifo_reg_addr);
+
+	tmp = readl_relaxed(fifo_reg_addr);
+
+	if ((tmp >> 16) == rptr)
+		return 0;
+	else
+		return -EIO;
+}
+
+/**
+ * Description: get rx fifo read pointer.
+ * Input:
+ * @fifo_base: Need to get rx fifo read pointer of the FIFO, the base
+ * address of the FIFO.
+ * return:
+ * The read pointer of rx fifo.
+ * Note:
+ */
+static inline u32 ipa_phy_get_rx_fifo_rptr(void __iomem *fifo_base)
+{
+	u32 tmp;
+
+	tmp = readl_relaxed(fifo_base + IPA_COMMON_RX_FIFO_RD);
+
+	return (tmp >> 16);
+}
+
+/**
+ * Description: get rx fifo empty status.
+ * Input:
+ * @fifo_base: Need to get rx fifo empty status of the FIFO, the base
+ * address of the FIFO.
+ * return:
+ * The empty status of rx fifo.
+ * Note:
+ */
+static inline u32 ipa_phy_get_rx_fifo_empty_status(void __iomem *fifo_base)
+{
+	u32 tmp;
+
+	tmp = readl_relaxed(fifo_base + IPA_COMMON_RX_FIFO_RD);
+
+	return tmp & 0x1UL;
+}
+
+/**
+ * Description: set tx fifo total depth.
+ * Input:
+ * @fifo_base: Need to set tx fifo total depth of the FIFO, the base
+ * address of the FIFO.
+ * return:
+ * 0: set tx fifo total depth successfully.
+ * non-zero: set tx fifo total depth failed.
+ * Note: + */ +static inline int ipa_phy_set_tx_fifo_total_depth(void __iomem *fifo_base, + u32 depth) +{ + u32 tmp; + void __iomem *fifo_reg_addr; + + fifo_reg_addr = fifo_base + IPA_COMMON_TX_FIFO_DEPTH; + + if (depth > 0xFFFFUL) + return -EINVAL; + + tmp = readl_relaxed(fifo_reg_addr); + tmp &= 0x0000FFFFUL; + tmp |= (depth << 16); + writel_relaxed(tmp, fifo_reg_addr); + + tmp = readl_relaxed(fifo_reg_addr); + if ((tmp >> 16) == depth) + return 0; + else + return -EIO; +} + +/** + * Description: get tx fifo total depth. + * Input: + * @fifo_base: Need to get tx fifo empty status of the FIFO, the base + * address of the FIFO. + * return: + * The total depth of tx fifo. + * Note: + */ +static inline u32 ipa_phy_get_tx_fifo_total_depth(void __iomem *fifo_base) +{ + u32 tmp; + + tmp = readl_relaxed(fifo_base + IPA_COMMON_TX_FIFO_DEPTH); + + return ((tmp >> 16) & 0x0000FFFF); +} + +/** + * Description: get tx fifo filled depth. + * Input: + * @fifo_base: Need to get tx fifo filled depth of the FIFO, the base + * address of the FIFO. + * return: + * The tx fifo filled depth. + * Note: + */ +static inline u32 ipa_phy_get_tx_fifo_filled_depth(void __iomem *fifo_base) +{ + u32 tmp; + + tmp = readl_relaxed(fifo_base + IPA_COMMON_TX_FIFO_DEPTH); + + return (tmp & 0x0000FFFFUL); +} + +/** + * Description: get tx fifo full status. + * Input: + * @fifo_base: Need to get tx fifo full status of the FIFO, the base + * address of the FIFO. + * return: + * The full status of tx fifo. + * Note: + */ +static inline u32 ipa_phy_get_tx_fifo_full_status(void __iomem *fifo_base) +{ + u32 tmp; + + tmp = readl_relaxed(fifo_base + IPA_COMMON_TX_FIFO_WR); + + return (tmp & 0x1UL); +} + +/** + * Description: get tx fifo empty status. + * Input: + * @fifo_base: Need to get tx fifo empty status of the FIFO, the base + * address of the FIFO. + * return: + * The empty status of tx fifo. + * Note: + */ +static inline u32 ipa_phy_get_tx_fifo_empty_status(void __iomem *fifo_base) +{ + u32 tmp; + + tmp = readl_relaxed(fifo_base + IPA_COMMON_TX_FIFO_RD); + + return (tmp & 0x1UL); +} + +/** + * Description: update tx fifo write pointer. + * Input: + * @fifo_base: Need to update tx fifo write pointer of the FIFO, the base + * address of the FIFO. + * return: + * 0: update tx fifo write pointer successfully. + * non-zero: update tx fifo write pointer failed. + * Note: + */ +static inline int ipa_phy_update_tx_fifo_wptr(void __iomem *fifo_base, + u32 wptr) +{ + u32 tmp; + void __iomem *fifo_reg_addr; + + fifo_reg_addr = fifo_base + IPA_TX_FIFO_WR_INIT; + + if (wptr > 0xFFFFUL) + return -EINVAL; + + tmp = readl_relaxed(fifo_reg_addr); + tmp &= 0x0000FFFFUL; + tmp |= (wptr << 16); + writel_relaxed(tmp, fifo_reg_addr); + + tmp = readl_relaxed(fifo_reg_addr); + tmp |= 0x2; + writel_relaxed(tmp, fifo_reg_addr); + + tmp = readl_relaxed(fifo_reg_addr); + tmp &= 0xFFFFFFFDUL; + writel_relaxed(tmp, fifo_reg_addr); + + tmp = readl_relaxed(fifo_reg_addr); + + if ((tmp >> 16) == wptr) + return 0; + else + return -EIO; +} + +/** + * Description: get tx fifo write pointer. + * Input: + * @fifo_base: Need to get tx fifo write pointer of the FIFO, the base + * address of the FIFO. + * return: + * The write pointer of tx fifo. + * Note: + */ +static inline u32 ipa_phy_get_tx_fifo_wptr(void __iomem *fifo_base) +{ + u32 tmp; + + tmp = readl_relaxed(fifo_base + IPA_COMMON_TX_FIFO_WR); + + return (tmp >> 16); +} + +/** + * Description: update tx fifo read pointer. 
+ * Input:
+ * @fifo_base: Need to update tx fifo read pointer of the FIFO, the base
+ * address of the FIFO.
+ * return:
+ * 0: update tx fifo read pointer successfully.
+ * non-zero: update tx fifo read pointer failed.
+ * Note:
+ */
+static inline int ipa_phy_update_tx_fifo_rptr(void __iomem *fifo_base,
+					      u32 rptr)
+{
+	u32 tmp;
+	void __iomem *fifo_reg_addr;
+
+	fifo_reg_addr = fifo_base + IPA_COMMON_TX_FIFO_RD;
+
+	if (rptr > 0xFFFFUL)
+		return -EINVAL;
+
+	tmp = readl_relaxed(fifo_reg_addr);
+	tmp &= 0x0000FFFFUL;
+	tmp |= (rptr << 16);
+	writel_relaxed(tmp, fifo_reg_addr);
+
+	tmp = readl_relaxed(fifo_reg_addr);
+
+	if ((tmp >> 16) == rptr)
+		return 0;
+	else
+		return -EIO;
+}
+
+/**
+ * Description: get tx fifo read pointer.
+ * Input:
+ * @fifo_base: Need to get tx fifo read pointer of the FIFO, the base
+ * address of the FIFO.
+ * return:
+ * The read pointer of tx fifo.
+ * Note:
+ */
+static inline u32 ipa_phy_get_tx_fifo_rptr(void __iomem *fifo_base)
+{
+	u32 tmp;
+
+	tmp = readl_relaxed(fifo_base + IPA_COMMON_TX_FIFO_RD);
+
+	return (tmp >> 16);
+}
+
+/**
+ * Description: set rx fifo address of iram.
+ * Input:
+ * @fifo_base: Need to set rx fifo address of the FIFO, the base
+ * address of the FIFO.
+ * @addr_l: low 32 bit.
+ * @addr_h: high 8 bit.
+ * return:
+ * 0: update rx fifo address of iram successfully.
+ * non-zero: update rx fifo address of iram failed.
+ * Note:
+ */
+static inline int ipa_phy_set_rx_fifo_addr(void __iomem *fifo_base,
+					   u32 addr_l, u32 addr_h)
+{
+	u32 tmp_l, tmp_h;
+
+	writel_relaxed(addr_l, fifo_base + IPA_COMMON_RX_FIFO_ADDRL);
+	writel_relaxed(addr_h, fifo_base + IPA_COMMON_RX_FIFO_ADDRH);
+
+	tmp_l = readl_relaxed(fifo_base + IPA_COMMON_RX_FIFO_ADDRL);
+	tmp_h = readl_relaxed(fifo_base + IPA_COMMON_RX_FIFO_ADDRH);
+
+	if ((tmp_l == addr_l) && (tmp_h == addr_h))
+		return 0;
+	else
+		return -EIO;
+}
+
+/**
+ * Description: get rx fifo address of iram.
+ * Input:
+ * @fifo_base: Need to get rx fifo address of the FIFO, the base
+ * address of the FIFO.
+ * @addr_l: low 32 bit.
+ * @addr_h: high 8 bit.
+ * return:
+ * void.
+ * Note:
+ */
+static inline void ipa_phy_get_rx_fifo_addr(void __iomem *fifo_base,
+					    u32 *addr_l, u32 *addr_h)
+{
+	*addr_l = readl_relaxed(fifo_base + IPA_COMMON_RX_FIFO_ADDRL);
+	*addr_h = readl_relaxed(fifo_base + IPA_COMMON_RX_FIFO_ADDRH);
+}
+
+/**
+ * Description: set tx fifo address of iram.
+ * Input:
+ * @fifo_base: Need to set tx fifo address of the FIFO, the base
+ * address of the FIFO.
+ * @addr_l: low 32 bit.
+ * @addr_h: high 8 bit.
+ * return:
+ * 0: update tx fifo address of iram successfully.
+ * non-zero: update tx fifo address of iram failed.
+ * Note:
+ */
+static inline int ipa_phy_set_tx_fifo_addr(void __iomem *fifo_base,
+					   u32 addr_l, u32 addr_h)
+{
+	u32 tmp_l, tmp_h;
+
+	writel_relaxed(addr_l, fifo_base + IPA_COMMON_TX_FIFO_ADDRL);
+	writel_relaxed(addr_h, fifo_base + IPA_COMMON_TX_FIFO_ADDRH);
+
+	tmp_l = readl_relaxed(fifo_base + IPA_COMMON_TX_FIFO_ADDRL);
+	tmp_h = readl_relaxed(fifo_base + IPA_COMMON_TX_FIFO_ADDRH);
+
+	if ((tmp_l == addr_l) && (tmp_h == addr_h))
+		return 0;
+	else
+		return -EIO;
+}
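The split low/high address registers above take the low 32 bits and the high 8 bits of the descriptor ring's bus address separately. A hedged sketch of how a caller could derive the two halves from a dma_addr_t; the helper name and the 8-bit masking are assumptions based on the register comments, not driver code:

static inline int example_program_rx_ring_addr(void __iomem *fifo_base,
					       dma_addr_t ring_addr)
{
	u32 addr_l = lower_32_bits(ring_addr);
	u32 addr_h = upper_32_bits(ring_addr) & 0xFFUL;	/* high 8 bits */

	return ipa_phy_set_rx_fifo_addr(fifo_base, addr_l, addr_h);
}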
+/**
+ * Description: get tx fifo address of iram.
+ * Input:
+ * @fifo_base: Need to get tx fifo address of the FIFO, the base
+ * address of the FIFO.
+ * @addr_l: low 32 bit.
+ * @addr_h: high 8 bit.
+ * return:
+ * void.
+ * Note:
+ */
+static inline void ipa_phy_get_tx_fifo_addr(void __iomem *fifo_base,
+					    u32 *addr_l, u32 *addr_h)
+{
+	*addr_l = readl_relaxed(fifo_base + IPA_COMMON_TX_FIFO_ADDRL);
+	*addr_h = readl_relaxed(fifo_base + IPA_COMMON_TX_FIFO_ADDRH);
+}
+
+/**
+ * Description: Enable interrupt bit.
+ * Input:
+ * @fifo_base: Need to enable interrupt bit of the FIFO, the base
+ * address of the FIFO.
+ * @int_bit: The interrupt bit that need to enable.
+ * return:
+ * 0: Enable successfully.
+ * non-zero: Enable failed.
+ * Note:
+ */
+static inline int ipa_phy_enable_int_bit(void __iomem *fifo_base,
+					 u32 int_bit)
+{
+	u32 tmp;
+	void __iomem *fifo_reg_addr;
+
+	fifo_reg_addr = fifo_base + IPA_INT_GEN_CTL_EN;
+
+	tmp = readl_relaxed(fifo_reg_addr);
+	tmp |= int_bit;
+	writel_relaxed(tmp, fifo_reg_addr);
+
+	tmp = readl_relaxed(fifo_reg_addr);
+
+	if ((tmp & int_bit) == int_bit)
+		return 0;
+	else
+		return -EIO;
+}
+
+/**
+ * Description: Disable interrupt bit.
+ * Input:
+ * @fifo_base: Need to disable interrupt bit of the FIFO, the base
+ * address of the FIFO.
+ * @int_bit: The interrupt bit that need to disable.
+ * return:
+ * 0: Disable successfully.
+ * non-zero: Disable failed.
+ * Note:
+ */
+static inline int ipa_phy_disable_int_bit(void __iomem *fifo_base,
+					  u32 int_bit)
+{
+	u32 tmp = 0;
+	void __iomem *fifo_reg_addr;
+
+	fifo_reg_addr = fifo_base + IPA_INT_GEN_CTL_EN;
+
+	tmp = readl_relaxed(fifo_reg_addr);
+	tmp &= (~int_bit);
+	writel_relaxed(tmp, fifo_reg_addr);
+
+	tmp = readl_relaxed(fifo_reg_addr);
+	tmp &= int_bit;
+
+	if (tmp) {
+		pr_err("Disable interrupt bit = 0x%x set failed!\n",
+		       int_bit);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static inline u32 ipa_phy_get_all_intr_enable_status(void __iomem *fifo_base)
+{
+	u32 tmp = 0;
+	void __iomem *fifo_reg_addr;
+
+	fifo_reg_addr = fifo_base + IPA_INT_GEN_CTL_EN;
+
+	tmp = readl_relaxed(fifo_reg_addr);
+
+	tmp &= IPA_INT_EN_BIT_GROUP;
+
+	return tmp;
+}
+
+/**
+ * Description: Get specified interrupt bit status.
+ * Input:
+ * @fifo_base: Need to get interrupt bit of the FIFO, the base
+ * address of the FIFO.
+ * @sts: The specified interrupt bit that need to get.
+ * return:
+ * 0: the specified bit is set.
+ * non-zero: the specified bit is not set.
+ * Note:
+ */
+static inline int ipa_phy_get_fifo_int_sts(void __iomem *fifo_base, u32 sts)
+{
+	u32 tmp;
+
+	tmp = readl_relaxed(fifo_base + IPA_INT_GEN_CTL_EN);
+
+	if (tmp & sts)
+		return 0;
+	else
+		return -EIO;
+}
+
+/**
+ * Description: Get interrupt group status.
+ * Input:
+ * @fifo_base: Need to get interrupt group status of the FIFO, the base
+ * address of the FIFO.
+ * return:
+ * Interrupt group status.
+ * Note:
+ */
+static inline u32 ipa_phy_get_fifo_all_int_sts(void __iomem *fifo_base)
+{
+	u32 tmp;
+
+	tmp = readl_relaxed(fifo_base + IPA_INT_GEN_CTL_EN);
+
+	return (tmp & IPA_INT_STS_GROUP);
+}
+
+/**
+ * Description: Clear interrupt flag, need to write 1, then write 0.
+ * Input:
+ * @fifo_base: Need to clear interrupt flag of the FIFO, the base
+ * address of the FIFO.
+ * return:
+ * void.
+ * Note:
+ */
+static inline void ipa_phy_clear_int(void __iomem *fifo_base, u32 clr_bit)
+{
+	writel_relaxed(clr_bit, fifo_base + IPA_INT_GEN_CTL_CLR);
+}
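ipa_phy_get_fifo_all_int_sts() and ipa_phy_clear_int() are meant to be used as a pair: snapshot the summary status, handle what is set, then clear exactly the bits that were handled. A minimal sketch for the threshold/delay-timer case, mirroring the flow of the interrupt handler in sipa_fifo_irq_hal.c:

static inline void example_ack_threshold_irq(void __iomem *fifo_base)
{
	u32 sts = ipa_phy_get_fifo_all_int_sts(fifo_base);

	/* clear only the sources that actually fired */
	if (sts & (IPA_INT_THRESHOLD_STS | IPA_INT_DELAY_TIMER_STS))
		ipa_phy_clear_int(fifo_base,
				  IPA_TX_FIFO_THRESHOLD_CLR_BIT |
				  IPA_TX_FIFO_TIMER_CLR_BIT);
}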
+/**
+ * Description: Get drop packet count.
+ * Input:
+ * @fifo_base: Need to get drop packet count of the FIFO, the base
+ * address of the FIFO.
+ * return:
+ * Drop packet count.
+ * Note:
+ */
+static inline u32 ipa_phy_get_drop_packet_cnt(void __iomem *fifo_base)
+{
+	u32 tmp;
+
+	tmp = readl_relaxed(fifo_base + IPA_DROP_PACKET_CNT);
+
+	return tmp;
+}
+
+/**
+ * Description: Get tx fifo threshold interrupt.
+ * Input:
+ * @fifo_base: Need to get threshold interrupt of the FIFO, the base
+ * address of the FIFO.
+ * OUTPUT:
+ * threshold value.
+ * Note:
+ */
+static inline u32 ipa_phy_get_tx_fifo_interrupt_threshold(void __iomem
+							   *fifo_base)
+{
+	u32 tmp;
+
+	tmp = readl_relaxed(fifo_base + IPA_INT_GEN_CTL_TX_FIFO_VALUE);
+
+	return (tmp >> 16);
+}
+
+/**
+ * Description: Set tx fifo interrupt threshold of value.
+ * Input:
+ * @fifo_base: Need to set threshold interrupt value of the FIFO, the base
+ * address of the FIFO.
+ * return:
+ * 0: set successfully.
+ * non-zero: set failed.
+ * Note:
+ */
+static inline int
+ipa_phy_set_tx_fifo_interrupt_threshold(void __iomem *fifo_base, u32 threshold)
+{
+	u32 tmp = 0;
+	void __iomem *fifo_reg_addr;
+
+	fifo_reg_addr = fifo_base + IPA_INT_GEN_CTL_TX_FIFO_VALUE;
+
+	if (threshold > 0xFFFFUL)
+		return -EINVAL;
+
+	tmp = readl_relaxed(fifo_reg_addr);
+	tmp &= 0x0000FFFFUL;
+	tmp |= (threshold << 16);
+	writel_relaxed(tmp, fifo_reg_addr);
+	tmp = readl_relaxed(fifo_reg_addr);
+
+	if ((tmp >> 16) == threshold)
+		return 0;
+	else
+		return -EIO;
+}
+
+/**
+ * Description: Get tx fifo interrupt of delay timer value.
+ * Input:
+ * @fifo_base: Need to get delay timer interrupt of the FIFO, the base
+ * address of the FIFO.
+ * OUTPUT:
+ * delay timer value.
+ * Note:
+ */
+static inline u32
+ipa_phy_get_tx_fifo_interrupt_delay_timer(void __iomem *fifo_base)
+{
+	u32 tmp;
+
+	tmp = readl_relaxed(fifo_base + IPA_INT_GEN_CTL_TX_FIFO_VALUE);
+
+	return (tmp & 0x0000FFFFUL);
+}
+
+/**
+ * Description: Set tx fifo interrupt of delay timer value.
+ * Input:
+ * @fifo_base: Need to set delay timer interrupt of the FIFO, the base
+ * address of the FIFO.
+ * @threshold: The delay timer value that need to be set.
+ * return:
+ * 0: Set successfully.
+ * non-zero: set failed.
+ * Note:
+ */
+static inline int
+ipa_phy_set_tx_fifo_interrupt_delay_timer(void __iomem *fifo_base,
+					  u32 threshold)
+{
+	u32 tmp = 0;
+	void __iomem *fifo_reg_addr;
+
+	fifo_reg_addr = fifo_base + IPA_INT_GEN_CTL_TX_FIFO_VALUE;
+
+	if (threshold > 0xFFFFUL)
+		return -EINVAL;
+
+	tmp = readl_relaxed(fifo_reg_addr);
+	tmp &= 0xFFFF0000UL;
+	tmp |= threshold;
+	writel_relaxed(tmp, fifo_reg_addr);
+	tmp = readl_relaxed(fifo_reg_addr);
+
+	if ((tmp & 0x0000FFFFUL) == threshold)
+		return 0;
+	else
+		return -EIO;
+}
+
+/**
+ * Description: Get current term number.
+ * Input:
+ * @fifo_base: Need to get current term number of the FIFO, the base
+ * address of the FIFO.
+ * return:
+ * Current term number.
+ * Note:
+ */
+static inline u32 ipa_phy_get_cur_term_num(void __iomem *fifo_base)
+{
+	u32 tmp;
+
+	tmp = readl_relaxed(fifo_base + IPA_PERFETCH_FIFO_CTL);
+
+	return ((tmp & 0x0003E000UL) >> 13);
+}
+
+/**
+ * Description: Set current term number.
+ * Input:
+ * @fifo_base: Need to set current term number of the FIFO, the base
+ * address of the FIFO.
+ * return:
+ * 0: Set successfully.
+ * non-zero: Set failed.
+ * Note: + */ +static inline int ipa_phy_set_cur_term_num(void __iomem *fifo_base, + u32 num) +{ + u32 tmp = 0; + void __iomem *fifo_reg_addr; + + fifo_reg_addr = fifo_base + IPA_PERFETCH_FIFO_CTL; + + if (num > 0x1FUL) + return -EINVAL; + + tmp = readl_relaxed(fifo_reg_addr); + tmp &= 0xFFFC1FFFUL; + tmp |= (num << 13); + writel_relaxed(tmp, fifo_reg_addr); + + tmp = readl_relaxed(fifo_reg_addr); + if (((tmp & 0x0003E000) >> 13) == num) + return 0; + else + return -EIO; +} + +/** + * Description: Get dst term number. + * Input: + * @fifo_base: Need to get dst term number of the FIFO, the base + * address of the FIFO. + * return: + * Dst term number. + * Note: + */ +static inline u32 ipa_phy_get_dst_term_num(void __iomem *fifo_base) +{ + u32 tmp; + + tmp = readl_relaxed(fifo_base + IPA_PERFETCH_FIFO_CTL); + + return ((tmp & 0x00001F00UL) >> 8); +} + +/** + * Description: Set dst term number. + * Input: + * @fifo_base: Need to set dst term number of the FIFO, the base + * address of the FIFO. + * return: + * 0: Set successfully. + * non-zero: Set failed. + * Note: + */ +static inline int ipa_phy_set_dst_term_num(void __iomem *fifo_base, + u32 num) +{ + u32 tmp = 0; + void __iomem *fifo_reg_addr; + + fifo_reg_addr = fifo_base + IPA_PERFETCH_FIFO_CTL; + if (num > 0x1FUL) + return -EINVAL; + + tmp = readl_relaxed(fifo_reg_addr); + tmp &= 0xFFFFE0FFUL; + tmp |= (num << 8); + writel_relaxed(tmp, fifo_reg_addr); + + tmp = readl_relaxed(fifo_reg_addr); + if (((tmp & 0x00001F00UL) >> 8) == num) + return 0; + else + return -EIO; +} + +/** + * Description: Get prefetch fifo priority. + * Input: + * @fifo_base: Need to get prefetch fifo priority of the FIFO, the base + * address of the FIFO. + * return: + * Prefetch fifo priority. + * Note: + */ +static inline u32 ipa_phy_get_prefetch_fifo_priority(void __iomem *fifo_base) +{ + u32 tmp; + + tmp = readl_relaxed(fifo_base + IPA_PERFETCH_FIFO_CTL); + + return ((tmp & 0x000000F0UL) >> 4); +} + +/** + * Description: Set prefetch fifo priority. + * Input: + * @fifo_base: Need to set prefetch fifo priority of the FIFO, the base + * address of the FIFO. + * return: + * 0: Set successfully. + * non-zero: Set failed. + * Note: + */ +static inline int ipa_phy_set_prefetch_fifo_priority(void __iomem *fifo_base, + u32 pri) +{ + u32 tmp = 0; + void __iomem *fifo_reg_base; + + fifo_reg_base = fifo_base + IPA_PERFETCH_FIFO_CTL; + + if (pri > 0xFUL) + return -EINVAL; + + tmp = readl_relaxed(fifo_reg_base); + tmp &= 0xFFFFFF0FUL; + tmp |= (pri << 4); + writel_relaxed(tmp, fifo_reg_base); + + tmp = readl_relaxed(fifo_reg_base); + if (((tmp & 0x000000F0UL) >> 4) == pri) + return 0; + else + return -EIO; +} + +/** + * Description: Get prefetch threshold. + * Input: + * @fifo_base: Need to get prefetch threshold of the FIFO, the base + * address of the FIFO. + * return: + * Prefetch threshold. + * Note: + */ +static inline u32 ipa_phy_get_prefetch_threshold(void __iomem *fifo_base) +{ + u32 tmp; + + tmp = readl_relaxed(fifo_base + IPA_PERFETCH_FIFO_CTL); + + return (tmp & 0xFUL); +} + +/** + * Description: Set prefetch threshold. + * Input: + * @fifo_base: Need to get threshold of the FIFO, the base + * address of the FIFO. + * return: + * 0: Set successfully. + * non-zero: Set failed. 
+ * Note: + */ +static inline int ipa_phy_set_prefetch_threshold(void __iomem *fifo_base, + u32 threshold) +{ + u32 tmp = 0; + void __iomem *fifo_reg_addr; + + fifo_reg_addr = fifo_base + IPA_PERFETCH_FIFO_CTL; + + if (threshold > 0xFUL) + return -EINVAL; + + tmp = readl_relaxed(fifo_reg_addr); + tmp &= 0xFFFFFFF0UL; + tmp |= threshold; + writel_relaxed(tmp, fifo_reg_addr); + + tmp = readl_relaxed(fifo_reg_addr); + if ((tmp & 0xFUL) == threshold) + return 0; + else + return -EIO; +} + +/** + * Description: Set stop receive bit. + * Input: + * @fifo_base: Need to set stop receive bit of the FIFO, the base + * address of the FIFO. + * return: + * 0: Set successfully. + * non-zero: Set failed. + * Note: + */ +static inline int ipa_phy_stop_receive(void __iomem *fifo_base) +{ + u32 tmp; + void __iomem *fifo_reg_addr; + + fifo_reg_addr = fifo_base + IPA_FLOW_CTRL_CONFIG; + + tmp = readl_relaxed(fifo_reg_addr); + tmp |= 0x8; + writel_relaxed(tmp, fifo_reg_addr); + + tmp = readl_relaxed(fifo_reg_addr); + if (tmp & 0x8) + return 0; + else + return -EIO; +} + +/** + * Description: Clear stop receive bit. + * Input: + * @fifo_base: Need to clear stop receive bit of the FIFO, the base + * address of the FIFO. + * return: + * 0: clear successfully. + * non-zero: clear failed. + * Note: + */ +static inline int ipa_phy_clear_stop_receive(void __iomem *fifo_base) +{ + u32 tmp; + void __iomem *fifo_reg_addr; + + fifo_reg_addr = fifo_base + IPA_FLOW_CTRL_CONFIG; + + tmp = readl_relaxed(fifo_reg_addr); + tmp &= 0xFFFFFFF7; + writel_relaxed(tmp, fifo_reg_addr); + tmp = readl_relaxed(fifo_reg_addr); + + if (!(tmp & 0x8)) + return 0; + else + return -EIO; +} + +/** + * Description: recover fifo work. + * Input: + * @fifo_base: Need to be recovered of the FIFO, the base + * address of the FIFO. + * return: + * 0: Recover successfully. + * non-zero: Recover failed. + * Note: + */ +static inline int ipa_phy_flow_ctrl_recover(void __iomem *fifo_base) +{ + u32 tmp; + void __iomem *fifo_reg_addr; + + fifo_reg_addr = fifo_base + IPA_FLOW_CTRL_CONFIG; + + tmp = readl_relaxed(fifo_reg_addr); + tmp |= 0x4UL; + writel_relaxed(tmp, fifo_reg_addr); + + tmp = readl_relaxed(fifo_reg_addr); + if (tmp & 0x4UL) { + tmp &= 0xFFFFFFFBUL; + writel_relaxed(tmp, fifo_reg_addr); + + tmp = readl_relaxed(fifo_reg_addr); + if (!(tmp & 0x4UL)) + return 0; + else + return -EIO; + } else { + return -EINVAL; + } +} + +/** + * Description: Set flow ctrl mode. + * Input: + * @fifo_base: Need to set flow ctrl mode of the FIFO, the base + * address of the FIFO. + * return: + * 0: Set successfully. + * non-zero: Set failed. + * Note: + */ +static inline int ipa_phy_set_flow_ctrl_config(void __iomem *fifo_base, + u32 config) +{ + u32 tmp; + void __iomem *fifo_reg_addr; + + fifo_reg_addr = fifo_base + IPA_FLOW_CTRL_CONFIG; + + tmp = readl_relaxed(fifo_reg_addr); + tmp &= 0xFFFFFFFC; + tmp |= config; + writel_relaxed(tmp, fifo_reg_addr); + + tmp = readl_relaxed(fifo_reg_addr); + if ((tmp & 0x00000003) == config) + return 0; + else + return -EIO; +} + +/** + * Description: Get flow ctrl mode. + * Input: + * @fifo_base: Need to get flow ctrl mode of the FIFO, the base + * address of the FIFO. + * return: + * Flow ctrl config + * Note: + */ +static inline u32 ipa_phy_get_flow_ctrl_config(void __iomem *fifo_base) +{ + u32 tmp; + + tmp = readl_relaxed(fifo_base + IPA_FLOW_CTRL_CONFIG); + + return (tmp & 0x00000003); +} + +/** + * Description: Set tx fifo exit flow ctrl watermark. 
+ * Input: + * @fifo_base: Need to be set of the FIFO, the base + * address of the FIFO. + * @watermark: The need to be set. + * return: + * 0: Set successfully. + * non-zero: Set failed. + * Note: + */ +static inline int +ipa_phy_set_tx_fifo_exit_flow_ctrl_watermark(void __iomem *fifo_base, + u32 watermark) +{ + u32 tmp; + void __iomem *fifo_reg_addr; + + fifo_reg_addr = fifo_base + IPA_TX_FIFO_FLOW_CTRL; + + tmp = readl_relaxed(fifo_reg_addr); + tmp &= 0x0000FFFFUL; + tmp |= (watermark << 16); + writel_relaxed(tmp, fifo_reg_addr); + + tmp = readl_relaxed(fifo_reg_addr); + if ((tmp >> 16) == watermark) + return 0; + else + return -EIO; +} + +/** + * Description: Get tx fifo exit flow ctrl watermark. + * Input: + * @fifo_base: Need to be get of the FIFO, the base + * address of the FIFO. + * return: + * Tx fifo exit watermark. + * Note: + */ +static inline u32 +ipa_phy_get_tx_fifo_exit_flow_ctrl_watermark(void __iomem *fifo_base) +{ + u32 tmp; + + tmp = readl_relaxed(fifo_base + IPA_TX_FIFO_FLOW_CTRL); + + return (tmp >> 16); +} + +/** + * Description: Set tx fifo entry flow ctrl watermark. + * Input: + * @fifo_base: Need to be set of the FIFO, the base + * address of the FIFO. + * @watermark: The need to be set. + * return: + * 0: Set successfully. + * non-zero: Set failed. + * Note: + */ +static inline int +ipa_phy_set_tx_fifo_entry_flow_ctrl_watermark(void __iomem *fifo_base, + u32 watermark) +{ + u32 tmp; + void __iomem *fifo_reg_addr; + + fifo_reg_addr = fifo_base + IPA_TX_FIFO_FLOW_CTRL; + + tmp = readl_relaxed(fifo_reg_addr); + tmp &= 0xFFFF0000UL; + tmp |= watermark; + writel_relaxed(tmp, fifo_reg_addr); + tmp = readl_relaxed(fifo_reg_addr); + + if ((tmp & 0x0000FFFFUL) == watermark) + return 0; + else + return -EIO; +} + +/** + * Description: Get tx fifo entry flow ctrl watermark. + * Input: + * @fifo_base: Need to be get of the FIFO, the base + * address of the FIFO. + * return: + * @The value of tx fifo entry watermark. + * Note: + */ +static inline u32 +ipa_phy_get_tx_fifo_entry_flow_ctrl_watermark(void __iomem *fifo_base) +{ + u32 tmp; + + tmp = readl_relaxed(fifo_base + IPA_TX_FIFO_FLOW_CTRL); + + return (tmp & 0x0000FFFF); +} + +/** + * Description: Set rx fifo exit flow ctrl watermark. + * Input: + * @fifo_base: Need to be set of the FIFO, the base + * address of the FIFO. + * @watermark: The value of rx fifo exit watermark. + * return: + * 0: Set successfully. + * non-zero: Set failed. + * Note: + */ +static inline int +ipa_phy_set_rx_fifo_exit_flow_ctrl_watermark(void __iomem *fifo_base, + u32 watermark) +{ + u32 tmp; + void __iomem *fifo_reg_addr; + + fifo_reg_addr = fifo_base + IPA_RX_FIFO_FLOW_CTRL; + + tmp = readl_relaxed(fifo_reg_addr); + tmp &= 0x0000FFFFUL; + tmp |= (watermark << 16); + writel_relaxed(tmp, fifo_reg_addr); + tmp = readl_relaxed(fifo_reg_addr); + + if ((tmp >> 16) == watermark) + return 0; + else + return -EIO; +} + +/** + * Description: Get rx fifo exit flow ctrl watermark. + * Input: + * @fifo_base: Need to be get of the FIFO, the base + * address of the FIFO. + * return: + * The value of rx fifo exit watermark. + * Note: + */ +static inline u32 +ipa_phy_get_rx_fifo_exit_flow_ctrl_watermark(void __iomem *fifo_base) +{ + u32 tmp; + + tmp = readl_relaxed(fifo_base + IPA_RX_FIFO_FLOW_CTRL); + + return (tmp >> 16); +} + +/** + * Description: Set rx fifo entry flow ctrl watermark. + * Input: + * @fifo_base: Need to be set of the FIFO, the base + * address of the FIFO. + * @watermark: The value of rx fifo entry watermark. 
+ * return: + * TRUE: Set successfully. + * FALSE: Set failed. + * Note: + */ +static inline int +ipa_phy_set_rx_fifo_entry_flow_ctrl_watermark(void __iomem *fifo_base, + u32 watermark) +{ + u32 tmp; + void __iomem *fifo_reg_addr; + + fifo_reg_addr = fifo_base + IPA_RX_FIFO_FLOW_CTRL; + tmp = readl_relaxed(fifo_reg_addr); + tmp &= 0xFFFF0000UL; + tmp |= watermark; + writel_relaxed(tmp, fifo_reg_addr); + tmp = readl_relaxed(fifo_reg_addr); + + if ((tmp & 0x0000FFFFUL) == watermark) + return 0; + else + return -EIO; +} + +/** + * Description: Get rx fifo entry flow ctrl watermark. + * Input: + * @fifo_base: Need to be get of the FIFO, the base + * address of the FIFO. + * return: + * The value of rx fifo entry watermark. + * Note: + */ +static inline u32 +ipa_phy_get_rx_fifo_entry_flow_ctrl_watermark(void __iomem *fifo_base) +{ + u32 tmp; + + tmp = readl_relaxed(fifo_base + IPA_RX_FIFO_FLOW_CTRL); + + return (tmp & 0x0000FFFF); +} + +/** + * Description: Get rx_axi_read_cmd_sts + * return: + * rx_axi_read_cmd_sts. + * Note: + */ +static inline u32 ipa_phy_get_rx_fifo_axi_sts(void __iomem *fifo_base) +{ + u32 tmp; + + tmp = readl_relaxed(fifo_base + IPA_COMMON_RX_FIFO_AXI_STS); + + return (tmp & 0x00000003); +} +#endif diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_skb_recv.c b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_skb_recv.c new file mode 100644 index 000000000..63c60b98b --- /dev/null +++ b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_skb_recv.c @@ -0,0 +1,674 @@ +/* + * Copyright (C) 2020 Unisoc Communications Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,4,60 )) +#include +#include +#endif + + +#include "../include/sipa.h" +#include "sipa_core.h" +#include "sipa_eth.h" + +#define SIPA_RECV_BUF_LEN 1600 +#define SIPA_RECV_RSVD_LEN 128 + +static int put_recv_array_node(struct sipa_skb_array *p, + struct sk_buff *skb, u64 *dma_addr) +{ + u32 pos; + + if ((p->wp - p->rp) < p->depth) { + pos = p->wp & (p->depth - 1); + p->array[pos].skb = skb; + p->array[pos].dma_addr = *dma_addr; + /* + * Ensure that we put the item to the fifo before + * we update the fifo wp. + */ + smp_wmb(); + p->wp++; + return 0; + } else { + return -1; + } +} + +static int get_recv_array_node(struct sipa_skb_array *p, + struct sk_buff **skb, u64 *dma_addr) +{ + u32 pos; + + if (p->rp != p->wp) { + pos = p->rp & (p->depth -1); + *skb = p->array[pos].skb; + *dma_addr = p->array[pos].dma_addr; + /* + * Ensure that we remove the item from the fifo before + * we update the fifo rp. 
+ */ + smp_wmb(); + p->rp++; + return 0; + } else { + return -1; + } +} + +static int create_recv_array(struct sipa_skb_array *p, u32 depth) +{ + p->array = kzalloc(sizeof(*p->array) * depth, + GFP_KERNEL); + if (!p->array) + return -ENOMEM; + p->rp = 0; + p->wp = 0; + p->depth = depth; + + return 0; +} + +static void destroy_recv_array(struct sipa_skb_array *p) +{ + kfree(p->array); + + p->array = NULL; + p->rp = 0; + p->wp = 0; + p->depth = 0; +} + +static struct sk_buff *alloc_recv_skb(u32 req_len, u8 rsvd) +{ + struct sk_buff *skb; + u32 hr; + struct sipa_core *ctrl = sipa_get_ctrl_pointer(); + + skb = __dev_alloc_skb(req_len + rsvd, GFP_KERNEL | GFP_NOWAIT); + if (!skb) { + dev_err(ctrl->dev, "failed to alloc skb!\n"); + return NULL; + } + + /* save skb ptr to skb->data */ + hr = skb_headroom(skb); + if (hr < rsvd) + skb_reserve(skb, rsvd - hr); + + return skb; +} + +static void sipa_prepare_free_node_init(struct sipa_skb_receiver *receiver, + u32 cnt) +{ + struct sk_buff *skb; + u32 tmp, fail_cnt = 0; + int i; + u32 success_cnt = 0; + u64 dma_addr; + struct sipa_node_description_tag *node; +#if defined (__BIG_ENDIAN_BITFIELD) + struct sipa_node_description_tag node_tmp; +#endif + struct sipa_core *ctrl = sipa_get_ctrl_pointer(); + struct sipa_cmn_fifo_cfg_tag *cmn = receiver->ep->recv_fifo; + + for (i = 0; i < cnt; i++) { + skb = alloc_recv_skb(SIPA_RECV_BUF_LEN, receiver->rsvd); + if (!skb) { + fail_cnt++; + break; + } + + tmp = skb_headroom(skb); + if (unlikely(tmp > SIPA_RECV_RSVD_LEN)) { + tmp -= SIPA_RECV_RSVD_LEN; + skb_put(skb, SIPA_RECV_BUF_LEN - tmp); + skb_push(skb, tmp); + } else { + skb_put(skb, SIPA_RECV_BUF_LEN); + } + + dma_addr = (u64)dma_map_single(ctrl->pci_dev, + skb->head, + SIPA_RECV_BUF_LEN + + skb_headroom(skb), + DMA_FROM_DEVICE); + if (dma_mapping_error(ctrl->pci_dev, (dma_addr_t)dma_addr)) { + dev_kfree_skb_any(skb); + dev_err(ctrl->dev, + "prepare free node dma map err\n"); + fail_cnt++; + break; + } + + node = ctrl->hal_ops.get_rx_fifo_wr(cmn->fifo_id, + ctrl->cmn_fifo_cfg, + i); + if (!node) { + dma_unmap_single(ctrl->pci_dev, dma_addr, + SIPA_RECV_BUF_LEN + + skb_headroom(skb), + DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + dev_err(ctrl->dev, + "get node fail index = %d\n", i); + fail_cnt++; + break; + } + + dma_addr += ctrl->pcie_mem_offset; +#if defined (__BIG_ENDIAN_BITFIELD) + memset(&node_tmp, 0, sizeof(node_tmp)); + node_tmp.address = dma_addr; + node_tmp.length = skb->len; + node_tmp.offset = skb_headroom(skb); + node_tmp.dst = ctrl->ep.recv_fifo->dst; + node_tmp.src = ctrl->ep.recv_fifo->cur; + node_tmp.intr = 0; + node_tmp.net_id = 0; + node_tmp.err_code = 0; + sipa_set_node_desc((u8 *)node, (u8 *)&node_tmp); +#else + node->address = dma_addr; + node->length = skb->len; + node->offset = skb_headroom(skb); + node->dst = ctrl->ep.recv_fifo->dst; + node->src = ctrl->ep.recv_fifo->cur; + node->intr = 0; + node->net_id = 0; + node->err_code = 0; +#endif + if (dma_addr == 0 || node->address == 0) + pr_info("cnt = %d, i = %d, dma_addr 0x%llx, node->address 0x%llx\n", + cnt, i, dma_addr, (long long unsigned int)node->address); + put_recv_array_node(&receiver->recv_array, skb, &dma_addr); + success_cnt++; + } + if (fail_cnt) + dev_err(ctrl->dev, + "fail_cnt = %d success_cnt = %d\n", + fail_cnt, success_cnt); +} + +static void fill_free_fifo(struct sipa_skb_receiver *receiver, u32 cnt) +{ + struct sk_buff *skb; + u32 tmp, fail_cnt = 0; + int i; + u32 success_cnt = 0, depth; + u64 dma_addr; + struct sipa_node_description_tag *node; +#if defined 
(__BIG_ENDIAN_BITFIELD) + struct sipa_node_description_tag node_tmp; +#endif + struct sipa_core *ctrl = sipa_get_ctrl_pointer(); + struct sipa_cmn_fifo_cfg_tag *cmn = receiver->ep->recv_fifo; + + depth = cmn->rx_fifo.depth; + if (cnt > (depth - depth / 4)) { +// dev_warn(ctrl->dev, "free node is not enough,need fill %d\n", cnt); + receiver->rx_danger_cnt++; + } + + for (i = 0; i < cnt; i++) { + skb = alloc_recv_skb(SIPA_RECV_BUF_LEN, receiver->rsvd); + if (!skb) { + fail_cnt++; + break; + } + + tmp = skb_headroom(skb); + if (unlikely(tmp > SIPA_RECV_RSVD_LEN)) { + tmp -= SIPA_RECV_RSVD_LEN; + skb_put(skb, SIPA_RECV_BUF_LEN - tmp); + skb_push(skb, tmp); + } else { + skb_put(skb, SIPA_RECV_BUF_LEN); + } + + dma_addr = (u64)dma_map_single(ctrl->pci_dev, + skb->head, + SIPA_RECV_BUF_LEN + + skb_headroom(skb), + DMA_FROM_DEVICE); + if (dma_mapping_error(ctrl->pci_dev, (dma_addr_t)dma_addr)) { + dev_kfree_skb_any(skb); + dev_err(ctrl->dev, + "prepare free node dma map err\n"); + fail_cnt++; + break; + } + node = ctrl->hal_ops.get_rx_fifo_wr(cmn->fifo_id, + ctrl->cmn_fifo_cfg, + i); + if (!node) { + dma_unmap_single(ctrl->pci_dev, dma_addr, + SIPA_RECV_BUF_LEN + + skb_headroom(skb), + DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + dev_err(ctrl->dev, + "get node fail index = %d\n", i); + fail_cnt++; + break; + } + + dma_addr += ctrl->pcie_mem_offset; +#if defined (__BIG_ENDIAN_BITFIELD) + memset(&node_tmp, 0, sizeof(node_tmp)); + node_tmp.address = dma_addr; + node_tmp.length = skb->len; + node_tmp.offset = skb_headroom(skb); + node_tmp.dst = ctrl->ep.recv_fifo->dst; + node_tmp.src = ctrl->ep.recv_fifo->cur; + node_tmp.intr = 0; + node_tmp.net_id = 0; + node_tmp.err_code = 0; + sipa_set_node_desc((u8 *)node, (u8 *)&node_tmp); +#else + node->address = dma_addr; + node->length = skb->len; + node->offset = skb_headroom(skb); + node->dst = ctrl->ep.recv_fifo->dst; + node->src = ctrl->ep.recv_fifo->cur; + node->intr = 0; + node->net_id = 0; + node->err_code = 0; +#endif + + put_recv_array_node(&receiver->recv_array, skb, &dma_addr); + success_cnt++; + } + + if (success_cnt) { + ctrl->hal_ops.set_rx_fifo_wr(ctrl->pci_dev, + cmn->fifo_id, + ctrl->cmn_fifo_cfg, + success_cnt); + if (atomic_read(&receiver->need_fill_cnt) > 0) + atomic_sub(success_cnt, + &receiver->need_fill_cnt); + } + + if (fail_cnt) + dev_err(ctrl->dev, + "fill free fifo fail_cnt = %d\n", fail_cnt); +} + +static void sipa_fill_free_node(struct sipa_skb_receiver *receiver, u32 cnt) +{ + struct sipa_core *ctrl = sipa_get_ctrl_pointer(); + + ctrl->hal_ops.set_rx_fifo_wr(ctrl->pci_dev, + receiver->ep->recv_fifo->fifo_id, + ctrl->cmn_fifo_cfg, cnt); + + if (atomic_read(&receiver->need_fill_cnt) > 0) + dev_info(ctrl->dev, + "a very serious problem, mem cover may appear\n"); + + atomic_set(&receiver->need_fill_cnt, 0); +} + +static void sipa_receiver_notify_cb(void *priv, enum sipa_irq_evt_type evt, + unsigned long data) +{ + struct sipa_core *ctrl = sipa_get_ctrl_pointer(); + struct sipa_skb_receiver *receiver = (struct sipa_skb_receiver *)priv; + + if (evt & SIPA_RECV_WARN_EVT) { + dev_dbg(ctrl->dev, + "sipa maybe poor resources evt = 0x%x\n", evt); + receiver->tx_danger_cnt++; + } + + sipa_dummy_recv_trigger(); +} + +static void sipa_free_recv_skb(struct sipa_skb_receiver *receiver) +{ + u64 addr = 0; + struct sk_buff *recv_skb = NULL; + while(!get_recv_array_node(&receiver->recv_array, &recv_skb, &addr)) + { + dev_kfree_skb_any(recv_skb); + } +} + +struct sk_buff *sipa_recv_skb(int *netid, int index) +{ + int ret = -1; + u32 retry_cnt = 10; 
+	u64 addr = 0;
+	struct sk_buff *recv_skb = NULL;
+#if defined (__BIG_ENDIAN_BITFIELD)
+	struct sipa_node_description_tag node;
+#else
+	struct sipa_node_description_tag *node;
+#endif
+	struct sipa_core *ctrl = sipa_get_ctrl_pointer();
+	struct sipa_skb_receiver *receiver = ctrl->receiver;
+	enum sipa_cmn_fifo_index id = receiver->ep->recv_fifo->fifo_id;
+
+	ret = get_recv_array_node(&receiver->recv_array,
+				  &recv_skb, &addr);
+read_again:
+#if defined (__BIG_ENDIAN_BITFIELD)
+	sipa_get_node_desc((u8 *)ctrl->hal_ops.get_tx_fifo_rp(id,
+				ctrl->cmn_fifo_cfg, index), &node);
+#else
+	node = ctrl->hal_ops.get_tx_fifo_rp(id, ctrl->cmn_fifo_cfg, index);
+#endif
+
+#if defined (__BIG_ENDIAN_BITFIELD)
+	if (!node.address) {
+#else
+	if (!node->address) {
+#endif
+		if (retry_cnt--) {
+			udelay(1);
+			goto read_again;
+		}
+
+#if defined (__BIG_ENDIAN_BITFIELD)
+		dev_err(ctrl->dev, "phy addr is null = %llx\n",
+			(u64)node.address);
+#else
+		dev_err(ctrl->dev, "phy addr is null = %llx\n",
+			(u64)node->address);
+#endif
+		if (!ret) {
+			dma_unmap_single(ctrl->pci_dev, (dma_addr_t)(addr - ctrl->pcie_mem_offset),
+					 SIPA_RECV_BUF_LEN + skb_headroom(recv_skb),
+					 DMA_FROM_DEVICE);
+			dev_kfree_skb_any(recv_skb);
+			atomic_add(1, &receiver->need_fill_cnt);
+			ctrl->hal_ops.set_tx_fifo_rp(id, ctrl->cmn_fifo_cfg, 1);
+			dev_err(ctrl->dev,
+				"recv addr is null, but recv_array addr:0x%llx\n",
+				addr);
+		}
+		return NULL;
+	}
+
+	retry_cnt = 10;
+check_again:
+	if (ret) {
+#if defined (__BIG_ENDIAN_BITFIELD)
+		dev_err(ctrl->dev,
+			"recv addr:0x%llx, but recv_array is empty\n",
+			(u64)node.address);
+#else
+		dev_err(ctrl->dev,
+			"recv addr:0x%llx, but recv_array is empty\n",
+			(u64)node->address);
+#endif
+		return NULL;
+#if defined (__BIG_ENDIAN_BITFIELD)
+	} else if (addr != node.address && retry_cnt) {
+#else
+	} else if (addr != node->address && retry_cnt) {
+#endif
+		retry_cnt--;
+		udelay(1);
+#if defined (__BIG_ENDIAN_BITFIELD)
+		sipa_get_node_desc((u8 *)ctrl->hal_ops.get_tx_fifo_rp(id,
+					ctrl->cmn_fifo_cfg, index), &node);
+#endif
+		goto check_again;
+#if defined (__BIG_ENDIAN_BITFIELD)
+	} else if (addr != node.address && !retry_cnt) {
+#else
+	} else if (addr != node->address && !retry_cnt) {
+#endif
+		dma_unmap_single(ctrl->pci_dev, (dma_addr_t)(addr - ctrl->pcie_mem_offset),
+				 SIPA_RECV_BUF_LEN + skb_headroom(recv_skb),
+				 DMA_FROM_DEVICE);
+		dev_kfree_skb_any(recv_skb);
+		atomic_add(1, &receiver->need_fill_cnt);
+		dev_err(ctrl->dev,
+			"recv addr:0x%llx, but recv_array addr:0x%llx not equal\n",
+#if defined (__BIG_ENDIAN_BITFIELD)
+			(u64)node.address, addr);
+#else
+			(u64)node->address, addr);
+#endif
+		ctrl->hal_ops.set_tx_fifo_rp(id, ctrl->cmn_fifo_cfg, 1);
+		return NULL;
+	}
+	dma_unmap_single(ctrl->pci_dev, (dma_addr_t)(addr - ctrl->pcie_mem_offset),
+			 SIPA_RECV_BUF_LEN + skb_headroom(recv_skb),
+			 DMA_FROM_DEVICE);
+
+	atomic_add(1, &receiver->need_fill_cnt);
+	if (atomic_read(&receiver->need_fill_cnt) > 0x30)
+		wake_up(&receiver->fill_recv_waitq);
+
+#if defined (__BIG_ENDIAN_BITFIELD)
+	*netid = node.net_id;
+#else
+	*netid = node->net_id;
+#endif
+	return recv_skb;
+}
+EXPORT_SYMBOL(sipa_recv_skb);
+
+static int fill_recv_thread(void *data)
+{
+	int ret;
+	struct sipa_skb_receiver *receiver = (struct sipa_skb_receiver *)data;
+	struct sched_param param = {.sched_priority = 92};
+	unsigned long flags;
+
+	sched_setscheduler(current, SCHED_RR, &param);
+
+	while (!kthread_should_stop()) {
+		ret = wait_event_interruptible(receiver->fill_recv_waitq,
+					       (atomic_read(&receiver->need_fill_cnt) > 0) || receiver->run
== 0); + spin_lock_irqsave(&receiver->exit_lock, flags); + if(receiver->run == 0) { + spin_unlock_irqrestore(&receiver->exit_lock, flags); + break; + } + spin_unlock_irqrestore(&receiver->exit_lock, flags); + if (!ret) + fill_free_fifo(receiver, atomic_read(&receiver->need_fill_cnt)); + } + + sipa_free_recv_skb(receiver); + if (receiver->recv_array.array) + destroy_recv_array(&receiver->recv_array); + + kfree(receiver); + return 0; +} + +bool sipa_check_recv_tx_fifo_empty(void) +{ + struct sipa_core *ctrl = sipa_get_ctrl_pointer(); + enum sipa_cmn_fifo_index id = ctrl->receiver->ep->recv_fifo->fifo_id; + + if (!ctrl->remote_ready) + return true; + + return ctrl->hal_ops.get_tx_empty_status(id, ctrl->cmn_fifo_cfg); +} +EXPORT_SYMBOL(sipa_check_recv_tx_fifo_empty); + +void sipa_receiver_open_cmn_fifo(struct sipa_skb_receiver *receiver) +{ + struct sipa_core *ctrl = sipa_get_ctrl_pointer(); + struct sipa_cmn_fifo_cfg_tag *fifo_cfg = receiver->ep->recv_fifo; + + if (unlikely(!ctrl || !receiver)) { + pr_err("ctrl %p receiver %p not ready\n", ctrl, receiver); + return; + } + + ctrl->hal_ops.open(fifo_cfg->fifo_id, ctrl->cmn_fifo_cfg, NULL); + sipa_fill_free_node(receiver, fifo_cfg->rx_fifo.depth); + + ctrl->hal_ops.set_hw_intr_thres(fifo_cfg->fifo_id, + ctrl->cmn_fifo_cfg, + true, 64, NULL); + /* timeout = 1 / ipa_sys_clk * 1024 * value */ + ctrl->hal_ops.set_hw_intr_timeout(fifo_cfg->fifo_id, + ctrl->cmn_fifo_cfg, + true, 0x32, NULL); + +// ctrl->hal_ops.set_intr_txfifo_full(fifo_cfg->fifo_id, +// ctrl->cmn_fifo_cfg, +// true, NULL); +} +EXPORT_SYMBOL(sipa_receiver_open_cmn_fifo); + +static void sipa_receiver_init(struct sipa_skb_receiver *receiver, u32 rsvd) +{ + u32 depth; + struct sipa_core *ctrl = sipa_get_ctrl_pointer(); + enum sipa_cmn_fifo_index fifo_id = receiver->ep->recv_fifo->fifo_id; + + dev_info(ctrl->dev, + "fifo_id = %d rx_fifo depth = 0x%x\n", + receiver->ep->recv_fifo->fifo_id, + receiver->ep->recv_fifo->rx_fifo.depth); + + ctrl->cmn_fifo_cfg[fifo_id].irq_cb = + (sipa_irq_notify_cb)sipa_receiver_notify_cb; + ctrl->cmn_fifo_cfg[fifo_id].priv = receiver; + + /* reserve space for dma flushing cache issue */ + receiver->rsvd = rsvd; + depth = receiver->ep->recv_fifo->rx_fifo.depth; + + sipa_prepare_free_node_init(receiver, depth); +} + +void sipa_receiver_add_nic(struct sipa_skb_receiver *receiver, + struct sipa_nic *nic) +{ + int i; + unsigned long flags; + + for (i = 0; i < receiver->nic_cnt; i++) + if (receiver->nic_array[i] == nic) + return; + spin_lock_irqsave(&receiver->lock, flags); + if (receiver->nic_cnt < SIPA_NIC_MAX) + receiver->nic_array[receiver->nic_cnt++] = nic; + spin_unlock_irqrestore(&receiver->lock, flags); +} +EXPORT_SYMBOL(sipa_receiver_add_nic); + +void sipa_reinit_recv_array(struct sipa_skb_receiver *receiver) +{ + if (!receiver) { + pr_err("sipa receiver is null\n"); + return; + } + + if (!receiver->recv_array.array) { + pr_err("sipa p->array is null\n"); + return; + } + + receiver->recv_array.rp = 0; + receiver->recv_array.wp = receiver->recv_array.depth; +} + +int create_sipa_skb_receiver(struct sipa_endpoint *ep, + struct sipa_skb_receiver **receiver_pp) +{ + int ret; + struct sipa_skb_receiver *receiver = NULL; + struct sipa_core *ctrl = sipa_get_ctrl_pointer(); + + receiver = kzalloc(sizeof(*receiver), GFP_KERNEL); + if (!receiver) + return -ENOMEM; + + receiver->ep = ep; + receiver->rsvd = SIPA_RECV_RSVD_LEN; + + atomic_set(&receiver->need_fill_cnt, 0); + + ret = create_recv_array(&receiver->recv_array, + receiver->ep->recv_fifo->rx_fifo.depth); + if 
(ret) {
+		dev_err(ctrl->dev,
+			"create_sipa_skb_receiver: recv_array kzalloc err.\n");
+		kfree(receiver);
+		return -ENOMEM;
+	}
+
+	spin_lock_init(&receiver->lock);
+	spin_lock_init(&receiver->exit_lock);
+	init_waitqueue_head(&receiver->fill_recv_waitq);
+
+	sipa_receiver_init(receiver, SIPA_RECV_RSVD_LEN);
+	receiver->run = 1;
+	receiver->fill_thread = kthread_create(fill_recv_thread, receiver,
+					       "sipa-fill");
+	if (IS_ERR(receiver->fill_thread)) {
+		dev_err(ctrl->dev, "Failed to create kthread: sipa-fill\n");
+		ret = PTR_ERR(receiver->fill_thread);
+		kfree(receiver->recv_array.array);
+		kfree(receiver);
+		return ret;
+	}
+
+	wake_up_process(receiver->fill_thread);
+
+	*receiver_pp = receiver;
+	return 0;
+}
+EXPORT_SYMBOL(create_sipa_skb_receiver);
+
+void destroy_sipa_skb_receiver(struct sipa_skb_receiver *receiver)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&receiver->exit_lock, flags);
+	receiver->run = 0;
+	wake_up_interruptible_all(&receiver->fill_recv_waitq);
+	spin_unlock_irqrestore(&receiver->exit_lock, flags);
+}
+EXPORT_SYMBOL(destroy_sipa_skb_receiver);
diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_skb_send.c b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_skb_send.c
new file mode 100644
index 000000000..f70bd16d3
--- /dev/null
+++ b/package/wwan/driver/quectel_SRPD_PCIE/src/sipa/sipa_skb_send.c
@@ -0,0 +1,556 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,4,60 ))
+#include
+#include
+#endif
+
+
+#include "sipa_phy_v0/sipa_fifo_phy.h"
+#include "../include/sipa.h"
+#include "sipa_core.h"
+#include "sipa_eth.h"
+
+#define SIPA_RECEIVER_BUF_LEN 1600
+
+static void sipa_inform_evt_to_nics(struct sipa_skb_sender *sender,
+				    enum sipa_evt_type evt)
+{
+	struct sipa_nic *nic;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sender->nic_lock, flags);
+
+	if (SIPA_LEAVE_FLOWCTRL == evt) {
+		if (sender->free_notify_net == true) {
+			pr_info("%s, not leave flowctl, free_notify_net is true\n", __func__);
+			spin_unlock_irqrestore(&sender->nic_lock, flags);
+			return;
+		}
+
+		if (sender->ep_cover_net == true) {
+			pr_info("%s, not leave flowctl, ep_cover_net is true\n", __func__);
+			spin_unlock_irqrestore(&sender->nic_lock, flags);
+			return;
+		}
+
+		pr_info("%s, leave flowctl\n", __func__);
+		list_for_each_entry(nic, &sender->nic_list, list) {
+			if (nic->flow_ctrl_status == true) {
+				nic->flow_ctrl_status = false;
+				sipa_nic_notify_evt(nic, evt);
+			}
+		}
+	} else {
+		pr_info("%s, enter flowctl\n", __func__);
+		list_for_each_entry(nic, &sender->nic_list, list) {
+			if (nic->flow_ctrl_status == false) {
+				nic->flow_ctrl_status = true;
+				sipa_nic_notify_evt(nic, evt);
+			}
+		}
+	}
+
+	spin_unlock_irqrestore(&sender->nic_lock, flags);
+}
+
+static void sipa_sender_notify_cb(void *priv, enum sipa_irq_evt_type evt,
+				  unsigned long data)
+{
+	unsigned long flags;
+	struct sipa_skb_sender *sender = (struct sipa_skb_sender *)priv;
+	struct sipa_core *ctrl = sipa_get_ctrl_pointer();
+
+	if (evt & SIPA_RECV_WARN_EVT) {
+		dev_err(ctrl->dev,
+			"sipa overflow on ep\n");
+		sender->no_free_cnt++;
+	}
+
+	if (evt & SIPA_IRQ_ENTER_FLOW_CTRL) {
+		spin_lock_irqsave(&sender->send_lock, flags);
+		pr_info("sipa_sender_notify_cb set ep_cover_net true!!!!!\n");
+		sender->enter_flow_ctrl_cnt++;
+		sender->ep_cover_net = true;
+		sipa_inform_evt_to_nics(sender, SIPA_ENTER_FLOWCTRL);
+		spin_unlock_irqrestore(&sender->send_lock, flags);
+	}
+
+	if (evt
& SIPA_IRQ_EXIT_FLOW_CTRL) { + spin_lock_irqsave(&sender->send_lock, flags); + sender->exit_flow_ctrl_cnt++; + sender->ep_cover_net = false; + sipa_inform_evt_to_nics(sender, SIPA_LEAVE_FLOWCTRL); + spin_unlock_irqrestore(&sender->send_lock, flags); + } + wake_up(&sender->free_waitq); +} + +static void sipa_free_sent_items(struct sipa_skb_sender *sender) +{ + bool status = false; + unsigned long flags; + u32 i, num, success_cnt = 0, retry_cnt = 10, failed_cnt = 0; + struct sipa_skb_dma_addr_node *iter, *_iter; +#if defined (__BIG_ENDIAN_BITFIELD) + struct sipa_node_description_tag node; +#else + struct sipa_node_description_tag *node; +#endif + struct sipa_core *ctrl = sipa_get_ctrl_pointer(); + enum sipa_cmn_fifo_index id = sender->ep->send_fifo->fifo_id; + u32 tx_wr, tx_rd, rx_wr, rx_rd; + int exit_flow = 0; + struct sipa_cmn_fifo_cfg_tag *fifo_cfg; + void __iomem *fifo_base; + u32 clr_sts = 0; + u32 int_status = 0; + u32 read_count = 0; + + num = ctrl->hal_ops.recv_node_from_tx_fifo(ctrl->dev, id, + ctrl->cmn_fifo_cfg, -1); + for (i = 0; i < num; i++) { + retry_cnt = 10; +#if defined (__BIG_ENDIAN_BITFIELD) + sipa_get_node_desc((u8 *)ctrl->hal_ops.get_tx_fifo_rp(id, + ctrl->cmn_fifo_cfg, i), &node); +#else + node = ctrl->hal_ops.get_tx_fifo_rp(id, ctrl->cmn_fifo_cfg, i); +#endif + +#if defined (__BIG_ENDIAN_BITFIELD) + if (node.err_code) + dev_err(ctrl->dev, "have node transfer err = %d\n", + node.err_code); +#else + if (node->err_code) + dev_err(ctrl->dev, "have node transfer err = %d\n", + node->err_code); +#endif + +check_again: + spin_lock_irqsave(&sender->send_lock, flags); + if (list_empty(&sender->sending_list)) { + ctrl->hal_ops.get_rx_ptr(SIPA_FIFO_PCIE_UL, ctrl->cmn_fifo_cfg, &rx_wr, &rx_rd); + ctrl->hal_ops.get_tx_ptr(SIPA_FIFO_PCIE_UL, ctrl->cmn_fifo_cfg, &tx_wr, &tx_rd); + dev_err(ctrl->dev, "fifo id %d: send list is empty, old tx_wr=%x tx_rd=%x, rx_wr=%x, rx_rd=%x, left_cnt=%d\n", + sender->ep->send_fifo->fifo_id, tx_wr, tx_rd, rx_wr, rx_rd, atomic_read(&sender->left_cnt)); + + spin_unlock_irqrestore(&sender->send_lock, flags); + goto sipa_free_end; + } + + list_for_each_entry_safe(iter, _iter, &sender->sending_list, list) { +#if defined (__BIG_ENDIAN_BITFIELD) + if (iter->dma_addr == node.address) { +#else + if (iter->dma_addr == node->address) { +#endif + list_del(&iter->list); + list_add_tail(&iter->list, + &sender->pair_free_list); + status = true; + break; + } + } + spin_unlock_irqrestore(&sender->send_lock, flags); + + if (status) { + dma_unmap_single(ctrl->pci_dev, + (dma_addr_t)(iter->dma_addr - ctrl->pcie_mem_offset), + iter->skb->len + + skb_headroom(iter->skb), + DMA_TO_DEVICE); + + dev_kfree_skb_any(iter->skb); + success_cnt++; + status = false; + } else { + if (retry_cnt--) { +#if defined (__BIG_ENDIAN_BITFIELD) + sipa_get_node_desc((u8 *)ctrl->hal_ops.get_tx_fifo_rp(id, + ctrl->cmn_fifo_cfg, i), &node); +#endif + //dev_err(ctrl->dev, "free send skb warning, retry_cnt = %d\n", retry_cnt); + goto check_again; + } + failed_cnt++; + } + } + if(failed_cnt >0){ + dev_err(ctrl->dev, "can't find matching nodes num=%d\n", failed_cnt); + } + + ctrl->hal_ops.set_tx_fifo_rp(id, ctrl->cmn_fifo_cfg, i); + atomic_add(success_cnt, &sender->left_cnt); + if (num != success_cnt) + dev_err(ctrl->dev, "recv num = %d release num = %d\n", num, success_cnt); + +sipa_free_end: + if (sender->free_notify_net && atomic_read(&sender->left_cnt) > sender->ep->send_fifo->rx_fifo.depth / 4) { + sender->free_notify_net = false; + exit_flow = 1; + } + + if(sender->ep_cover_net == true){ + 
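+		/*
+		 * The endpoint reported flow control earlier: poll the fifo
+		 * interrupt status here and, once the exit-flow-control bit
+		 * is set, clear it and notify the NICs to leave flow control.
+		 */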
+		fifo_cfg = ctrl->cmn_fifo_cfg + sender->ep->send_fifo->fifo_id;
+		fifo_base = fifo_cfg->fifo_reg_base;
+
+		int_status = ipa_phy_get_fifo_all_int_sts(fifo_base);
+
+		if (int_status & IPA_INT_EXIT_FLOW_CTRL_STS) {
+			exit_flow = 1;
+			sender->ep_cover_net = false;
+			clr_sts |= IPA_EXIT_FLOW_CONTROL_CLR_BIT;
+			ipa_phy_clear_int(fifo_base, clr_sts);
+			pr_info("%s, exit flow control\n", __func__);
+		} else {
+			pr_info("%s, still in flow control\n", __func__);
+		}
+	}
+
+	if (exit_flow == 1) {
+		spin_lock_irqsave(&sender->send_lock, flags);
+		sipa_inform_evt_to_nics(sender, SIPA_LEAVE_FLOWCTRL);
+		spin_unlock_irqrestore(&sender->send_lock, flags);
+	}
+}
+
+static bool sipa_sender_ck_unfree(struct sipa_skb_sender *sender)
+{
+	struct sipa_core *ctrl = sipa_get_ctrl_pointer();
+	enum sipa_cmn_fifo_index id = sender->ep->send_fifo->fifo_id;
+
+	if (!ctrl->remote_ready) {
+		printk("%s: need wait remote_ready!\n", __func__);
+		return false;
+	}
+
+	if (ctrl->hal_ops.get_tx_empty_status(id, ctrl->cmn_fifo_cfg)) {
+		ctrl->hal_ops.clr_tout_th_intr(id, ctrl->cmn_fifo_cfg);
+		ctrl->hal_ops.set_intr_eb(id, ctrl->cmn_fifo_cfg, true,
+					  SIPA_FIFO_THRESHOLD_IRQ_EN |
+					  SIPA_FIFO_DELAY_TIMER_IRQ_EN);
+		return false;
+	} else {
+		return true;
+	}
+}
+
+static void sipa_free_send_skb(struct sipa_skb_sender *sender)
+{
+	struct sipa_skb_dma_addr_node *iter, *_iter;
+	struct sipa_core *ctrl = sipa_get_ctrl_pointer();
+	unsigned long flags;
+
+	spin_lock_irqsave(&sender->send_lock, flags);
+	if (list_empty(&sender->sending_list)) {
+		dev_err(ctrl->dev, "fifo id %d: send list is empty\n", sender->ep->send_fifo->fifo_id);
+		spin_unlock_irqrestore(&sender->send_lock, flags);
+		return;
+	}
+
+	list_for_each_entry_safe(iter, _iter, &sender->sending_list, list) {
+		list_del(&iter->list);
+		list_add_tail(&iter->list, &sender->pair_free_list);
+		dma_unmap_single(ctrl->pci_dev, (dma_addr_t)(iter->dma_addr - ctrl->pcie_mem_offset),
+				 iter->skb->len + skb_headroom(iter->skb), DMA_TO_DEVICE);
+		dev_kfree_skb_any(iter->skb);
+	}
+	spin_unlock_irqrestore(&sender->send_lock, flags);
+}
+
+static int sipa_free_thread(void *data)
+{
+	struct sipa_skb_sender *sender = (struct sipa_skb_sender *)data;
+	struct sched_param param = {.sched_priority = 90};
+	struct sipa_core *ctrl = sipa_get_ctrl_pointer();
+	enum sipa_cmn_fifo_index id = sender->ep->send_fifo->fifo_id;
+	unsigned long flags;
+	struct sipa_nic *iter, *_iter;
+
+	sched_setscheduler(current, SCHED_RR, &param);
+
+	while (!kthread_should_stop()) {
+		wait_event_interruptible(sender->free_waitq,
+					 sender->free_notify_net || sender->run == 0 || sender->ep_cover_net ||
+					 sipa_sender_ck_unfree(sender));
+
+		spin_lock_irqsave(&sender->exit_lock, flags);
+		if (sender->run == 0) {
+			spin_unlock_irqrestore(&sender->exit_lock, flags);
+			break;
+		}
+		spin_unlock_irqrestore(&sender->exit_lock, flags);
+		sipa_free_sent_items(sender);
+
+		if (!ctrl->hal_ops.get_tx_empty_status(id, ctrl->cmn_fifo_cfg)) {
+			usleep_range(100, 200);
+			//pr_info("%s, not empty\n", __func__);
+		}
+	}
+
+	sipa_free_send_skb(sender);
+	kfree(sender->pair_cache);
+
+	list_for_each_entry_safe(iter, _iter, &sender->nic_list, list) {
+		list_del(&iter->list);
+		kfree(iter);
+	}
+
+	kfree(sender);
+
+	return 0;
+}
+
+void sipa_sender_open_cmn_fifo(struct sipa_skb_sender *sender)
+{
+	struct sipa_core *ctrl = sipa_get_ctrl_pointer();
+	struct sipa_cmn_fifo_cfg_tag *fifo_cfg = sender->ep->send_fifo;
+
+	if (unlikely(!ctrl || !sender)) {
+		pr_err("ctrl %p sender %p not ready\n", ctrl, sender);
+		return;
+	}
+
+	fifo_cfg->irq_cb =
(sipa_irq_notify_cb)sipa_sender_notify_cb; + fifo_cfg->priv = sender; + ctrl->hal_ops.open(fifo_cfg->fifo_id, ctrl->cmn_fifo_cfg, NULL); + + ctrl->hal_ops.set_hw_intr_thres(fifo_cfg->fifo_id, + ctrl->cmn_fifo_cfg, true, + 128, NULL); + ctrl->hal_ops.set_hw_intr_timeout(fifo_cfg->fifo_id, ctrl->cmn_fifo_cfg, + true, 0x64, NULL); + + ctrl->hal_ops.set_intr_txfifo_full(fifo_cfg->fifo_id, + ctrl->cmn_fifo_cfg, true, NULL); +} +EXPORT_SYMBOL(sipa_sender_open_cmn_fifo); + +int create_sipa_skb_sender(struct sipa_endpoint *ep, + struct sipa_skb_sender **sender_pp) +{ + int i, ret; + struct sipa_skb_sender *sender = NULL; + struct sipa_core *ctrl = sipa_get_ctrl_pointer(); + + dev_info(ctrl->dev, "sender create start\n"); + sender = kzalloc(sizeof(*sender), GFP_KERNEL); + if (!sender) { + dev_err(ctrl->dev, "alloc sender failed\n"); + return -ENOMEM; + } + + sender->pair_cache = kcalloc(ep->send_fifo->rx_fifo.depth, + sizeof(struct sipa_skb_dma_addr_node), + GFP_KERNEL); + if (!sender->pair_cache) { + dev_err(ctrl->dev, "alloc sender->pair_cache fail\n"); + kfree(sender); + return -ENOMEM; + } + + INIT_LIST_HEAD(&sender->nic_list); + INIT_LIST_HEAD(&sender->sending_list); + INIT_LIST_HEAD(&sender->pair_free_list); + spin_lock_init(&sender->nic_lock); + spin_lock_init(&sender->send_lock); + spin_lock_init(&sender->exit_lock); + for (i = 0; i < ep->send_fifo->rx_fifo.depth; i++) + list_add_tail(&((sender->pair_cache + i)->list), + &sender->pair_free_list); + + sender->ep = ep; + + atomic_set(&sender->left_cnt, ep->send_fifo->rx_fifo.depth / 4 * 3); + + init_waitqueue_head(&sender->free_waitq); + sender->run = 1; + sender->free_thread = kthread_create(sipa_free_thread, sender, + "sipa-free"); + if (IS_ERR(sender->free_thread)) { + dev_err(ctrl->dev, "Failed to create kthread: ipa-free\n"); + ret = PTR_ERR(sender->free_thread); + kfree(sender->pair_cache); + kfree(sender); + return ret; + } + + *sender_pp = sender; + wake_up_process(sender->free_thread); + return 0; +} +EXPORT_SYMBOL(create_sipa_skb_sender); + +void destroy_sipa_skb_sender(struct sipa_skb_sender *sender) +{ + unsigned long flags; + + spin_lock_irqsave(&sender->exit_lock, flags); + sender->run = 0; + wake_up_interruptible_all(&sender->free_waitq); + spin_unlock_irqrestore(&sender->exit_lock, flags); +} +EXPORT_SYMBOL(destroy_sipa_skb_sender); + +void sipa_skb_sender_add_nic(struct sipa_skb_sender *sender, + struct sipa_nic *nic) +{ + unsigned long flags; + + spin_lock_irqsave(&sender->nic_lock, flags); + list_add_tail(&nic->list, &sender->nic_list); + spin_unlock_irqrestore(&sender->nic_lock, flags); +} +EXPORT_SYMBOL(sipa_skb_sender_add_nic); + +void sipa_skb_sender_remove_nic(struct sipa_skb_sender *sender, + struct sipa_nic *nic) +{ + unsigned long flags; + + spin_lock_irqsave(&sender->nic_lock, flags); + list_del(&nic->list); + spin_unlock_irqrestore(&sender->nic_lock, flags); +} +EXPORT_SYMBOL(sipa_skb_sender_remove_nic); + +int sipa_skb_sender_send_data(struct sipa_skb_sender *sender, + struct sk_buff *skb, + enum sipa_term_type dst, + u8 netid) +{ + unsigned long flags; + u64 dma_addr; + struct sipa_skb_dma_addr_node *node; +#if defined (__BIG_ENDIAN_BITFIELD) + struct sipa_node_description_tag des; +#else + struct sipa_node_description_tag *des; +#endif + struct sipa_core *ctrl = sipa_get_ctrl_pointer(); + struct sipa_cmn_fifo_cfg_tag *fifo_cfg; + void __iomem *fifo_base; + u32 clr_sts = 0; + u32 int_status = 0; + + spin_lock_irqsave(&sender->send_lock, flags); + + if (sender->ep_cover_net == true){ + pr_info("%s, ep_cover_net 
is true, so return EAGAIN\n", __func__);
+		spin_unlock_irqrestore(&sender->send_lock, flags);
+		wake_up(&sender->free_waitq);
+		return -EAGAIN;
+	} else {
+		fifo_cfg = ctrl->cmn_fifo_cfg + sender->ep->send_fifo->fifo_id;
+		fifo_base = fifo_cfg->fifo_reg_base;
+		int_status = ipa_phy_get_fifo_all_int_sts(fifo_base);
+		if (int_status == 0x5FF000) {
+			pr_err("%s: check sts failed, maybe ep is down\n", __func__);
+			spin_unlock_irqrestore(&sender->send_lock, flags);
+			return -EINPROGRESS;
+		}
+
+		if (int_status & IPA_INT_ENTER_FLOW_CTRL_STS) {
+			pr_info("sipa_skb_sender_send_data set ep_cover_net true!!!!!\n");
+			sender->ep_cover_net = true;
+			sender->enter_flow_ctrl_cnt++;
+			clr_sts |= IPA_ENTRY_FLOW_CONTROL_CLR_BIT;
+			ipa_phy_clear_int(fifo_base, clr_sts);
+			sipa_inform_evt_to_nics(sender, SIPA_ENTER_FLOWCTRL);
+			spin_unlock_irqrestore(&sender->send_lock, flags);
+			wake_up(&sender->free_waitq);
+			return -EAGAIN;
+		}
+	}
+
+	if (sender->free_notify_net == true) {
+		pr_info("%s: free_notify_net is true, so return EAGAIN\n", __func__);
+		spin_unlock_irqrestore(&sender->send_lock, flags);
+		wake_up(&sender->free_waitq);
+		return -EAGAIN;
+	}
+
+	if (!atomic_read(&sender->left_cnt)) {
+		sender->no_free_cnt++;
+		sender->free_notify_net = true;
+		sipa_inform_evt_to_nics(sender, SIPA_ENTER_FLOWCTRL);
+		spin_unlock_irqrestore(&sender->send_lock, flags);
+		wake_up(&sender->free_waitq);
+		return -EAGAIN;
+	}
+
+	dma_addr = (u64)dma_map_single(ctrl->pci_dev, skb->head,
+				       skb->len + skb_headroom(skb),
+				       DMA_TO_DEVICE);
+
+	if (unlikely(dma_mapping_error(ctrl->pci_dev, (dma_addr_t)dma_addr))) {
+		sender->free_notify_net = true;
+		sipa_inform_evt_to_nics(sender, SIPA_ENTER_FLOWCTRL);
+		spin_unlock_irqrestore(&sender->send_lock, flags);
+		wake_up(&sender->free_waitq);
+		return -EAGAIN;
+	}
+
+	dma_addr += ctrl->pcie_mem_offset;
+#if defined (__BIG_ENDIAN_BITFIELD)
+	memset(&des, 0, sizeof(des));
+	des.address = dma_addr;
+	des.length = skb->len;
+	des.offset = skb_headroom(skb);
+	des.net_id = netid;
+	des.dst = dst;
+	des.src = sender->ep->send_fifo->cur;
+	des.err_code = 0;
+	des.intr = 0;
+	sipa_set_node_desc((u8 *)ctrl->hal_ops.get_rx_fifo_wr(sender->ep->send_fifo->fifo_id,
+			   ctrl->cmn_fifo_cfg, 0), (u8 *)&des);
+#else
+	des = ctrl->hal_ops.get_rx_fifo_wr(sender->ep->send_fifo->fifo_id,
+					   ctrl->cmn_fifo_cfg, 0);
+	des->address = dma_addr;
+	des->length = skb->len;
+	des->offset = skb_headroom(skb);
+	des->net_id = netid;
+	des->dst = dst;
+	des->src = sender->ep->send_fifo->cur;
+	des->err_code = 0;
+	des->intr = 0;
+#endif
+	node = list_first_entry(&sender->pair_free_list,
+				struct sipa_skb_dma_addr_node,
+				list);
+	node->skb = skb;
+	node->dma_addr = dma_addr;
+	list_del(&node->list);
+	list_add_tail(&node->list, &sender->sending_list);
+	ctrl->hal_ops.set_rx_fifo_wr(ctrl->pci_dev,
+				     sender->ep->send_fifo->fifo_id,
+				     ctrl->cmn_fifo_cfg, 1);
+	atomic_dec(&sender->left_cnt);
+	spin_unlock_irqrestore(&sender->send_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(sipa_skb_sender_send_data);
diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/Kconfig b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/Kconfig
new file mode 100644
index 000000000..9fe141954
--- /dev/null
+++ b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/Kconfig
@@ -0,0 +1,26 @@
+menu "SIPC modules"
+
+config SPRD_SIPC
+	bool "Sprd IPC"
+	default n
+	select GENERIC_ALLOCATOR
+	help
+	  SIPC is a module for the Spreadtrum AP/CP communication system.
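+
+# Illustrative .config fragment only (an assumption, not part of this patch):
+# since the sub-options below depend on SPRD_SIPC, enabling the pipe and pool
+# drivers implies, e.g.:
+#   CONFIG_SPRD_SIPC=y
+#   CONFIG_SPRD_SIPC_SPIPE=y
+#   CONFIG_SPRD_SIPC_SPOOL=y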
+
+config SPRD_SIPC_SPIPE
+	bool "SPRD pipe driver based on SBUF"
+	default n
+	depends on SPRD_SIPC
+	help
+	  This driver is a pipe driver based on SBUF, which creates
+	  general pipes between AP & CP.
+
+config SPRD_SIPC_SPOOL
+	bool "SPRD pool driver based on SBLOCK"
+	default n
+	depends on SPRD_SIPC
+	help
+	  This driver is a pool driver based on SBLOCK, which creates
+	  general pools between AP & CP.
+
+endmenu
diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/Makefile b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/Makefile
new file mode 100644
index 000000000..caed73a4c
--- /dev/null
+++ b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/Makefile
@@ -0,0 +1,6 @@
+ccflags-y += -DCONFIG_SPRD_PCIE_EP_DEVICE -DCONFIG_SPRD_SIPA
+
+obj-y += sipc.o smsg.o smem.o sbuf.o sblock.o sipc_debugfs.o
+obj-y += spipe.o
+obj-y += spool.o
+
diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/sblock.c b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/sblock.c
new file mode 100644
index 000000000..730f531fd
--- /dev/null
+++ b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/sblock.c
@@ -0,0 +1,1911 @@
+/*
+ * Copyright (C) 2019 Spreadtrum Communications Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,4,60 ))
+#include
+#include
+#endif
+
+#include "../include/sipc.h"
+#include "sipc_priv.h"
+#include "sblock.h"
+
+static struct sblock_mgr *sblocks[SIPC_ID_NR][SMSG_VALID_CH_NR];
+
+/* put one block pack to the pool */
+void sblock_put(u8 dst, u8 channel, struct sblock *blk)
+{
+	struct sblock_mgr *sblock;
+	struct sblock_ring *ring;
+	unsigned long flags;
+	int txpos;
+	int index;
+	u8 ch_index;
+	struct sblock_ring_header_op *poolhd_op;
+
+	ch_index = sipc_channel2index(channel);
+	if (ch_index == INVALID_CHANEL_INDEX) {
+		pr_err("%s:channel %d invalid!\n", __func__, channel);
+		return;
+	}
+
+	sblock = sblocks[dst][ch_index];
+	if (!sblock)
+		return;
+
+	ring = sblock->ring;
+	poolhd_op = &(ring->header_op.poolhd_op);
+
+	spin_lock_irqsave(&ring->p_txlock, flags);
+	txpos = sblock_get_ringpos(*(poolhd_op->tx_rd_p) - 1,
+				   poolhd_op->tx_count);
+	ring->p_txblks[txpos].addr = blk->addr -
+				     sblock->smem_virt +
+				     sblock->stored_smem_addr;
+	ring->p_txblks[txpos].length = poolhd_op->tx_size;
+	*(poolhd_op->tx_rd_p) = *(poolhd_op->tx_rd_p) - 1;
+	if ((int)(*(poolhd_op->tx_wt_p) - *(poolhd_op->tx_rd_p)) == 1)
+		wake_up_interruptible_all(&(ring->getwait));
+	index = sblock_get_index((blk->addr - ring->txblk_virt),
+				 sblock->txblksz);
+	ring->txrecord[index] = SBLOCK_BLK_STATE_DONE;
+
+	spin_unlock_irqrestore(&ring->p_txlock, flags);
+
+	/* set write mask. */
+	spin_lock_irqsave(&ring->poll_lock, flags);
+	ring->poll_mask |= POLLOUT | POLLWRNORM;
+	spin_unlock_irqrestore(&ring->poll_lock, flags);
+
+	/* request in sblock_get, release here */
+	sipc_smem_release_resource(ring->tx_pms, dst);
+}
+EXPORT_SYMBOL_GPL(sblock_put);
+
+static bool sblock_has_data(struct sblock_mgr *sblock, bool tx)
+{
+	struct sblock_ring_header_op *poolhd_op;
+	struct sblock_ring_header_op *ringhd_op;
+	struct sblock_ring *ring = sblock->ring;
+	bool has_data;
+	unsigned long flags;
+
+	/* if it is local shared memory,
+	 * check the read and write pointers directly.
+	 */
+	if (smsg_ipcs[sblock->dst]->smem_type == SMEM_LOCAL) {
+		if (tx) {
+			poolhd_op = &ring->header_op.poolhd_op;
+			return *(poolhd_op->tx_rd_p) != *(poolhd_op->tx_wt_p);
+		}
+
+		ringhd_op = &ring->header_op.ringhd_op;
+		return *(ringhd_op->rx_wt_p) != *(ringhd_op->rx_rd_p);
+	}
+
+	/*
+	 * if it is remote shared memory, read the poll_mask; this situation
+	 * requires that the poll_mask be accurate enough.
+	 */
+	spin_lock_irqsave(&ring->poll_lock, flags);
+	if (tx)
+		has_data = ring->poll_mask & (POLLOUT | POLLWRNORM);
+	else
+		has_data = ring->poll_mask & (POLLIN | POLLRDNORM);
+	spin_unlock_irqrestore(&ring->poll_lock, flags);
+
+	return has_data;
+}
+
+/* clean rings and recover pools */
+static int sblock_recover(u8 dst, u8 channel)
+{
+	struct sblock_mgr *sblock;
+	struct sblock_ring *ring = NULL;
+	struct sblock_ring_header_op *poolhd_op;
+	struct sblock_ring_header_op *ringhd_op;
+	unsigned long pflags, qflags;
+	int i, j, rval;
+	u8 ch_index;
+
+	ch_index = sipc_channel2index(channel);
+	if (ch_index == INVALID_CHANEL_INDEX) {
+		pr_err("%s:channel %d invalid!\n", __func__, channel);
+		return -EINVAL;
+	}
+
+	sblock = sblocks[dst][ch_index];
+	if (!sblock)
+		return -ENODEV;
+
+	ring = sblock->ring;
+	poolhd_op = &(ring->header_op.poolhd_op);
+	ringhd_op = &(ring->header_op.ringhd_op);
+
+	sblock->state = SBLOCK_STATE_IDLE;
+	wake_up_interruptible_all(&ring->getwait);
+	wake_up_interruptible_all(&ring->recvwait);
+
+	/* must request resource before read or write share memory */
+	rval = sipc_smem_request_resource(ring->rx_pms, dst, -1);
+	if (rval < 0)
+		return rval;
+
+	spin_lock_irqsave(&ring->r_txlock, pflags);
+	/* clean txblks ring */
+	*(ringhd_op->tx_wt_p) = *(ringhd_op->tx_rd_p);
+
+	spin_lock_irqsave(&ring->p_txlock, qflags);
+	/* recover txblks pool */
+	*(poolhd_op->tx_rd_p) = *(poolhd_op->tx_wt_p);
+	for (i = 0, j = 0; i < poolhd_op->tx_count; i++) {
+		if (ring->txrecord[i] == SBLOCK_BLK_STATE_DONE) {
+			ring->p_txblks[j].addr = i * sblock->txblksz +
+						 poolhd_op->tx_addr;
+			ring->p_txblks[j].length = sblock->txblksz;
+			*(poolhd_op->tx_wt_p) = *(poolhd_op->tx_wt_p) + 1;
+			j++;
+		}
+	}
+	spin_unlock_irqrestore(&ring->p_txlock, qflags);
+	spin_unlock_irqrestore(&ring->r_txlock, pflags);
+
+	spin_lock_irqsave(&ring->r_rxlock, pflags);
+	/* clean rxblks ring */
+	*(ringhd_op->rx_rd_p) = *(ringhd_op->rx_wt_p);
+
+	spin_lock_irqsave(&ring->p_rxlock, qflags);
+	/* recover rxblks pool */
+	*(poolhd_op->rx_wt_p) = *(poolhd_op->rx_rd_p);
+	for (i = 0, j = 0; i < poolhd_op->rx_count; i++) {
+		if (ring->rxrecord[i] == SBLOCK_BLK_STATE_DONE) {
+			ring->p_rxblks[j].addr = i * sblock->rxblksz +
+						 poolhd_op->rx_addr;
+			ring->p_rxblks[j].length = sblock->rxblksz;
+			*(poolhd_op->rx_wt_p) = *(poolhd_op->rx_wt_p) + 1;
+			j++;
+		}
+	}
+	spin_unlock_irqrestore(&ring->p_rxlock, qflags);
+	spin_unlock_irqrestore(&ring->r_rxlock, pflags);
+
+	/* restore write mask. */
+	spin_lock_irqsave(&ring->poll_lock, qflags);
+	ring->poll_mask |= POLLOUT | POLLWRNORM;
+	spin_unlock_irqrestore(&ring->poll_lock, qflags);
+
+	/* release resource */
+	sipc_smem_release_resource(ring->rx_pms, dst);
+
+	return 0;
+}
+
+static int sblock_host_init(struct smsg_ipc *sipc, struct sblock_mgr *sblock,
+			    u32 txblocknum, u32 txblocksize,
+			    u32 rxblocknum, u32 rxblocksize)
+{
+	volatile struct sblock_ring_header *ringhd;
+	volatile struct sblock_ring_header *poolhd;
+	struct sblock_ring_header_op *ringhd_op;
+	struct sblock_ring_header_op *poolhd_op;
+	u32 hsize;
+	int i, rval = -ENOMEM;
+	phys_addr_t offset = 0;
+	u8 dst = sblock->dst;
+
+	txblocksize = ALIGN(txblocksize, SBLOCK_ALIGN_BYTES);
+	rxblocksize = ALIGN(rxblocksize, SBLOCK_ALIGN_BYTES);
+	sblock->txblksz = txblocksize;
+	sblock->rxblksz = rxblocksize;
+	sblock->txblknum = txblocknum;
+	sblock->rxblknum = rxblocknum;
+
+	pr_debug("%s: channel %d-%d, txblocksize=%#x, rxblocksize=%#x!\n",
+		 __func__,
+		 sblock->dst,
+		 sblock->channel,
+		 txblocksize,
+		 rxblocksize);
+
+	pr_debug("%s: channel %d-%d, txblocknum=%#x, rxblocknum=%#x!\n",
+		 __func__,
+		 sblock->dst,
+		 sblock->channel,
+		 txblocknum,
+		 rxblocknum);
+
+	/* allocate smem */
+	hsize = sizeof(struct sblock_header);
+	/* for header */
+	sblock->smem_size = hsize +
+		/* for blks */
+		txblocknum * txblocksize + rxblocknum * rxblocksize +
+		/* for ring */
+		(txblocknum + rxblocknum) * sizeof(struct sblock_blks) +
+		/* for pool */
+		(txblocknum + rxblocknum) * sizeof(struct sblock_blks);
+
+	sblock->smem_addr = smem_alloc(dst, sblock->smem_size);
+	if (!sblock->smem_addr) {
+		pr_err("%s: channel %d-%d, Failed to alloc smem for sblock\n",
+		       __func__,
+		       sblock->dst,
+		       sblock->channel);
+		return -ENOMEM;
+	}
+
+	pr_debug("%s: channel %d-%d, smem_addr=%#lx, smem_size=%#x!\n",
+		 __func__,
+		 sblock->dst,
+		 sblock->channel,
+		 (unsigned long)(sblock->smem_addr + offset),
+		 sblock->smem_size);
+
+	sblock->dst_smem_addr = sblock->smem_addr -
+				sipc->smem_base + sipc->dst_smem_base;
+
+	/* in host mode, it is the client physical address. */
+	sblock->stored_smem_addr = sblock->dst_smem_addr;
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+	offset = sipc->high_offset;
+	offset = offset << 32;
+#endif
+	pr_debug("%s: channel %d-%d, offset = 0x%lx!\n",
+		 __func__,
+		 sblock->dst,
+		 sblock->channel,
+		 (unsigned long)offset);
+	sblock->smem_virt = shmem_ram_vmap_nocache(dst,
+						   sblock->smem_addr + offset,
+						   sblock->smem_size);
+	if (!sblock->smem_virt) {
+		pr_err("%s: channel %d-%d, Failed to map smem for sblock\n",
+		       __func__,
+		       sblock->dst,
+		       sblock->channel);
+		goto sblock_host_smem_free;
+	}
+
+	/* alloc ring */
+	sblock->ring->txrecord = kcalloc(txblocknum, sizeof(int), GFP_KERNEL);
+	if (!sblock->ring->txrecord)
+		goto sblock_host_unmap;
+
+	sblock->ring->rxrecord = kcalloc(rxblocknum, sizeof(int), GFP_KERNEL);
+	if (!sblock->ring->rxrecord)
+		goto sblock_host_tx_free;
+
+	/* must request resource before read or write share memory */
+	rval = sipc_smem_request_resource(sipc->sipc_pms, sipc->dst, -1);
+	if (rval < 0)
+		goto sblock_host_rx_free;
+
+	/* initialize header */
+	ringhd = (volatile struct sblock_ring_header *)(sblock->smem_virt);
+	ringhd->txblk_addr = sblock->stored_smem_addr + hsize;
+	ringhd->txblk_count = txblocknum;
+	ringhd->txblk_size = txblocksize;
+	ringhd->txblk_rdptr = 0;
+	ringhd->txblk_wrptr = 0;
+	ringhd->txblk_blks = ringhd->txblk_addr +
+		txblocknum * txblocksize + rxblocknum * rxblocksize;
+	ringhd->rxblk_addr = ringhd->txblk_addr + txblocknum * txblocksize;
+	ringhd->rxblk_count = rxblocknum;
+	ringhd->rxblk_size = rxblocksize;
+	ringhd->rxblk_rdptr = 0;
+	ringhd->rxblk_wrptr = 0;
+	ringhd->rxblk_blks = ringhd->txblk_blks +
+		txblocknum * sizeof(struct sblock_blks);
+
+	poolhd = ringhd + 1;
+	poolhd->txblk_addr = sblock->stored_smem_addr + hsize;
+	poolhd->txblk_count = txblocknum;
+	poolhd->txblk_size = txblocksize;
+	poolhd->txblk_rdptr = 0;
+	poolhd->txblk_wrptr = 0;
+	poolhd->txblk_blks = ringhd->rxblk_blks +
+		rxblocknum * sizeof(struct sblock_blks);
+	poolhd->rxblk_addr = ringhd->txblk_addr + txblocknum * txblocksize;
+	poolhd->rxblk_count = rxblocknum;
+	poolhd->rxblk_size = rxblocksize;
+	poolhd->rxblk_rdptr = 0;
+	poolhd->rxblk_wrptr = 0;
+	poolhd->rxblk_blks = poolhd->txblk_blks +
+		txblocknum * sizeof(struct sblock_blks);
+
+	pr_debug("%s: channel %d-%d, init ring!\n",
+		 __func__,
+		 sblock->dst,
+		 sblock->channel);
+
+	/* initialize ring */
+	sblock->ring->header = sblock->smem_virt;
+	sblock->ring->txblk_virt = sblock->smem_virt +
+		(ringhd->txblk_addr - sblock->stored_smem_addr);
+	sblock->ring->r_txblks = sblock->smem_virt +
+		(ringhd->txblk_blks - sblock->stored_smem_addr);
+	sblock->ring->rxblk_virt = sblock->smem_virt +
+		(ringhd->rxblk_addr - sblock->stored_smem_addr);
+	sblock->ring->r_rxblks = sblock->smem_virt +
+		(ringhd->rxblk_blks - sblock->stored_smem_addr);
+	sblock->ring->p_txblks = sblock->smem_virt +
+		(poolhd->txblk_blks - sblock->stored_smem_addr);
+	sblock->ring->p_rxblks = sblock->smem_virt +
+		(poolhd->rxblk_blks - sblock->stored_smem_addr);
+
+	for (i = 0; i < txblocknum; i++) {
+		sblock->ring->p_txblks[i].addr = poolhd->txblk_addr +
+						 i * txblocksize;
+		sblock->ring->p_txblks[i].length = txblocksize;
+		sblock->ring->txrecord[i] = SBLOCK_BLK_STATE_DONE;
+		poolhd->txblk_wrptr++;
+	}
+	for (i = 0; i < rxblocknum; i++) {
+		sblock->ring->p_rxblks[i].addr = poolhd->rxblk_addr +
+						 i * rxblocksize;
+		sblock->ring->p_rxblks[i].length = rxblocksize;
+		sblock->ring->rxrecord[i] = SBLOCK_BLK_STATE_DONE;
+		poolhd->rxblk_wrptr++;
+	}
+
+	/* init, set write mask. */
+	sblock->ring->poll_mask = POLLOUT | POLLWRNORM;
+
+	/* init header op */
+	ringhd_op = &((sblock->ring->header_op).ringhd_op);
+	ringhd_op->tx_rd_p = &ringhd->txblk_rdptr;
+	ringhd_op->tx_wt_p = &ringhd->txblk_wrptr;
+	ringhd_op->rx_rd_p = &ringhd->rxblk_rdptr;
+	ringhd_op->rx_wt_p = &ringhd->rxblk_wrptr;
+	ringhd_op->tx_addr = ringhd->txblk_addr;
+	ringhd_op->tx_count = ringhd->txblk_count;
+	ringhd_op->tx_size = ringhd->txblk_size;
+	ringhd_op->tx_blks = ringhd->txblk_blks;
+	ringhd_op->rx_addr = ringhd->rxblk_addr;
+	ringhd_op->rx_count = ringhd->rxblk_count;
+	ringhd_op->rx_size = ringhd->rxblk_size;
+	ringhd_op->rx_blks = ringhd->rxblk_blks;
+	poolhd_op = &((sblock->ring->header_op).poolhd_op);
+	poolhd_op->tx_rd_p = &poolhd->txblk_rdptr;
+	poolhd_op->tx_wt_p = &poolhd->txblk_wrptr;
+	poolhd_op->rx_rd_p = &poolhd->rxblk_rdptr;
+	poolhd_op->rx_wt_p = &poolhd->rxblk_wrptr;
+	poolhd_op->tx_addr = poolhd->txblk_addr;
+	poolhd_op->tx_count = poolhd->txblk_count;
+	poolhd_op->tx_size = poolhd->txblk_size;
+	poolhd_op->tx_blks = poolhd->txblk_blks;
+	poolhd_op->rx_addr = poolhd->rxblk_addr;
+	poolhd_op->rx_count = poolhd->rxblk_count;
+	poolhd_op->rx_size = poolhd->rxblk_size;
+	poolhd_op->rx_blks = poolhd->rxblk_blks;
+
+	/* release resource */
+	sipc_smem_release_resource(sipc->sipc_pms, sipc->dst);
+
+	return 0;
+
+sblock_host_rx_free:
+	kfree(sblock->ring->rxrecord);
+sblock_host_tx_free:
+	kfree(sblock->ring->txrecord);
+sblock_host_unmap:
+	shmem_ram_unmap(dst, sblock->smem_virt);
+sblock_host_smem_free:
+	smem_free(dst, sblock->smem_addr, sblock->smem_size);
+
+	pr_err("%s: channel %d-%d, failed, ENOMEM!\n",
+	       __func__,
+	       sblock->dst,
+	       sblock->channel);
+
+	return rval;
+}
+
+static int sblock_client_init(struct smsg_ipc *sipc, struct sblock_mgr *sblock)
+{
+	volatile struct sblock_ring_header *ringhd;
+	volatile struct sblock_ring_header *poolhd;
+	struct sblock_ring_header_op *ringhd_op;
+	struct sblock_ring_header_op *poolhd_op;
+	u32 hsize;
+	u8 dst = sblock->dst;
+	phys_addr_t offset = 0;
+	u32 txblocknum, txblocksize, rxblocknum, rxblocksize;
+	int rval = -ENOMEM;
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+	offset = sipc->high_offset;
+	offset = offset << 32;
+	pr_debug("%s: channel %d-%d, offset = 0x%llx!\n",
+		 __func__,
+		 sblock->dst,
+		 sblock->channel,
+		 offset);
+#endif
+
+	/* get block num and block size */
+	hsize = sizeof(struct sblock_header);
+	sblock->smem_virt = shmem_ram_vmap_nocache(dst,
+						   sblock->smem_addr + offset,
+						   hsize);
+	if (!sblock->smem_virt) {
+		pr_err("%s: channel %d-%d, Failed to map smem for sblock\n",
+		       __func__,
+		       sblock->dst,
+		       sblock->channel);
+		return -ENOMEM;
+	}
+	ringhd = (volatile struct sblock_ring_header *)(sblock->smem_virt);
+	/* client mode, tx <==> rx */
+	txblocknum = ringhd->rxblk_count;
+	txblocksize = ringhd->rxblk_size;
+	rxblocknum = ringhd->txblk_count;
+	rxblocksize = ringhd->txblk_size;
+
+	sblock->txblksz = txblocksize;
+	sblock->rxblksz = rxblocksize;
+	sblock->txblknum = txblocknum;
+	sblock->rxblknum = rxblocknum;
+	shmem_ram_unmap(dst, sblock->smem_virt);
+
+	pr_debug("%s: channel %d-%d, txblocksize=%#x, rxblocksize=%#x!\n",
+		 __func__,
+		 sblock->dst,
+		 sblock->channel,
+		 txblocksize,
+		 rxblocksize);
+
+	pr_debug("%s: channel %d-%d, txblocknum=%#x, rxblocknum=%#x!\n",
+		 __func__,
+		 sblock->dst,
+		 sblock->channel,
+		 txblocknum,
+		 rxblocknum);
+
+	/* allocate smem */
+	/* for header */
+	sblock->smem_size = hsize +
+		/* for blks */
+		txblocknum * txblocksize + rxblocknum * rxblocksize +
+		/* for ring */
+		(txblocknum + rxblocknum) * sizeof(struct sblock_blks) +
+		/* for pool */
+		(txblocknum + rxblocknum) * sizeof(struct sblock_blks);
+
+	sblock->smem_addr_debug = smem_alloc(dst, sblock->smem_size);
+	if (!sblock->smem_addr_debug) {
+		pr_err("%s: channel %d-%d, Failed to allocate smem\n",
+		       __func__,
+		       sblock->dst,
+		       sblock->channel);
+		return -ENOMEM;
+	}
+
+	pr_debug("%s: channel %d-%d, smem_addr=%#lx, smem_size=%#x!\n",
+		 __func__,
+		 sblock->dst,
+		 sblock->channel,
+		 (unsigned long)(sblock->smem_addr + offset),
+		 sblock->smem_size);
+
+	/* in client mode, it is its own physical address. */
+	sblock->stored_smem_addr = sblock->smem_addr;
+
+	/* get smem virtual address */
+	sblock->smem_virt = shmem_ram_vmap_nocache(dst,
+						   sblock->smem_addr + offset,
+						   sblock->smem_size);
+	if (!sblock->smem_virt) {
+		pr_err("%s: channel %d-%d, Failed to map smem for sblock!\n",
+		       __func__,
+		       sblock->dst,
+		       sblock->channel);
+		goto sblock_client_smem_free;
+	}
+
+	/* initialize ring and header */
+	sblock->ring->txrecord = kcalloc(txblocknum, sizeof(int), GFP_KERNEL);
+	if (!sblock->ring->txrecord)
+		goto sblock_client_unmap;
+
+	sblock->ring->rxrecord = kcalloc(rxblocknum, sizeof(int), GFP_KERNEL);
+	if (!sblock->ring->rxrecord)
+		goto sblock_client_tx_free;
+
+	ringhd = (volatile struct sblock_ring_header *)(sblock->smem_virt);
+	poolhd = ringhd + 1;
+
+	/* must request resource before read or write share memory */
+	rval = sipc_smem_request_resource(sipc->sipc_pms, sipc->dst, -1);
+	if (rval < 0)
+		goto sblock_client_tx_free;
+
+	/* client mode, tx <==> rx */
+	sblock->ring->header = sblock->smem_virt;
+	sblock->ring->txblk_virt = sblock->smem_virt +
+		(ringhd->rxblk_addr - sblock->stored_smem_addr);
+	sblock->ring->rxblk_virt = sblock->smem_virt +
+		(ringhd->txblk_addr - sblock->stored_smem_addr);
+	sblock->ring->r_txblks = sblock->smem_virt +
+		(ringhd->rxblk_blks - sblock->stored_smem_addr);
+	sblock->ring->r_rxblks = sblock->smem_virt +
+		(ringhd->txblk_blks - sblock->stored_smem_addr);
+	sblock->ring->p_txblks = sblock->smem_virt +
+		(poolhd->rxblk_blks - sblock->stored_smem_addr);
+	sblock->ring->p_rxblks = sblock->smem_virt +
+		(poolhd->txblk_blks - sblock->stored_smem_addr);
+
+	/* init header op, tx <==> rx */
+	ringhd_op = &((sblock->ring->header_op).ringhd_op);
+	ringhd_op->tx_rd_p = &ringhd->rxblk_rdptr;
+	ringhd_op->tx_wt_p = &ringhd->rxblk_wrptr;
+	ringhd_op->rx_rd_p = &ringhd->txblk_rdptr;
+	ringhd_op->rx_wt_p = &ringhd->txblk_wrptr;
+	ringhd_op->tx_addr = ringhd->rxblk_addr;
+	ringhd_op->tx_count = ringhd->rxblk_count;
+	ringhd_op->tx_size = ringhd->rxblk_size;
+	ringhd_op->tx_blks = ringhd->rxblk_blks;
+	ringhd_op->rx_addr = ringhd->txblk_addr;
+	ringhd_op->rx_count = ringhd->txblk_count;
+	ringhd_op->rx_size = ringhd->txblk_size;
+	ringhd_op->rx_blks = ringhd->txblk_blks;
+	poolhd_op = &((sblock->ring->header_op).poolhd_op);
+	poolhd_op->tx_rd_p = &poolhd->rxblk_rdptr;
+	poolhd_op->tx_wt_p = &poolhd->rxblk_wrptr;
+	poolhd_op->rx_rd_p = &poolhd->txblk_rdptr;
+	poolhd_op->rx_wt_p = &poolhd->txblk_wrptr;
+	poolhd_op->tx_addr = poolhd->rxblk_addr;
+	poolhd_op->tx_count = poolhd->rxblk_count;
+	poolhd_op->tx_size = poolhd->rxblk_size;
+	poolhd_op->tx_blks = poolhd->rxblk_blks;
+	poolhd_op->rx_addr = poolhd->txblk_addr;
+	poolhd_op->rx_count = poolhd->txblk_count;
+	poolhd_op->rx_size = poolhd->txblk_size;
+	poolhd_op->rx_blks = poolhd->txblk_blks;
+
+	/* release resource */
+	sipc_smem_release_resource(sipc->sipc_pms, sipc->dst);
+
+	return 0;
+
+sblock_client_tx_free:
+	kfree(sblock->ring->txrecord);
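+	/*
+	 * The unwind labels below run in reverse order of setup: free the
+	 * tx record array, then unmap the shared memory, then free the smem
+	 * allocation itself.
+	 */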
+sblock_client_unmap:
+	shmem_ram_unmap(dst, sblock->smem_virt);
+sblock_client_smem_free:
+	smem_free(dst, sblock->smem_addr_debug, sblock->smem_size);
+
+	return rval;
+}
+
+static int sblock_thread(void *data)
+{
+	struct sblock_mgr *sblock = data;
+	struct smsg mcmd, mrecv;
+	unsigned long flags;
+	int rval;
+	int recovery = 0;
+	struct smsg_ipc *sipc;
+	struct sblock_ring *ring;
+
+	/* since the channel open may hang, we call it in the sblock thread */
+	rval = smsg_ch_open(sblock->dst, sblock->channel, -1);
+	if (rval != 0) {
+		pr_err("Failed to open channel %d\n",
+		       sblock->channel);
+
+		if (sblock->pre_cfg && sblock->handler) {
+			sblock->handler(SBLOCK_NOTIFY_OPEN_FAILED,
+					sblock->data);
+		}
+
+		return rval;
+	}
+
+	if (sblock->pre_cfg) {
+		sblock->state = SBLOCK_STATE_READY;
+		recovery = 1;
+		if (sblock->handler)
+			sblock->handler(SBLOCK_NOTIFY_OPEN, sblock->data);
+	}
+
+	/* if client, send SMSG_CMD_SBLOCK_INIT, wait SMSG_DONE_SBLOCK_INIT */
+	sipc = smsg_ipcs[sblock->dst];
+	if (sipc->client) {
+		smsg_set(&mcmd, sblock->channel, SMSG_TYPE_CMD,
+			 SMSG_CMD_SBLOCK_INIT, 0);
+		smsg_send(sblock->dst, &mcmd, -1);
+		do {
+			smsg_set(&mrecv, sblock->channel, 0, 0, 0);
+			rval = smsg_recv(sblock->dst, &mrecv, -1);
+			if (rval != 0) {
+				sblock->thread = NULL;
+				return rval;
+			}
+		} while (mrecv.type != SMSG_TYPE_DONE ||
+			 mrecv.flag != SMSG_DONE_SBLOCK_INIT);
+		sblock->smem_addr = mrecv.value;
+		pr_info("%s: channel %d-%d, done_sblock_init, address=0x%x!\n",
+			__func__,
+			sblock->dst,
+			sblock->channel,
+			sblock->smem_addr);
+		if (sblock_client_init(sipc, sblock)) {
+			sblock->thread = NULL;
+			return 0;
+		}
+		sblock->state = SBLOCK_STATE_READY;
+		if (sblock->handler)
+			sblock->handler(SBLOCK_NOTIFY_OPEN,
+					sblock->data);
+	}
+
+	/* handle the sblock events */
+	while (!kthread_should_stop()) {
+		/* monitor sblock recv smsg */
+		smsg_set(&mrecv, sblock->channel, 0, 0, 0);
+		rval = smsg_recv(sblock->dst, &mrecv, -1);
+		if (rval == -EIO || rval == -ENODEV) {
+			/* channel state is FREE */
+			msleep(20);
+			continue;
+		}
+
+		pr_debug("sblock thread recv msg: dst=%d, channel=%d, type=%d, flag=0x%04x, value=0x%08x\n",
+			 sblock->dst, sblock->channel,
+			 mrecv.type, mrecv.flag, mrecv.value);
+
+		switch (mrecv.type) {
+		case SMSG_TYPE_OPEN:
+			pr_info("%s: channel %d-%d, recv open!\n",
+				__func__,
+				sblock->dst,
+				sblock->channel);
+			/* handle channel recovery */
+			if (recovery) {
+				if (sblock->handler)
+					sblock->handler(SBLOCK_NOTIFY_CLOSE,
+							sblock->data);
+				sblock_recover(sblock->dst, sblock->channel);
+			}
+			smsg_open_ack(sblock->dst, sblock->channel);
+			if (sblock->pre_cfg)
+				sblock->state = SBLOCK_STATE_READY;
+			break;
+		case SMSG_TYPE_CLOSE:
+			/* handle channel recovery */
+			smsg_close_ack(sblock->dst, sblock->channel);
+			if (sblock->handler)
+				sblock->handler(SBLOCK_NOTIFY_CLOSE,
+						sblock->data);
+			sblock->state = SBLOCK_STATE_IDLE;
+			break;
+		case SMSG_TYPE_CMD:
+			if (!sblock->pre_cfg) {
+				/* respond cmd done for sblock init */
+				WARN_ON(mrecv.flag != SMSG_CMD_SBLOCK_INIT);
+				smsg_set(&mcmd,
+					 sblock->channel,
+					 SMSG_TYPE_DONE,
+					 SMSG_DONE_SBLOCK_INIT,
+					 sblock->dst_smem_addr);
+				smsg_send(sblock->dst, &mcmd, -1);
+				sblock->state = SBLOCK_STATE_READY;
+				recovery = 1;
+				pr_info("%s: channel %d-%d, SMSG_CMD_SBLOCK_INIT, dst address = 0x%x!\n",
+					__func__,
+					sblock->dst,
+					sblock->channel,
+					sblock->dst_smem_addr);
+
+				if (sblock->handler)
+					sblock->handler(SBLOCK_NOTIFY_OPEN,
+							sblock->data);
+			}
+			break;
+		case SMSG_TYPE_EVENT:
+			/* handle sblock send/release events */
+			switch (mrecv.flag) {
+			case
SMSG_EVENT_SBLOCK_SEND: + ring = sblock->ring; + /* set read mask. */ + spin_lock_irqsave(&ring->poll_lock, flags); + ring->poll_mask |= POLLIN | POLLRDNORM; + spin_unlock_irqrestore(&ring->poll_lock, flags); + wake_up_interruptible_all(&sblock->ring->recvwait); + if (sblock->handler) + sblock->handler(SBLOCK_NOTIFY_RECV, + sblock->data); + break; + case SMSG_EVENT_SBLOCK_RELEASE: + ring = sblock->ring; + /* set write mask. */ + spin_lock_irqsave(&ring->poll_lock, flags); + ring->poll_mask |= POLLOUT | POLLWRNORM; + spin_unlock_irqrestore(&ring->poll_lock, flags); + wake_up_interruptible_all(&sblock->ring->getwait); + if (sblock->handler) + sblock->handler(SBLOCK_NOTIFY_GET, + sblock->data); + break; + default: + rval = 1; + break; + } + break; + default: + rval = 1; + break; + }; + + if (rval) { + pr_info("non-handled sblock msg: %d-%d, %d, %d, %d\n", + sblock->dst, sblock->channel, + mrecv.type, mrecv.flag, mrecv.value); + rval = 0; + } + } + + pr_info("sblock %d-%d thread stop", + sblock->dst, sblock->channel); + return rval; +} + +static void sblock_pms_init(uint8_t dst, uint8_t ch, struct sblock_ring *ring) +{ + sprintf(ring->tx_pms_name, "sblock-%d-%d-tx", dst, ch); + ring->tx_pms = sprd_pms_create(dst, ring->tx_pms_name, true); + if (!ring->tx_pms) + pr_warn("create pms %s failed!\n", ring->tx_pms_name); + + sprintf(ring->rx_pms_name, "sblock-%d-%d-rx", dst, ch); + ring->rx_pms = sprd_pms_create(dst, ring->rx_pms_name, true); + if (!ring->rx_pms) + pr_warn("create pms %s failed!\n", ring->rx_pms_name); +} + +static void sblock_pms_destroy(struct sblock_ring *ring) +{ + sprd_pms_destroy(ring->tx_pms); + sprd_pms_destroy(ring->rx_pms); + ring->tx_pms = NULL; + ring->rx_pms = NULL; +} + +static int sblock_mgr_create(uint8_t dst, + uint8_t channel, + int pre_cfg, + uint32_t tx_blk_num, uint32_t tx_blk_sz, + uint32_t rx_blk_num, uint32_t rx_blk_sz, + struct sblock_mgr **sb_mgr) +{ + struct sblock_mgr *sblock = NULL; + struct smsg_ipc *sipc = smsg_ipcs[dst]; + int ret; + + pr_debug("%s: dst=%d channel=%d\n", __func__, dst, channel); + + if (!sipc) + return -EINVAL; + + sblock = kzalloc(sizeof(struct sblock_mgr), GFP_KERNEL); + if (!sblock) + return -ENOMEM; + + sblock->ring = kzalloc(sizeof(struct sblock_ring), GFP_KERNEL); + if (!sblock->ring) { + kfree(sblock); + return -ENOMEM; + } + + sblock->dst = dst; + sblock->channel = channel; + sblock->pre_cfg = pre_cfg; + sblock->state = SBLOCK_STATE_IDLE; + + if (!sipc->client) { + ret = sblock_host_init(sipc, sblock, + tx_blk_num, tx_blk_sz, + rx_blk_num, rx_blk_sz); + if (ret) { + kfree(sblock->ring); + kfree(sblock); + return ret; + } + } + + sblock_pms_init(dst, channel, sblock->ring); + init_waitqueue_head(&sblock->ring->getwait); + init_waitqueue_head(&sblock->ring->recvwait); + spin_lock_init(&sblock->ring->r_txlock); + spin_lock_init(&sblock->ring->r_rxlock); + spin_lock_init(&sblock->ring->p_txlock); + spin_lock_init(&sblock->ring->p_rxlock); + spin_lock_init(&sblock->ring->poll_lock); + + *sb_mgr = sblock; + + return 0; +} + +int sblock_create_ex(u8 dst, u8 channel, + u32 txblocknum, u32 txblocksize, + u32 rxblocknum, u32 rxblocksize, + void (*handler)(int event, void *data), void *data) +{ + struct sblock_mgr *sblock = NULL; + int result; + u8 ch_index; + struct smsg_ipc *sipc; + struct sched_param param = {.sched_priority = 11}; + + ch_index = sipc_channel2index(channel); + if (ch_index == INVALID_CHANEL_INDEX) { + pr_err("%s:channel %d invalid!\n", __func__, channel); + return -EINVAL; + } + + if (dst >= SIPC_ID_NR) { + 
pr_err("%s: dst = %d is invalid\n", __func__, dst); + return -EINVAL; + } + + pr_debug("%s: dst=%d channel=%d\n", __func__, dst, channel); + + result = sblock_mgr_create(dst, channel, 0, + txblocknum, txblocksize, + rxblocknum, rxblocksize, + &sblock); + if (!result) { + sblock->thread = kthread_create(sblock_thread, sblock, + "sblock-%d-%d", dst, channel); + if (IS_ERR(sblock->thread)) { + pr_err("Failed to create kthread: sblock-%d-%d\n", + dst, channel); + sipc = smsg_ipcs[sblock->dst]; + if (!sipc->client) { + shmem_ram_unmap(dst, sblock->smem_virt); + smem_free(dst, + sblock->smem_addr, + sblock->smem_size); + kfree(sblock->ring->txrecord); + kfree(sblock->ring->rxrecord); + } + kfree(sblock->ring); + result = PTR_ERR(sblock->thread); + kfree(sblock); + return result; + } + + /* Prevent the thread task_struct from being destroyed. */ + get_task_struct(sblock->thread); + + sblocks[dst][ch_index] = sblock; + if ((handler != NULL) && (data != NULL)) { + result = sblock_register_notifier(dst, channel, + handler, data); + if (result < 0) { + sblock_destroy(dst, channel); + return result; + } + } + /*set the thread as a real time thread, and its priority is 11*/ + sched_setscheduler(sblock->thread, SCHED_RR, ¶m); + wake_up_process(sblock->thread); + } + + pr_debug("%s: sblock-%d-%d create over, result = %d\n", + __func__, dst, channel, result); + + return result; +} +EXPORT_SYMBOL_GPL(sblock_create_ex); + +int sblock_create(u8 dst, u8 channel, + u32 txblocknum, u32 txblocksize, + u32 rxblocknum, u32 rxblocksize) +{ + return sblock_create_ex(dst, channel, txblocknum, txblocksize, + rxblocknum, rxblocksize, NULL, NULL); +} +EXPORT_SYMBOL_GPL(sblock_create); + +int sblock_pcfg_create(u8 dst, u8 channel, + u32 tx_blk_num, u32 tx_blk_sz, + u32 rx_blk_num, u32 rx_blk_sz) +{ + struct sblock_mgr *sblock = NULL; + int result; + u8 ch_index; + + ch_index = sipc_channel2index(channel); + if (ch_index == INVALID_CHANEL_INDEX) { + pr_err("%s:channel %d invalid!\n", __func__, channel); + return -EINVAL; + } + if (dst >= SIPC_ID_NR) { + pr_err("sblock_create_ex: dst = %d is invalid\n", dst); + return -EINVAL; + } + + pr_debug("%s: dst=%d channel=%d\n", __func__, dst, channel); + + result = sblock_mgr_create(dst, + channel, + 1, + tx_blk_num, tx_blk_sz, + rx_blk_num, rx_blk_sz, + &sblock); + if (!result) { + struct sched_param param = {.sched_priority = 11}; + + sblock->thread = kthread_create(sblock_thread, sblock, + "sblock-%d-%d", dst, channel); + if (IS_ERR(sblock->thread)) { + struct smsg_ipc *sipc; + + pr_err("Failed to create kthread: sblock-%d-%d\n", + dst, channel); + sipc = smsg_ipcs[sblock->dst]; + if (!sipc->client) { + shmem_ram_unmap(dst, sblock->smem_virt); + smem_free(dst, + sblock->smem_addr, + sblock->smem_size); + kfree(sblock->ring->txrecord); + kfree(sblock->ring->rxrecord); + } + kfree(sblock->ring); + result = PTR_ERR(sblock->thread); + kfree(sblock); + return result; + } + + /* Prevent the thread task_struct from being destroyed. */ + get_task_struct(sblock->thread); + + sblocks[dst][ch_index] = sblock; + /* + * Set the thread as a real time thread, and its priority + * is 11. 
+		sched_setscheduler(sblock->thread, SCHED_RR, &param);
+		wake_up_process(sblock->thread);
+	}
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(sblock_pcfg_create);
+
+void sblock_down(u8 dst, u8 channel)
+{
+	struct sblock_mgr *sblock;
+	u8 ch_index;
+
+	ch_index = sipc_channel2index(channel);
+	if (ch_index == INVALID_CHANEL_INDEX) {
+		pr_err("%s:channel %d invalid!\n", __func__, channel);
+		return;
+	}
+
+	sblock = sblocks[dst][ch_index];
+	if (sblock == NULL)
+		return;
+
+	sblock->state = SBLOCK_STATE_IDLE;
+	if (sblock->ring) {
+		wake_up_interruptible_all(&sblock->ring->recvwait);
+		wake_up_interruptible_all(&sblock->ring->getwait);
+	}
+	pr_info("%s: channel=%d sblock down success\n", __func__, channel);
+}
+EXPORT_SYMBOL_GPL(sblock_down);
+
+void sblock_destroy(u8 dst, u8 channel)
+{
+	struct sblock_mgr *sblock;
+	u8 ch_index;
+	struct smsg_ipc *sipc;
+
+	ch_index = sipc_channel2index(channel);
+	if (ch_index == INVALID_CHANEL_INDEX) {
+		pr_err("%s:channel %d invalid!\n", __func__, channel);
+		return;
+	}
+
+	sblock = sblocks[dst][ch_index];
+	if (sblock == NULL)
+		return;
+
+	sblock->state = SBLOCK_STATE_IDLE;
+	smsg_ch_close(dst, channel, -1);
+
+	/* stop sblock thread if it's created successfully and still alive */
+	if (!IS_ERR_OR_NULL(sblock->thread)) {
+		kthread_stop(sblock->thread);
+		put_task_struct(sblock->thread);
+		sblock->thread = NULL;
+	}
+
+	if (sblock->ring) {
+		sblock_pms_destroy(sblock->ring);
+		wake_up_interruptible_all(&sblock->ring->recvwait);
+		wake_up_interruptible_all(&sblock->ring->getwait);
+		/* kfree(NULL) is safe */
+		kfree(sblock->ring->txrecord);
+		kfree(sblock->ring->rxrecord);
+		kfree(sblock->ring);
+	}
+	if (sblock->smem_virt)
+		shmem_ram_unmap(dst, sblock->smem_virt);
+
+	sipc = smsg_ipcs[dst];
+	if (sipc->client)
+		smem_free(dst, sblock->smem_addr_debug, sblock->smem_size);
+	else
+		smem_free(dst, sblock->smem_addr, sblock->smem_size);
+	kfree(sblock);
+
+	sblocks[dst][ch_index] = NULL;
+}
+EXPORT_SYMBOL_GPL(sblock_destroy);
+
+int sblock_pcfg_open(uint8_t dest, uint8_t channel,
+		     void (*notifier)(int event, void *client),
+		     void *client)
+{
+	struct sblock_mgr *sblock;
+	uint8_t idx;
+	int ret;
+	struct sched_param param = {.sched_priority = 11};
+
+	pr_debug("%s: dst=%d channel=%d\n", __func__, dest, channel);
+
+	if (!notifier)
+		return -EINVAL;
+
+	idx = sipc_channel2index(channel);
+	if (idx == INVALID_CHANEL_INDEX) {
+		pr_err("%s: invalid channel %d!\n", __func__, channel);
+		return -ENODEV;
+	}
+
+	sblock = sblocks[dest][idx];
+	if (!sblock)
+		return -ENODEV;
+
+	if (!sblock->pre_cfg)
+		return -EINVAL;
+
+	if (sblock->thread) {
+		pr_err("%s: SBLOCK %u/%u already open",
+		       __func__,
+		       (unsigned int)sblock->dst,
+		       (unsigned int)sblock->channel);
+		return -EPROTO;
+	}
+
+	ret = 0;
+	sblock->thread = kthread_create(sblock_thread, sblock,
+					"sblock-%d-%d", dest, channel);
+	if (IS_ERR(sblock->thread)) {
+		pr_err("%s: create thread error\n", __func__);
+		sblock->thread = NULL;
+		ret = -EBUSY;
+	} else {
+		/* Prevent the thread task_struct from being destroyed. */
 */
+		get_task_struct(sblock->thread);
+
+		sblock->handler = notifier;
+		sblock->data = client;
+		/* Set the thread as a real-time thread; its priority is 11. */
+		sched_setscheduler(sblock->thread, SCHED_RR, &param);
+		wake_up_process(sblock->thread);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(sblock_pcfg_open);
+
+int sblock_close(uint8_t dest, uint8_t channel)
+{
+	return -ENOTSUPP;
+}
+EXPORT_SYMBOL_GPL(sblock_close);
+
+int sblock_register_notifier(u8 dst, u8 channel,
+			     void (*handler)(int event, void *data),
+			     void *data)
+{
+	struct sblock_mgr *sblock;
+	u8 ch_index;
+
+	ch_index = sipc_channel2index(channel);
+	if (ch_index == INVALID_CHANEL_INDEX) {
+		pr_err("%s:channel %d invalid!\n", __func__, channel);
+		return -EINVAL;
+	}
+
+	sblock = sblocks[dst][ch_index];
+
+	if (!sblock) {
+		pr_err("%s:sblock-%d-%d not ready!\n", __func__, dst, channel);
+		return -ENODEV;
+	}
+#ifndef CONFIG_SPRD_SIPC_WCN
+	if (sblock->handler) {
+		pr_err("sblock handler already registered\n");
+		return -EBUSY;
+	}
+#endif
+	sblock->handler = handler;
+	sblock->data = data;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sblock_register_notifier);
+
+int sblock_get_smem_cp_addr(uint8_t dest, uint8_t channel,
+			    uint32_t *paddr)
+{
+	struct sblock_mgr *sblock;
+	uint8_t idx;
+
+	if (!paddr)
+		return -EINVAL;
+
+	idx = sipc_channel2index(channel);
+	if (idx == INVALID_CHANEL_INDEX) {
+		pr_err("%s: invalid channel %d!\n", __func__, channel);
+		return -ENODEV;
+	}
+
+	sblock = sblocks[dest][idx];
+	if (!sblock)
+		return -ENODEV;
+
+	*paddr = sblock->dst_smem_addr;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sblock_get_smem_cp_addr);
+
+int sblock_get(u8 dst, u8 channel, struct sblock *blk, int timeout)
+{
+	struct sblock_mgr *sblock;
+	struct sblock_ring *ring;
+	struct sblock_ring_header_op *poolhd_op;
+
+	int txpos, index;
+	int rval = 0;
+	unsigned long flags;
+	u8 ch_index;
+	bool no_data;
+
+	ch_index = sipc_channel2index(channel);
+	if (ch_index == INVALID_CHANEL_INDEX) {
+		pr_err("%s:channel %d invalid!\n", __func__, channel);
+		return -EINVAL;
+	}
+
+	sblock = sblocks[dst][ch_index];
+
+	if (!sblock || sblock->state != SBLOCK_STATE_READY) {
+		pr_err("%s:sblock-%d-%d not ready!\n", __func__, dst, channel);
+		return sblock ?
-EIO : -ENODEV;
+	}
+
+	ring = sblock->ring;
+	poolhd_op = &(ring->header_op.poolhd_op);
+
+	/* must request resource before read or write share memory */
+	rval = sipc_smem_request_resource(ring->tx_pms, dst, timeout);
+	if (rval < 0)
+		return rval;
+
+	spin_lock_irqsave(&ring->poll_lock, flags);
+	no_data = *(poolhd_op->tx_rd_p) == *(poolhd_op->tx_wt_p);
+	/* update write mask */
+	if (no_data)
+		ring->poll_mask &= ~(POLLOUT | POLLWRNORM);
+	else
+		ring->poll_mask |= POLLOUT | POLLWRNORM;
+	spin_unlock_irqrestore(&ring->poll_lock, flags);
+
+	/* release resource */
+	sipc_smem_release_resource(ring->tx_pms, dst);
+
+	if (no_data) {
+		if (timeout == 0) {
+			/* no wait */
+			pr_err("%s: %d-%d is empty!\n", __func__, dst, channel);
+			rval = -ENODATA;
+		} else if (timeout < 0) {
+			/* wait forever */
+			rval = wait_event_interruptible(ring->getwait,
+				sblock_has_data(sblock, true) ||
+				sblock->state == SBLOCK_STATE_IDLE);
+			if (rval < 0)
+				pr_debug("%s: wait interrupted!\n", __func__);
+
+			if (sblock->state == SBLOCK_STATE_IDLE) {
+				pr_err("%s: state is idle!\n", __func__);
+				rval = -EIO;
+			}
+		} else {
+			/* wait timeout */
+			rval = wait_event_interruptible_timeout(ring->getwait,
+				sblock_has_data(sblock, true) ||
+				sblock->state == SBLOCK_STATE_IDLE,
+				timeout);
+			if (rval < 0) {
+				pr_debug("%s: wait interrupted!\n", __func__);
+			} else if (rval == 0) {
+				pr_info("%s: wait timeout!\n", __func__);
+				rval = -ETIME;
+			}
+
+			if (sblock->state == SBLOCK_STATE_IDLE) {
+				pr_info("%s: state is idle!\n", __func__);
+				rval = -EIO;
+			}
+		}
+	}
+
+	if (rval < 0)
+		return rval;
+
+	/* must request resource before read or write share memory */
+	rval = sipc_smem_request_resource(ring->tx_pms, dst, timeout);
+	if (rval < 0)
+		return rval;
+
+	/* multi-gotter may cause got failure */
+	spin_lock_irqsave(&ring->p_txlock, flags);
+	if (*(poolhd_op->tx_rd_p) != *(poolhd_op->tx_wt_p) &&
+	    sblock->state == SBLOCK_STATE_READY) {
+		txpos = sblock_get_ringpos(*(poolhd_op->tx_rd_p),
+					   poolhd_op->tx_count);
+		blk->addr = sblock->smem_virt +
+			    (ring->p_txblks[txpos].addr -
+			     sblock->stored_smem_addr);
+		blk->length = poolhd_op->tx_size;
+		*(poolhd_op->tx_rd_p) = *(poolhd_op->tx_rd_p) + 1;
+		index = sblock_get_index((blk->addr - ring->txblk_virt),
+					 sblock->txblksz);
+		ring->txrecord[index] = SBLOCK_BLK_STATE_PENDING;
+	} else {
+		/* release resource */
+		sipc_smem_release_resource(ring->tx_pms, dst);
+		rval = sblock->state == SBLOCK_STATE_READY ? -EAGAIN : -EIO;
+	}
+	spin_unlock_irqrestore(&ring->p_txlock, flags);
+
+	spin_lock_irqsave(&ring->poll_lock, flags);
+	/* update write mask */
+	if (*(poolhd_op->tx_wt_p) == *(poolhd_op->tx_rd_p))
+		ring->poll_mask &= ~(POLLOUT | POLLWRNORM);
+	else
+		ring->poll_mask |= POLLOUT | POLLWRNORM;
+	spin_unlock_irqrestore(&ring->poll_lock, flags);
+
+	return rval;
+}
+EXPORT_SYMBOL_GPL(sblock_get);
+
+static int sblock_send_ex(u8 dst, u8 channel,
+			  struct sblock *blk, bool yell)
+{
+	struct sblock_mgr *sblock;
+	struct sblock_ring *ring;
+	struct sblock_ring_header_op *ringhd_op;
+	struct smsg mevt;
+	int txpos, index;
+	int rval = 0;
+	unsigned long flags;
+	u8 ch_index;
+	bool send_event = false;
+
+	ch_index = sipc_channel2index(channel);
+	if (ch_index == INVALID_CHANEL_INDEX) {
+		pr_err("%s:channel %d invalid!\n", __func__, channel);
+		return -EINVAL;
+	}
+
+	sblock = sblocks[dst][ch_index];
+
+	if (!sblock || sblock->state != SBLOCK_STATE_READY) {
+		pr_err("%s:sblock-%d-%d not ready!\n", __func__, dst, channel);
+		return sblock ?
-EIO : -ENODEV; + } + + pr_debug("sblock_send: dst=%d, channel=%d, addr=%p, len=%d\n", + dst, channel, blk->addr, blk->length); + + ring = sblock->ring; + ringhd_op = &(ring->header_op.ringhd_op); + + spin_lock_irqsave(&ring->r_txlock, flags); + + txpos = sblock_get_ringpos(*(ringhd_op->tx_wt_p), ringhd_op->tx_count); + ring->r_txblks[txpos].addr = blk->addr - + sblock->smem_virt + + sblock->stored_smem_addr; + ring->r_txblks[txpos].length = blk->length; + pr_debug("sblock_send: channel=%d, wrptr=%d, txpos=%d, addr=%x\n", + channel, *(ringhd_op->tx_wt_p), + txpos, ring->r_txblks[txpos].addr); + *(ringhd_op->tx_wt_p) = *(ringhd_op->tx_wt_p) + 1; + + if (sblock->state == SBLOCK_STATE_READY) { + if (yell) { + send_event = true; + } else if (!ring->yell) { + if ((int)(*(ringhd_op->tx_wt_p) - + *(ringhd_op->tx_rd_p)) == 1) + ring->yell = 1; + } + } + index = sblock_get_index((blk->addr - ring->txblk_virt), + sblock->txblksz); + ring->txrecord[index] = SBLOCK_BLK_STATE_DONE; + + spin_unlock_irqrestore(&ring->r_txlock, flags); + + /* request in sblock_get, release here */ + sipc_smem_release_resource(ring->tx_pms, dst); + + /* + * smsg_send may caused schedule, + * can't be called in spinlock protected context. + */ + if (send_event) { + smsg_set(&mevt, channel, + SMSG_TYPE_EVENT, + SMSG_EVENT_SBLOCK_SEND, + 0); + rval = smsg_send(dst, &mevt, -1); + } + + return rval; +} + +int sblock_send(u8 dst, u8 channel, struct sblock *blk) +{ + return sblock_send_ex(dst, channel, blk, true); +} +EXPORT_SYMBOL_GPL(sblock_send); + +int sblock_send_prepare(u8 dst, u8 channel, struct sblock *blk) +{ + return sblock_send_ex(dst, channel, blk, false); +} +EXPORT_SYMBOL_GPL(sblock_send_prepare); + +int sblock_send_finish(u8 dst, u8 channel) +{ + struct sblock_mgr *sblock; + struct sblock_ring *ring; + struct sblock_ring_header_op *ringhd_op; + struct smsg mevt; + int rval = 0; + u8 ch_index; + + ch_index = sipc_channel2index(channel); + if (ch_index == INVALID_CHANEL_INDEX) { + pr_err("%s:channel %d invalid!\n", __func__, channel); + return -EINVAL; + } + + sblock = sblocks[dst][ch_index]; + if (!sblock || sblock->state != SBLOCK_STATE_READY) { + pr_err("%s:sblock-%d-%d not ready!\n", __func__, dst, channel); + return sblock ? -EIO : -ENODEV; + } + + ring = sblock->ring; + ringhd_op = &(ring->header_op.ringhd_op); + + /* must wait resource before read or write share memory */ + rval = sipc_smem_request_resource(ring->tx_pms, dst, -1); + if (rval) + return rval; + + if (*(ringhd_op->tx_wt_p) != *(ringhd_op->tx_rd_p)) { + smsg_set(&mevt, channel, + SMSG_TYPE_EVENT, + SMSG_EVENT_SBLOCK_SEND, 0); + rval = smsg_send(dst, &mevt, -1); + } + /* release resource */ + sipc_smem_release_resource(ring->tx_pms, dst); + + return rval; +} +EXPORT_SYMBOL_GPL(sblock_send_finish); + +int sblock_receive(u8 dst, u8 channel, + struct sblock *blk, int timeout) +{ + struct sblock_mgr *sblock; + struct sblock_ring *ring; + struct sblock_ring_header_op *ringhd_op; + int rxpos, index, rval = 0; + unsigned long flags; + u8 ch_index; + bool no_data; + + ch_index = sipc_channel2index(channel); + if (ch_index == INVALID_CHANEL_INDEX) { + pr_err("%s:channel %d invalid!\n", __func__, channel); + return -EINVAL; + } + + sblock = sblocks[dst][ch_index]; + + if (!sblock || sblock->state != SBLOCK_STATE_READY) { + pr_err("%s:sblock-%d-%d not ready!\n", __func__, dst, channel); + return sblock ? 
-EIO : -ENODEV; + } + + pr_debug("%s: dst=%d, channel=%d, timeout=%d\n", + __func__, dst, channel, timeout); + + ring = sblock->ring; + ringhd_op = &(ring->header_op.ringhd_op); + + /* must request resource before read or write share memory */ + rval = sipc_smem_request_resource(ring->rx_pms, dst, timeout); + if (rval < 0) + return rval; + + pr_debug("%s: channel=%d, wrptr=%d, rdptr=%d", + __func__, channel, + *(ringhd_op->rx_wt_p), + *(ringhd_op->rx_rd_p)); + + spin_lock_irqsave(&ring->poll_lock, flags); + no_data = *(ringhd_op->rx_wt_p) == *(ringhd_op->rx_rd_p); + /* update read mask */ + if (no_data) + ring->poll_mask &= ~(POLLIN | POLLRDNORM); + else + ring->poll_mask |= POLLIN | POLLRDNORM; + spin_unlock_irqrestore(&ring->poll_lock, flags); + + /* release resource */ + sipc_smem_release_resource(ring->rx_pms, dst); + + if (no_data) { + if (timeout == 0) { + /* no wait */ + pr_debug("%s: %d-%d is empty!\n", + __func__, dst, channel); + rval = -ENODATA; + } else if (timeout < 0) { + /* wait forever */ + rval = wait_event_interruptible(ring->recvwait, + sblock_has_data(sblock, false)); + if (rval < 0) + pr_info("%s: wait interrupted!\n", __func__); + + if (sblock->state == SBLOCK_STATE_IDLE) { + pr_info("%s: state is idle!\n", __func__); + rval = -EIO; + } + + } else { + /* wait timeout */ + rval = wait_event_interruptible_timeout(ring->recvwait, + sblock_has_data(sblock, false), + timeout); + if (rval < 0) { + pr_info("%s: wait interrupted!\n", __func__); + } else if (rval == 0) { + pr_info("%s: wait timeout!\n", __func__); + rval = -ETIME; + } + + if (sblock->state == SBLOCK_STATE_IDLE) { + pr_info("%s: state is idle!\n", __func__); + rval = -EIO; + } + } + } + + if (rval < 0) + return rval; + + /* must request resource before read or write share memory */ + rval = sipc_smem_request_resource(ring->rx_pms, dst, timeout); + if (rval < 0) + return rval; + + /* multi-receiver may cause recv failure */ + spin_lock_irqsave(&ring->r_rxlock, flags); + + if (*(ringhd_op->rx_wt_p) != *(ringhd_op->rx_rd_p) && + sblock->state == SBLOCK_STATE_READY) { + rxpos = sblock_get_ringpos(*(ringhd_op->rx_rd_p), + ringhd_op->rx_count); + blk->addr = ring->r_rxblks[rxpos].addr - + sblock->stored_smem_addr + + sblock->smem_virt; + blk->length = ring->r_rxblks[rxpos].length; + *(ringhd_op->rx_rd_p) = *(ringhd_op->rx_rd_p) + 1; + pr_debug("%s: channel=%d, rxpos=%d, addr=%p, len=%d\n", + __func__, channel, rxpos, blk->addr, blk->length); + index = sblock_get_index((blk->addr - ring->rxblk_virt), + sblock->rxblksz); + ring->rxrecord[index] = SBLOCK_BLK_STATE_PENDING; + } else { + /* release resource */ + sipc_smem_release_resource(ring->rx_pms, dst); + rval = sblock->state == SBLOCK_STATE_READY ? 
-EAGAIN : -EIO; + } + spin_unlock_irqrestore(&ring->r_rxlock, flags); + + spin_lock_irqsave(&ring->poll_lock, flags); + /* update read mask */ + if (*(ringhd_op->rx_wt_p) == *(ringhd_op->rx_rd_p)) + ring->poll_mask &= ~(POLLIN | POLLRDNORM); + else + ring->poll_mask |= POLLIN | POLLRDNORM; + spin_unlock_irqrestore(&ring->poll_lock, flags); + + return rval; +} +EXPORT_SYMBOL_GPL(sblock_receive); + +int sblock_get_arrived_count(u8 dst, u8 channel) +{ + struct sblock_mgr *sblock; + struct sblock_ring *ring; + struct sblock_ring_header_op *ringhd_op; + int blk_count = 0; + unsigned long flags; + u8 ch_index; + int rval; + + ch_index = sipc_channel2index(channel); + if (ch_index == INVALID_CHANEL_INDEX) { + pr_err("%s:channel %d invalid!\n", __func__, channel); + return -EINVAL; + } + + sblock = sblocks[dst][ch_index]; + if (!sblock || sblock->state != SBLOCK_STATE_READY) { + pr_err("%s:sblock-%d-%d not ready!\n", __func__, dst, channel); + return -ENODEV; + } + + ring = sblock->ring; + ringhd_op = &(ring->header_op.ringhd_op); + + /* must request resource before read or write share memory */ + rval = sipc_smem_request_resource(ring->rx_pms, dst, -1); + if (rval < 0) + return rval; + + spin_lock_irqsave(&ring->r_rxlock, flags); + blk_count = (int)(*(ringhd_op->rx_wt_p) - *(ringhd_op->rx_rd_p)); + spin_unlock_irqrestore(&ring->r_rxlock, flags); + /* release resource */ + sipc_smem_release_resource(ring->rx_pms, dst); + + return blk_count; + +} +EXPORT_SYMBOL_GPL(sblock_get_arrived_count); + +int sblock_get_free_count(u8 dst, u8 channel) +{ + struct sblock_mgr *sblock; + struct sblock_ring *ring; + struct sblock_ring_header_op *poolhd_op; + int blk_count = 0, rval; + unsigned long flags; + u8 ch_index; + + ch_index = sipc_channel2index(channel); + if (ch_index == INVALID_CHANEL_INDEX) { + pr_err("%s:channel %d invalid!\n", __func__, channel); + return -EINVAL; + } + + sblock = sblocks[dst][ch_index]; + if (!sblock || sblock->state != SBLOCK_STATE_READY) { + pr_err("%s:sblock-%d-%d not ready!\n", __func__, dst, channel); + return -ENODEV; + } + + ring = sblock->ring; + poolhd_op = &(ring->header_op.poolhd_op); + + /* must request resource before read or write share memory */ + rval = sipc_smem_request_resource(ring->tx_pms, dst, -1); + if (rval < 0) + return rval; + + spin_lock_irqsave(&ring->p_txlock, flags); + blk_count = (int)(*(poolhd_op->tx_wt_p) - *(poolhd_op->tx_rd_p)); + spin_unlock_irqrestore(&ring->p_txlock, flags); + /* release resource */ + sipc_smem_release_resource(ring->tx_pms, dst); + + return blk_count; +} +EXPORT_SYMBOL_GPL(sblock_get_free_count); + +int sblock_release(u8 dst, u8 channel, struct sblock *blk) +{ + struct sblock_mgr *sblock; + struct sblock_ring *ring; + struct sblock_ring_header_op *poolhd_op; + struct smsg mevt; + unsigned long flags; + int rxpos; + int index; + u8 ch_index; + bool send_event = false; + + ch_index = sipc_channel2index(channel); + if (ch_index == INVALID_CHANEL_INDEX) { + pr_err("%s:channel %d invalid!\n", __func__, channel); + return -EINVAL; + } + + sblock = sblocks[dst][ch_index]; + if (!sblock || sblock->state != SBLOCK_STATE_READY) { + pr_err("%s:sblock-%d-%d not ready!\n", __func__, dst, channel); + return -ENODEV; + } + + pr_debug("%s: dst=%d, channel=%d, addr=%p, len=%d\n", + __func__, dst, channel, blk->addr, blk->length); + + ring = sblock->ring; + poolhd_op = &(ring->header_op.poolhd_op); + + spin_lock_irqsave(&ring->p_rxlock, flags); + rxpos = sblock_get_ringpos(*(poolhd_op->rx_wt_p), poolhd_op->rx_count); + ring->p_rxblks[rxpos].addr 
= blk->addr - + sblock->smem_virt + + sblock->stored_smem_addr; + ring->p_rxblks[rxpos].length = poolhd_op->rx_size; + *(poolhd_op->rx_wt_p) = *(poolhd_op->rx_wt_p) + 1; + pr_debug("%s: addr=%x\n", __func__, ring->p_rxblks[rxpos].addr); + + if ((int)(*(poolhd_op->rx_wt_p) - *(poolhd_op->rx_rd_p)) == 1 && + sblock->state == SBLOCK_STATE_READY) { + /* send smsg to notify the peer side */ + send_event = true; + } + + index = sblock_get_index((blk->addr - ring->rxblk_virt), + sblock->rxblksz); + ring->rxrecord[index] = SBLOCK_BLK_STATE_DONE; + + spin_unlock_irqrestore(&ring->p_rxlock, flags); + + /* request in sblock_receive, release here */ + sipc_smem_release_resource(ring->rx_pms, dst); + + /* + * smsg_send may caused schedule, + * can't be called in spinlock protected context. + */ + if (send_event) { + smsg_set(&mevt, channel, + SMSG_TYPE_EVENT, + SMSG_EVENT_SBLOCK_RELEASE, + 0); + smsg_send(dst, &mevt, -1); + } + + return 0; +} +EXPORT_SYMBOL_GPL(sblock_release); + +unsigned int sblock_poll_wait(u8 dst, u8 channel, + struct file *filp, poll_table *wait) +{ + struct sblock_mgr *sblock; + struct sblock_ring *ring; + struct sblock_ring_header_op *poolhd_op; + struct sblock_ring_header_op *ringhd_op; + unsigned int mask = 0; + u8 ch_index; + + ch_index = sipc_channel2index(channel); + if (ch_index == INVALID_CHANEL_INDEX) { + pr_err("%s:channel %d invalid!\n", __func__, channel); + return mask; + } + sblock = sblocks[dst][ch_index]; + + if (!sblock) + return mask; + ring = sblock->ring; + poolhd_op = &(ring->header_op.poolhd_op); + ringhd_op = &(ring->header_op.ringhd_op); + + if (sblock->state != SBLOCK_STATE_READY) { + pr_debug("%s:sblock-%d-%d not ready to poll !\n", + __func__, dst, channel); + return mask; + } + poll_wait(filp, &ring->recvwait, wait); + poll_wait(filp, &ring->getwait, wait); + + if (sblock_has_data(sblock, true)) + mask |= POLLOUT | POLLWRNORM; + + if (sblock_has_data(sblock, false)) + mask |= POLLIN | POLLRDNORM; + + return mask; +} +EXPORT_SYMBOL_GPL(sblock_poll_wait); + +int sblock_query(u8 dst, u8 channel) +{ + struct sblock_mgr *sblock = NULL; + u8 ch_index; + + ch_index = sipc_channel2index(channel); + if (ch_index == INVALID_CHANEL_INDEX) { + pr_err("%s:channel %d invalid!\n", __func__, channel); + return -EINVAL; + } + + sblock = sblocks[dst][ch_index]; + if (!sblock) + return -ENODEV; + if (sblock->state != SBLOCK_STATE_READY) { + pr_debug("%s:sblock-%d-%d not ready!\n", __func__, dst, channel); + return -EINVAL; + } + return 0; +} +EXPORT_SYMBOL_GPL(sblock_query); + +#if defined(CONFIG_DEBUG_FS) +static int sblock_debug_show(struct seq_file *m, void *private) +{ + struct smsg_ipc *sipc = NULL; + struct sblock_mgr *sblock; + struct sblock_ring *ring; + struct sblock_ring_header_op *poolhd_op; + struct sblock_ring_header_op *ringhd_op; + int i, j; + + for (i = 0; i < SIPC_ID_NR; i++) { + sipc = smsg_ipcs[i]; + if (!sipc) + continue; + + /* must request resource before read or write share memory */ + if (sipc_smem_request_resource(sipc->sipc_pms, + sipc->dst, 1000) < 0) + continue; + + for (j = 0; j < SMSG_VALID_CH_NR; j++) { + sblock = sblocks[i][j]; + if (!sblock) + continue; + + sipc_debug_putline(m, '*', 170); + seq_printf(m, "sblock dst %d, channel: %3d, state: %d, smem_virt: 0x%lx, smem_addr: 0x%0x, dst_smem_addr: 0x%0x, smem_size: 0x%0x, txblksz: %d, rxblksz: %d\n", + sblock->dst, + sblock->channel, + sblock->state, + (unsigned long)sblock->smem_virt, + sblock->smem_addr, + sblock->dst_smem_addr, + sblock->smem_size, + sblock->txblksz, + sblock->rxblksz); 
+
+			/*
+			 * In a precfg channel, the ring pointer can be NULL
+			 * before the block manager has been created, and the
+			 * ring->header pointer can also be NULL before the
+			 * block handshake with the host, so a NULL-pointer
+			 * check is required here.
+			 */
+			ring = sblock->ring;
+			if (!ring || !ring->header)
+				continue;
+
+			poolhd_op = &(ring->header_op.poolhd_op);
+			ringhd_op = &(ring->header_op.ringhd_op);
+			seq_printf(m, "sblock ring: txblk_virt :0x%lx, rxblk_virt :0x%lx, poll_mask=0x%x\n",
+				   (unsigned long)ring->txblk_virt,
+				   (unsigned long)ring->rxblk_virt,
+				   ring->poll_mask);
+			seq_printf(m, "sblock ring header: rxblk_addr :0x%0x, rxblk_rdptr :0x%0x, rxblk_wrptr :0x%0x, rxblk_size :%d, rxblk_count :%d, rxblk_blks: 0x%0x\n",
+				   ringhd_op->rx_addr, *(ringhd_op->rx_rd_p),
+				   *(ringhd_op->rx_wt_p), ringhd_op->rx_size,
+				   ringhd_op->rx_count, ringhd_op->rx_blks);
+			seq_printf(m, "sblock ring header: txblk_addr :0x%0x, txblk_rdptr :0x%0x, txblk_wrptr :0x%0x, txblk_size :%d, txblk_count :%d, txblk_blks: 0x%0x\n",
+				   ringhd_op->tx_addr, *(ringhd_op->tx_rd_p),
+				   *(ringhd_op->tx_wt_p), ringhd_op->tx_size,
+				   ringhd_op->tx_count, ringhd_op->tx_blks);
+			seq_printf(m, "sblock pool header: rxblk_addr :0x%0x, rxblk_rdptr :0x%0x, rxblk_wrptr :0x%0x, rxblk_size :%d, rxpool_count :%d, rxblk_blks: 0x%0x\n",
+				   poolhd_op->rx_addr, *(poolhd_op->rx_rd_p),
+				   *(poolhd_op->rx_wt_p), poolhd_op->rx_size,
+				   poolhd_op->rx_count, poolhd_op->rx_blks);
+			seq_printf(m, "sblock pool header: txblk_addr :0x%0x, txblk_rdptr :0x%0x, txblk_wrptr :0x%0x, txblk_size :%d, txpool_count :%d, txblk_blks: 0x%0x\n",
+				   poolhd_op->tx_addr, *(poolhd_op->tx_rd_p),
+				   *(poolhd_op->tx_wt_p), poolhd_op->tx_size,
+				   poolhd_op->tx_count, poolhd_op->tx_blks);
+		}
+		/* release resource */
+		sipc_smem_release_resource(sipc->sipc_pms, sipc->dst);
+	}
+
+	return 0;
+}
+
+static int sblock_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, sblock_debug_show, inode->i_private);
+}
+
+static const struct file_operations sblock_debug_fops = {
+	.open = sblock_debug_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+int sblock_init_debugfs(void *root)
+{
+	if (!root)
+		return -ENXIO;
+	debugfs_create_file("sblock", 0444,
+			    (struct dentry *)root,
+			    NULL, &sblock_debug_fops);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sblock_init_debugfs);
+
+#endif /* CONFIG_DEBUG_FS */
+
+
+MODULE_AUTHOR("Chen Gaopeng");
+MODULE_DESCRIPTION("SIPC/SBLOCK driver");
+MODULE_LICENSE("GPL v2");
diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/sblock.h b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/sblock.h
new file mode 100644
index 000000000..13d67afa2
--- /dev/null
+++ b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/sblock.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2019 Spreadtrum Communications Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SBLOCK_H
+#define __SBLOCK_H
+
+/* flag for CMD/DONE msg type */
+#define SMSG_CMD_SBLOCK_INIT		0x1
+#define SMSG_DONE_SBLOCK_INIT		0x2
+
+/* flag for EVENT msg type */
+#define SMSG_EVENT_SBLOCK_SEND		0x1
+#define SMSG_EVENT_SBLOCK_RELEASE	0x2
+
+#define SBLOCK_STATE_IDLE		0
+#define SBLOCK_STATE_READY		1
+
+#define SBLOCK_BLK_STATE_DONE		0
+#define SBLOCK_BLK_STATE_PENDING	1
+
+struct sblock_blks {
+	u32 addr;	/* phy address */
+	u32 length;
+};
+
+/* ring block header */
+struct sblock_ring_header {
+	/* get|send-block info */
+	u32 txblk_addr;  /* tx blocks start addr */
+	u32 txblk_count; /* tx blocks num */
+	u32 txblk_size;  /* one tx block size */
+	u32 txblk_blks;  /* tx_ring or tx_pool start addr */
+	u32 txblk_rdptr; /* tx_ring or tx_pool read point */
+	u32 txblk_wrptr; /* tx_ring or tx_pool write point */
+
+	/* release|recv-block info */
+	u32 rxblk_addr;
+	u32 rxblk_count;
+	u32 rxblk_size;
+	u32 rxblk_blks;
+	u32 rxblk_rdptr;
+	u32 rxblk_wrptr;
+};
+
+struct sblock_header {
+	struct sblock_ring_header ring;
+	struct sblock_ring_header pool;
+};
+
+struct sblock_ring_header_op {
+	/*
+	 * These pointers point into shared memory; they are used to
+	 * update the rdptr and wrptr there.
+	 */
+	volatile u32 *tx_rd_p;
+	volatile u32 *tx_wt_p;
+	volatile u32 *rx_rd_p;
+	volatile u32 *rx_wt_p;
+
+	/*
+	 * These members are copied from shared memory, because their
+	 * contents never change there.
+	 */
+	u32 tx_addr;	/* txblk_addr */
+	u32 tx_count;	/* txblk_count */
+	u32 tx_size;	/* txblk_size */
+	u32 tx_blks;	/* txblk_blks */
+	u32 rx_addr;
+	u32 rx_count;
+	u32 rx_size;
+	u32 rx_blks;
+};
+
+struct sblock_header_op {
+	struct sblock_ring_header_op ringhd_op;
+	struct sblock_ring_header_op poolhd_op;
+};
+
+struct sblock_ring {
+	struct sblock_header	*header;
+	struct sblock_header_op header_op;
+
+	struct sprd_pms	*tx_pms;
+	struct sprd_pms	*rx_pms;
+	char		tx_pms_name[20];
+	char		rx_pms_name[20];
+
+	void		*txblk_virt; /* virt of header->txblk_addr */
+	void		*rxblk_virt; /* virt of header->rxblk_addr */
+
+	/* virt of header->ring->txblk_blks */
+	struct sblock_blks	*r_txblks;
+	/* virt of header->ring->rxblk_blks */
+	struct sblock_blks	*r_rxblks;
+	/* virt of header->pool->txblk_blks */
+	struct sblock_blks	*p_txblks;
+	/* virt of header->pool->rxblk_blks */
+	struct sblock_blks	*p_rxblks;
+
+	unsigned int		poll_mask;
+	/* protect the poll_mask member */
+	spinlock_t		poll_lock;
+
+	int	*txrecord; /* record the state of every txblk */
+	int	*rxrecord; /* record the state of every rxblk */
+	int	yell;	   /* need to notify cp */
+	spinlock_t	r_txlock; /* send */
+	spinlock_t	r_rxlock; /* recv */
+	spinlock_t	p_txlock; /* get */
+	spinlock_t	p_rxlock; /* release */
+
+	wait_queue_head_t	getwait;
+	wait_queue_head_t	recvwait;
+};
+
+struct sblock_mgr {
+	u8	dst;
+	u8	channel;
+	int	pre_cfg; /* supported in host mode only */
+	u32	state;
+
+	void	*smem_virt;
+	u32	smem_addr;
+	u32	smem_addr_debug;
+	u32	smem_size;
+	u32	dst_smem_addr;
+
+	/*
+	 * This address is stored in shared memory and is used to
+	 * calculate a block's virtual address.
+	 * In host mode it is the client physical address (dst_smem_addr);
+	 * in client mode it is our own physical address (smem_addr).
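+	 * For example, sblock_get()/sblock_receive() translate a block's
+	 * physical address back to a virtual one as
+	 *   blk->addr = smem_virt + (blk_phys - stored_smem_addr);
+	 * and sblock_send()/sblock_release() apply the inverse mapping.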
+ */ + u32 stored_smem_addr; + + u32 txblksz; + u32 rxblksz; + u32 txblknum; + u32 rxblknum; + + struct sblock_ring *ring; + struct task_struct *thread; + + void (*handler)(int event, void *data); + void *data; +}; + +#ifdef CONFIG_64BIT +#define SBLOCK_ALIGN_BYTES (8) +#else +#define SBLOCK_ALIGN_BYTES (4) +#endif + +static inline u32 sblock_get_index(u32 x, u32 y) +{ + return (x / y); +} + +static inline u32 sblock_get_ringpos(u32 x, u32 y) +{ + return is_power_of_2(y) ? (x & (y - 1)) : (x % y); +} +#endif diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/sbuf.c b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/sbuf.c new file mode 100644 index 000000000..2a50b72d6 --- /dev/null +++ b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/sbuf.c @@ -0,0 +1,1755 @@ +/* + * Copyright (C) 2019 Spreadtrum Communications Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,4,60 )) +#include +#include +#endif + +#include "../include/sipc.h" +#include "sipc_priv.h" +#include "sbuf.h" + +#if defined(CONFIG_DEBUG_FS) +#include "sipc_debugfs.h" +#endif + +#define VOLA_SBUF_SMEM volatile struct sbuf_smem_header +#define VOLA_SBUF_RING volatile struct sbuf_ring_header + +struct name_node { + struct list_head list; + char comm[TASK_COMM_LEN]; + pid_t pid; + u8 latest; +}; + +union sbuf_buf { + void *buf; + void __user *ubuf; +}; + +enum task_type { + TASK_RXWAIT = 0, + TASK_TXWAIT, + TASK_SELECT +}; + +static struct sbuf_mgr *sbufs[SIPC_ID_NR][SMSG_VALID_CH_NR]; + +static bool sbuf_has_data(struct sbuf_ring *ring, u8 dst, bool tx) +{ + struct sbuf_ring_header_op *hd_op = &ring->header_op; + bool has_data; + unsigned long flags; + + /* + * if it is local share memory, + * check the read and write point directly. + */ + if (smsg_ipcs[dst]->smem_type == SMEM_LOCAL) { + if (tx) + return (int)(BL_READL(hd_op->tx_wt_p) - BL_READL(hd_op->tx_rd_p)) < + hd_op->tx_size; + + return BL_READL(hd_op->rx_wt_p) != BL_READL(hd_op->rx_rd_p); + } + + /* + * if it is remote share memmory read the poll_mask, + * this situation requires that the poll_mask must be accurate enough. 
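+	 * The mask is refreshed on every sbuf_read()/sbuf_write() pass
+	 * and on each SMSG_EVENT_SBUF_RDPTR/WRPTR event in sbuf_thread().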
+ */ + spin_lock_irqsave(&ring->poll_lock, flags); + if (tx) + has_data = ring->poll_mask & (POLLOUT | POLLWRNORM); + else + has_data = ring->poll_mask & (POLLIN | POLLRDNORM); + spin_unlock_irqrestore(&ring->poll_lock, flags); + + return has_data; +} + +static bool sbuf_is_task_pointer(const void *ptr) +{ + struct task_struct *task; + struct thread_info *thread_info; + + task = (struct task_struct *)ptr; + if (IS_ERR_OR_NULL(task) || !virt_addr_valid(task)) + return false; + +#ifndef CONFIG_THREAD_INFO_IN_TASK + /* in this case thread_info is in the same addres with stack thread_union*/ + if (IS_ERR_OR_NULL(task->stack) || !virt_addr_valid(task->stack)) + return false; +#endif + + thread_info = task_thread_info(task); + + if (IS_ERR_OR_NULL(thread_info) || !virt_addr_valid(thread_info)) + return false; + + return true; +} + +static struct task_struct *sbuf_wait_get_task( +#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 4,10,0 )) + wait_queue_t *pos, +#else + wait_queue_entry_t *pos, +#endif + u32 *b_select) +{ + struct task_struct *task; + struct poll_wqueues *table; + + if (!pos->private) + return NULL; + + /* if the private is put into wait list by sbuf_read, the struct of + * pos->private is struct task_struct + * if the private is put into list by sbuf_poll, the struct of + * pos->private is struct poll_wqueues + */ + + /* firstly, try struct poll_wqueues */ + table = (struct poll_wqueues *)pos->private; + task = table->polling_task; + if (sbuf_is_task_pointer(task)) { + *b_select = 1; + return task; + } + + /* firstly, try convert it with the struct task_struct */ + task = (struct task_struct *)pos->private; + + if (sbuf_is_task_pointer(task)) { + *b_select = 0; + return task; + } + + return NULL; +} + +#if defined(SIPC_DEBUG_SBUF_RDWT_OWNER) +static void sbuf_record_rdwt_owner(struct sbuf_ring *ring, int b_rx) +{ + int b_add; + int cnt = 0; + struct name_node *pos = NULL; + struct name_node *temp = NULL; + struct list_head *owner_list; + unsigned long flags; + + b_add = 1; + owner_list = b_rx ? 
(&ring->rx_list) : (&ring->tx_list); + + spin_lock_irqsave(&ring->rxwait.lock, flags); + list_for_each_entry(pos, owner_list, list) { + cnt++; + if (pos->pid == current->pid) { + b_add = 0; + pos->latest = 1; + continue; + } + if (pos->latest) + pos->latest = 0; + } + spin_unlock_irqrestore(&ring->rxwait.lock, flags); + + if (b_add) { + /* delete head next */ + if (cnt == MAX_RECORD_CNT) { + temp = list_first_entry(owner_list, + struct name_node, list); + list_del(&temp->list); + kfree(temp); + } + + pos = kzalloc(sizeof(*pos), GFP_KERNEL); + if (pos) { + memcpy(pos->comm, current->comm, TASK_COMM_LEN); + pos->pid = current->pid; + pos->latest = 1; + spin_lock_irqsave(&ring->rxwait.lock, flags); + list_add_tail(&pos->list, owner_list); + spin_unlock_irqrestore(&ring->rxwait.lock, flags); + } + } +} + +static void sbuf_destroy_rdwt_owner(struct sbuf_ring *ring) +{ + struct name_node *pos, *temp; + unsigned long flags; + + spin_lock_irqsave(&ring->rxwait.lock, flags); + /* free task node */ + list_for_each_entry_safe(pos, + temp, + &ring->rx_list, + list) { + list_del(&pos->list); + kfree(pos); + } + + list_for_each_entry_safe(pos, + temp, + &ring->tx_list, + list) { + list_del(&pos->list); + kfree(pos); + } + spin_unlock_irqrestore(&ring->rxwait.lock, flags); +} +#endif + +static void sbuf_skip_old_data(struct sbuf_mgr *sbuf) +{ + struct sbuf_ring *ring = NULL; + struct sbuf_ring_header_op *hd_op = NULL; + u32 i, v; + unsigned long flags; + + ring = &sbuf->rings[0]; + /* must reques resource before read or write share memory */ + if (sipc_smem_request_resource(ring->rx_pms, sbuf->dst, -1) < 0) + return; + + for (i = 0; i < sbuf->ringnr; i++) { + ring = &sbuf->rings[i]; + hd_op = &ring->header_op; + + /* clean sbuf tx ring , sbuf tx ring no need to clear */ + /* *(hd_op->tx_wt_p) = *(hd_op->tx_rd_p); */ + /* clean sbuf rx ring */ + v = BL_READL(hd_op->rx_wt_p); + BL_WRITEL(v, hd_op->rx_rd_p); + /* restore write mask. */ + spin_lock_irqsave(&ring->poll_lock, flags); + ring->poll_mask = POLLOUT | POLLWRNORM; + spin_unlock_irqrestore(&ring->poll_lock, flags); + } + ring = &sbuf->rings[0]; + /* release resource */ + sipc_smem_release_resource(ring->rx_pms, sbuf->dst); +} + +static void sbuf_pms_init(struct sbuf_ring *ring, + uint8_t dst, uint8_t ch, int index) +{ + ring->need_wake_lock = true; + sprintf(ring->tx_pms_name, "sbuf-%d-%d-%d-tx", dst, ch, index); + ring->tx_pms = sprd_pms_create(dst, ring->tx_pms_name, false); + if (!ring->tx_pms) + pr_warn("create pms %s failed!\n", ring->tx_pms_name); + + sprintf(ring->rx_pms_name, "sbuf-%d-%d-%d-rx", dst, ch, index); + ring->rx_pms = sprd_pms_create(dst, ring->rx_pms_name, false); + if (!ring->rx_pms) + pr_warn("create pms %s failed!\n", ring->rx_pms_name); +} + +static void sbuf_comm_init(struct sbuf_mgr *sbuf) +{ + u32 bufnum = sbuf->ringnr; + int i; + struct sbuf_ring *ring; + + for (i = 0; i < bufnum; i++) { + ring = &sbuf->rings[i]; + init_waitqueue_head(&ring->txwait); + init_waitqueue_head(&ring->rxwait); +#if defined(SIPC_DEBUG_SBUF_RDWT_OWNER) + INIT_LIST_HEAD(&ring->tx_list); + INIT_LIST_HEAD(&ring->rx_list); +#endif + mutex_init(&ring->txlock); + mutex_init(&ring->rxlock); + spin_lock_init(&ring->poll_lock); + spin_lock_init(&ring->rxwait.lock); + + /* init, set write mask. 
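+		 * A freshly initialized ring is empty: writable (POLLOUT)
+		 * but with nothing to read yet.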
*/ + ring->poll_mask = POLLOUT | POLLWRNORM; + sbuf_pms_init(ring, sbuf->dst, sbuf->channel, i); + } +} + +static int sbuf_host_init(struct smsg_ipc *sipc, struct sbuf_mgr *sbuf, + u32 bufnum, u32 txbufsize, u32 rxbufsize) +{ + VOLA_SBUF_SMEM *smem; + VOLA_SBUF_RING *ringhd; + struct sbuf_ring_header_op *hd_op; + int hsize, i, rval; + phys_addr_t offset = 0; + u8 dst = sbuf->dst; + struct sbuf_ring *ring; + u32 txbuf_addr; + + sbuf->ringnr = bufnum; + + /* allocate smem */ + hsize = sizeof(struct sbuf_smem_header) + + sizeof(struct sbuf_ring_header) * bufnum; + sbuf->smem_size = hsize + (txbufsize + rxbufsize) * bufnum; + sbuf->smem_addr = smem_alloc(dst, sbuf->smem_size); + if (!sbuf->smem_addr) { + pr_err("%s: channel %d-%d, Failed to allocate smem for sbuf\n", + __func__, sbuf->dst, sbuf->channel); + return -ENOMEM; + } + sbuf->dst_smem_addr = sbuf->smem_addr - sipc->smem_base + + sipc->dst_smem_base; + + pr_debug("%s: channel %d-%d, smem_addr=0x%x, smem_size=0x%x, dst_smem_addr=0x%x\n", + __func__, + sbuf->dst, + sbuf->channel, + sbuf->smem_addr, + sbuf->smem_size, + sbuf->dst_smem_addr); + +#ifdef CONFIG_PHYS_ADDR_T_64BIT + offset = sipc->high_offset; + offset = offset << 32; +#endif + + pr_info("%s: channel %d-%d, offset = 0x%lx!\n", + __func__, sbuf->dst, sbuf->channel, (unsigned long)offset); + sbuf->smem_virt = shmem_ram_vmap_nocache(dst, + sbuf->smem_addr + offset, + sbuf->smem_size); + if (!sbuf->smem_virt) { + pr_err("%s: channel %d-%d, Failed to map smem for sbuf\n", + __func__, sbuf->dst, sbuf->channel); + smem_free(dst, sbuf->smem_addr, sbuf->smem_size); + return -EFAULT; + } + + /* allocate rings description */ + sbuf->rings = kcalloc(bufnum, sizeof(struct sbuf_ring), GFP_KERNEL); + if (!sbuf->rings) { + smem_free(dst, sbuf->smem_addr, sbuf->smem_size); + shmem_ram_unmap(dst, sbuf->smem_virt); + return -ENOMEM; + } + + /* must request resource before read or write share memory */ + rval = sipc_smem_request_resource(sipc->sipc_pms, sipc->dst, -1); + if (rval < 0) { + smem_free(dst, sbuf->smem_addr, sbuf->smem_size); + shmem_ram_unmap(dst, sbuf->smem_virt); + kfree(sbuf->rings); + return rval; + } + + /* initialize all ring bufs */ + smem = (VOLA_SBUF_SMEM *)sbuf->smem_virt; + BL_SETL(smem->ringnr, bufnum); + for (i = 0; i < bufnum; i++) { + ringhd = (VOLA_SBUF_RING *)&smem->headers[i]; + txbuf_addr = sbuf->dst_smem_addr + hsize + + (txbufsize + rxbufsize) * i; + BL_SETL(ringhd->txbuf_addr, txbuf_addr); + BL_SETL(ringhd->txbuf_size, txbufsize); + BL_SETL(ringhd->txbuf_rdptr, 0); + BL_SETL(ringhd->txbuf_wrptr, 0); + BL_SETL(ringhd->rxbuf_addr, txbuf_addr + txbufsize); + BL_SETL(ringhd->rxbuf_size, rxbufsize); + BL_SETL(ringhd->rxbuf_rdptr, 0); + BL_SETL(ringhd->rxbuf_wrptr, 0); + + ring = &sbuf->rings[i]; + ring->header = ringhd; + ring->txbuf_virt = sbuf->smem_virt + hsize + + (txbufsize + rxbufsize) * i; + ring->rxbuf_virt = ring->txbuf_virt + txbufsize; + /* init header op */ + hd_op = &ring->header_op; + hd_op->rx_rd_p = &ringhd->rxbuf_rdptr; + hd_op->rx_wt_p = &ringhd->rxbuf_wrptr; + hd_op->rx_size = ringhd->rxbuf_size; + hd_op->tx_rd_p = &ringhd->txbuf_rdptr; + hd_op->tx_wt_p = &ringhd->txbuf_wrptr; + hd_op->tx_size = ringhd->txbuf_size; + } + + /* release resource */ + sipc_smem_release_resource(sipc->sipc_pms, sipc->dst); + + sbuf_comm_init(sbuf); + + return 0; +} + +static int sbuf_client_init(struct smsg_ipc *sipc, struct sbuf_mgr *sbuf) +{ + VOLA_SBUF_SMEM *smem; + VOLA_SBUF_RING *ringhd; + struct sbuf_ring_header_op *hd_op; + struct sbuf_ring *ring; + int hsize, i, 
rval; + u32 txbufsize, rxbufsize; + phys_addr_t offset = 0; + u32 bufnum; + u8 dst = sbuf->dst; + +#ifdef CONFIG_PHYS_ADDR_T_64BIT + offset = sipc->high_offset; + offset = offset << 32; + pr_info("%s: channel %d-%d, offset = 0x%llx!\n", + __func__, sbuf->dst, sbuf->channel, offset); +#endif + + /* get bufnum and bufsize */ + hsize = sizeof(struct sbuf_smem_header) + + sizeof(struct sbuf_ring_header) * 1; + sbuf->smem_virt = shmem_ram_vmap_nocache(dst, + sbuf->smem_addr + offset, + hsize); + if (!sbuf->smem_virt) { + pr_err("%s: channel %d-%d, Failed to map smem for sbuf head\n", + __func__, sbuf->dst, sbuf->channel); + return -EFAULT; + } + smem = (VOLA_SBUF_SMEM *)sbuf->smem_virt; + sbuf->ringnr = smem->ringnr; + bufnum = sbuf->ringnr; + ringhd = (VOLA_SBUF_RING *)&smem->headers[0]; + txbufsize = BL_GETL(ringhd->rxbuf_size); + rxbufsize = BL_GETL(ringhd->txbuf_size); + hsize = sizeof(struct sbuf_smem_header) + + sizeof(struct sbuf_ring_header) * bufnum; + sbuf->smem_size = hsize + (txbufsize + rxbufsize) * bufnum; + pr_debug("%s: channel %d-%d, txbufsize = 0x%x, rxbufsize = 0x%x!\n", + __func__, sbuf->dst, sbuf->channel, txbufsize, rxbufsize); + pr_debug("%s: channel %d-%d, smem_size = 0x%x, ringnr = %d!\n", + __func__, sbuf->dst, sbuf->channel, sbuf->smem_size, bufnum); + shmem_ram_unmap(dst, sbuf->smem_virt); + + /* alloc debug smem */ + sbuf->smem_addr_debug = smem_alloc(dst, sbuf->smem_size); + if (!sbuf->smem_addr_debug) { + pr_err("%s: channel %d-%d,Failed to allocate debug smem for sbuf\n", + __func__, sbuf->dst, sbuf->channel); + return -ENOMEM; + } + + /* get smem virtual address */ + sbuf->smem_virt = shmem_ram_vmap_nocache(dst, + sbuf->smem_addr + offset, + sbuf->smem_size); + if (!sbuf->smem_virt) { + pr_err("%s: channel %d-%d,Failed to map smem for sbuf\n", + __func__, sbuf->dst, sbuf->channel); + smem_free(dst, sbuf->smem_addr_debug, sbuf->smem_size); + return -EFAULT; + } + + /* allocate rings description */ + sbuf->rings = kcalloc(bufnum, sizeof(struct sbuf_ring), GFP_KERNEL); + if (!sbuf->rings) { + smem_free(dst, sbuf->smem_addr_debug, sbuf->smem_size); + shmem_ram_unmap(dst, sbuf->smem_virt); + return -ENOMEM; + } + pr_info("%s: channel %d-%d, ringns = 0x%p!\n", + __func__, sbuf->dst, sbuf->channel, sbuf->rings); + + /* must request resource before read or write share memory */ + rval = sipc_smem_request_resource(sipc->sipc_pms, sipc->dst, -1); + if (rval < 0) { + smem_free(dst, sbuf->smem_addr, sbuf->smem_size); + shmem_ram_unmap(dst, sbuf->smem_virt); + kfree(sbuf->rings); + return rval; + } + + /* initialize all ring bufs */ + smem = (VOLA_SBUF_SMEM *)sbuf->smem_virt; + for (i = 0; i < bufnum; i++) { + ringhd = (VOLA_SBUF_RING *)&smem->headers[i]; + ring = &sbuf->rings[i]; + ring->header = ringhd; + /* host txbuf_addr */ + ring->rxbuf_virt = sbuf->smem_virt + hsize + + (txbufsize + rxbufsize) * i; + /* host rxbuf_addr */ + ring->txbuf_virt = ring->rxbuf_virt + rxbufsize; + /* init header op , client mode, rx <==> tx */ + hd_op = &ring->header_op; + hd_op->rx_rd_p = &ringhd->txbuf_rdptr; + hd_op->rx_wt_p = &ringhd->txbuf_wrptr; + hd_op->rx_size = ringhd->txbuf_size; + hd_op->tx_rd_p = &ringhd->rxbuf_rdptr; + hd_op->tx_wt_p = &ringhd->rxbuf_wrptr; + hd_op->tx_size = ringhd->rxbuf_size; + } + + /* release resource */ + sipc_smem_release_resource(sipc->sipc_pms, sipc->dst); + + sbuf_comm_init(sbuf); + + return 0; +} + +static int sbuf_thread(void *data) +{ + struct sbuf_mgr *sbuf = data; + struct sbuf_ring *ring; + struct smsg mcmd, mrecv; + int rval, bufid; + struct 
smsg_ipc *sipc; + unsigned long flags; + + /* since the channel open may hang, we call it in the sbuf thread */ + rval = smsg_ch_open(sbuf->dst, sbuf->channel, -1); + if (rval != 0) { + pr_err("Failed to open channel %d\n", sbuf->channel); + /* assign NULL to thread poniter as failed to open channel */ + sbuf->thread = NULL; + return rval; + } + + /* if client, send SMSG_CMD_SBUF_INIT, wait sbuf SMSG_DONE_SBUF_INIT */ + sipc = smsg_ipcs[sbuf->dst]; + if (sipc->client) { + smsg_set(&mcmd, sbuf->channel, SMSG_TYPE_CMD, + SMSG_CMD_SBUF_INIT, 0); + smsg_send(sbuf->dst, &mcmd, -1); + do { + smsg_set(&mrecv, sbuf->channel, 0, 0, 0); + rval = smsg_recv(sbuf->dst, &mrecv, -1); + if (rval != 0) { + sbuf->thread = NULL; + return rval; + } + } while (mrecv.type != SMSG_TYPE_DONE || + mrecv.flag != SMSG_DONE_SBUF_INIT); + sbuf->smem_addr = mrecv.value; + pr_info("%s: channel %d-%d, done_sbuf_init, address = 0x%x!\n", + __func__, sbuf->dst, sbuf->channel, sbuf->smem_addr); + if (sbuf_client_init(sipc, sbuf)) { + sbuf->thread = NULL; + return 0; + } + sbuf->state = SBUF_STATE_READY; + } + + /* sbuf init done, handle the ring rx events */ + while (!kthread_should_stop()) { + /* monitor sbuf rdptr/wrptr update smsg */ + smsg_set(&mrecv, sbuf->channel, 0, 0, 0); + rval = smsg_recv(sbuf->dst, &mrecv, -1); + if (rval == -EIO) { + /* channel state is free */ + msleep(20); + continue; + } + + pr_debug("sbuf thread recv msg: dst=%d, channel=%d, type=%d, flag=0x%04x, value=0x%08x\n", + sbuf->dst, + sbuf->channel, + mrecv.type, + mrecv.flag, + mrecv.value); + + switch (mrecv.type) { + case SMSG_TYPE_OPEN: + pr_info("%s: channel %d-%d, state=%d, recv open msg!\n", + __func__, sbuf->dst, + sbuf->channel, sbuf->state); + if (sipc->client) + break; + + /* if channel state is already reay, reopen it + * (such as modem reset), we must skip the old + * buf data , than give open ack and reset state + * to idle + */ + if (sbuf->state == SBUF_STATE_READY) { + sbuf_skip_old_data(sbuf); + sbuf->state = SBUF_STATE_IDLE; + } + /* handle channel open */ + smsg_open_ack(sbuf->dst, sbuf->channel); + break; + case SMSG_TYPE_CLOSE: + /* handle channel close */ + sbuf_skip_old_data(sbuf); + smsg_close_ack(sbuf->dst, sbuf->channel); + sbuf->state = SBUF_STATE_IDLE; + break; + case SMSG_TYPE_CMD: + pr_info("%s: channel %d-%d state = %d, recv cmd msg, flag = %d!\n", + __func__, sbuf->dst, sbuf->channel, + sbuf->state, mrecv.flag); + if (sipc->client) + break; + + /* respond cmd done for sbuf init only state is idle */ + if (sbuf->state == SBUF_STATE_IDLE && + mrecv.flag == SMSG_CMD_SBUF_INIT) { + smsg_set(&mcmd, + sbuf->channel, + SMSG_TYPE_DONE, + SMSG_DONE_SBUF_INIT, + sbuf->dst_smem_addr); + smsg_send(sbuf->dst, &mcmd, -1); + sbuf->state = SBUF_STATE_READY; + for (bufid = 0; bufid < sbuf->ringnr; bufid++) { + ring = &sbuf->rings[bufid]; + if (ring->handler) + ring->handler(SBUF_NOTIFY_READY, + ring->data); + } + } + break; + case SMSG_TYPE_EVENT: + bufid = mrecv.value; + WARN_ON(bufid >= sbuf->ringnr); + ring = &sbuf->rings[bufid]; + switch (mrecv.flag) { + case SMSG_EVENT_SBUF_RDPTR: + if (ring->need_wake_lock) + sprd_pms_request_wakelock_period(ring->tx_pms, + 500); + /* set write mask. */ + spin_lock_irqsave(&ring->poll_lock, flags); + ring->poll_mask |= POLLOUT | POLLWRNORM; + spin_unlock_irqrestore(&ring->poll_lock, flags); + wake_up_interruptible_all(&ring->txwait); + if (ring->handler) + ring->handler(SBUF_NOTIFY_WRITE, + ring->data); + break; + case SMSG_EVENT_SBUF_WRPTR: + /* set read mask. 
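+				 * The peer advanced its write pointer,
+				 * so readers now have data available.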
*/ + spin_lock_irqsave(&ring->poll_lock, flags); + ring->poll_mask |= POLLIN | POLLRDNORM; + spin_unlock_irqrestore(&ring->poll_lock, flags); + + if (ring->need_wake_lock) + sprd_pms_request_wakelock_period(ring->rx_pms, + 500); + wake_up_interruptible_all(&ring->rxwait); + if (ring->handler) + ring->handler(SBUF_NOTIFY_READ, + ring->data); + break; + default: + rval = 1; + break; + } + break; + default: + rval = 1; + break; + }; + + if (rval) { + pr_info("non-handled sbuf msg: %d-%d, %d, %d, %d\n", + sbuf->dst, + sbuf->channel, + mrecv.type, + mrecv.flag, + mrecv.value); + rval = 0; + } + /* unlock sipc channel wake lock */ + smsg_ch_wake_unlock(sbuf->dst, sbuf->channel); + } + + return 0; +} + + +int sbuf_create(u8 dst, u8 channel, u32 bufnum, u32 txbufsize, u32 rxbufsize) +{ + struct sbuf_mgr *sbuf; + u8 ch_index; + int ret; + struct smsg_ipc *sipc = NULL; + struct sched_param param = {.sched_priority = 10}; + + sipc = smsg_ipcs[dst]; + ch_index = sipc_channel2index(channel); + if (ch_index == INVALID_CHANEL_INDEX) { + pr_err("%s:channel %d invalid!\n", __func__, channel); + return -EINVAL; + } + + pr_debug("%s dst=%d, chanel=%d, bufnum=%d, txbufsize=0x%x, rxbufsize=0x%x\n", + __func__, + dst, + channel, + bufnum, + txbufsize, + rxbufsize); + + if (dst >= SIPC_ID_NR || !sipc) { + pr_err("%s: dst = %d is invalid\n", __func__, dst); + return -EINVAL; + } + + sbuf = kzalloc(sizeof(*sbuf), GFP_KERNEL); + if (!sbuf) + return -ENOMEM; + + sbuf->state = SBUF_STATE_IDLE; + sbuf->dst = dst; + sbuf->channel = channel; + + /* The dst smem type is pcie, need force send smsg in sbuf_write. */ + if (sipc->type == SIPC_BASE_PCIE && sipc->smem_type == SMEM_LOCAL) + sbuf->force_send = true; + + if (!sipc->client) { + ret = sbuf_host_init(sipc, sbuf, bufnum, txbufsize, rxbufsize); + if (ret) { + kfree(sbuf); + return ret; + } + } + + sbuf->thread = kthread_create(sbuf_thread, sbuf, + "sbuf-%d-%d", dst, channel); + if (IS_ERR(sbuf->thread)) { + pr_err("Failed to create kthread: sbuf-%d-%d\n", dst, channel); + if (!sipc->client) { + kfree(sbuf->rings); + shmem_ram_unmap(dst, sbuf->smem_virt); + smem_free(dst, sbuf->smem_addr, sbuf->smem_size); + } + ret = PTR_ERR(sbuf->thread); + kfree(sbuf); + return ret; + } + + sbufs[dst][ch_index] = sbuf; + + /*set the thread as a real time thread, and its priority is 10*/ + sched_setscheduler(sbuf->thread, SCHED_FIFO, ¶m); + wake_up_process(sbuf->thread); + + return 0; +} +EXPORT_SYMBOL_GPL(sbuf_create); + +void sbuf_set_no_need_wake_lock(u8 dst, u8 channel, u32 bufnum) +{ + u8 ch_index; + struct sbuf_mgr *sbuf; + struct sbuf_ring *ring = NULL; + + ch_index = sipc_channel2index(channel); + if (ch_index == INVALID_CHANEL_INDEX) { + pr_err("%s:channel %d invalid!\n", __func__, channel); + return; + } + + sbuf = sbufs[dst][ch_index]; + if (!sbuf || sbuf->ringnr <= bufnum) + return; + + ring = &sbuf->rings[bufnum]; + ring->need_wake_lock = false; +} +EXPORT_SYMBOL_GPL(sbuf_set_no_need_wake_lock); + + +void sbuf_down(u8 dst, u8 channel) +{ + struct sbuf_mgr *sbuf; + u8 ch_index; + int i; + ch_index = sipc_channel2index(channel); + if (ch_index == INVALID_CHANEL_INDEX) { + pr_err("%s:channel %d invalid!\n", __func__, channel); + return; + } + + sbuf = sbufs[dst][ch_index]; + if (!sbuf) + return; + sbuf->state = SBUF_STATE_IDLE; + if (sbuf->rings) { + for (i = 0; i < sbuf->ringnr; i++) { + wake_up_interruptible_all(&sbuf->rings[i].txwait); + wake_up_interruptible_all(&sbuf->rings[i].rxwait); + } + } + pr_info("%s: channel=%d sbuf down success\n", __func__, channel); +} + 
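+/*
+ * Illustrative usage sketch (not part of this driver): a typical client
+ * moving bytes over one sbuf ring. The dst/channel values are hypothetical
+ * placeholders; real users take them from their sipc configuration.
+ *
+ *	static int sbuf_demo(u8 dst, u8 channel)
+ *	{
+ *		char msg[] = "ping";
+ *		char resp[64];
+ *		int ret;
+ *
+ *		// one ring, 4 KiB in each direction
+ *		ret = sbuf_create(dst, channel, 1, 0x1000, 0x1000);
+ *		if (ret)
+ *			return ret;
+ *
+ *		// block up to one second for room, then for data;
+ *		// both calls return bytes transferred or a -errno value
+ *		ret = sbuf_write(dst, channel, 0, msg, sizeof(msg), HZ);
+ *		if (ret > 0)
+ *			ret = sbuf_read(dst, channel, 0, resp, sizeof(resp), HZ);
+ *
+ *		return ret < 0 ? ret : 0;
+ *	}
+ */
+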
+void sbuf_destroy(u8 dst, u8 channel)
+{
+	struct sbuf_mgr *sbuf;
+	int i;
+	u8 ch_index;
+	struct smsg_ipc *sipc;
+
+	ch_index = sipc_channel2index(channel);
+	if (ch_index == INVALID_CHANEL_INDEX) {
+		pr_err("%s:channel %d invalid!\n", __func__, channel);
+		return;
+	}
+
+	sbuf = sbufs[dst][ch_index];
+	if (!sbuf)
+		return;
+
+	sbuf->state = SBUF_STATE_IDLE;
+	smsg_ch_close(dst, channel, -1);
+
+	/* stop sbuf thread if it's created successfully and still alive */
+	if (!IS_ERR_OR_NULL(sbuf->thread))
+		kthread_stop(sbuf->thread);
+
+	if (sbuf->rings) {
+		for (i = 0; i < sbuf->ringnr; i++) {
+			wake_up_interruptible_all(&sbuf->rings[i].txwait);
+			wake_up_interruptible_all(&sbuf->rings[i].rxwait);
+#if defined(SIPC_DEBUG_SBUF_RDWT_OWNER)
+			sbuf_destroy_rdwt_owner(&sbuf->rings[i]);
+#endif
+			sprd_pms_destroy(sbuf->rings[i].tx_pms);
+			sprd_pms_destroy(sbuf->rings[i].rx_pms);
+		}
+		kfree(sbuf->rings);
+	}
+
+	if (sbuf->smem_virt)
+		shmem_ram_unmap(dst, sbuf->smem_virt);
+
+	sipc = smsg_ipcs[dst];
+	if (sipc->client)
+		smem_free(dst, sbuf->smem_addr_debug, sbuf->smem_size);
+	else
+		smem_free(dst, sbuf->smem_addr, sbuf->smem_size);
+
+	kfree(sbuf);
+
+	sbufs[dst][ch_index] = NULL;
+}
+EXPORT_SYMBOL_GPL(sbuf_destroy);
+
+int sbuf_write(u8 dst, u8 channel, u32 bufid,
+	       void *buf, u32 len, int timeout)
+{
+	struct sbuf_mgr *sbuf;
+	struct sbuf_ring *ring = NULL;
+	struct sbuf_ring_header_op *hd_op;
+	struct smsg mevt;
+	void *txpos;
+	int rval, left, tail, txsize;
+	u8 ch_index;
+	union sbuf_buf u_buf;
+	bool no_data;
+	unsigned long flags;
+	u32 v;
+
+	u_buf.buf = buf;
+	ch_index = sipc_channel2index(channel);
+	if (ch_index == INVALID_CHANEL_INDEX) {
+		pr_err("%s:channel %d invalid!\n", __func__, channel);
+		return -EINVAL;
+	}
+
+	sbuf = sbufs[dst][ch_index];
+	if (!sbuf)
+		return -ENODEV;
+	ring = &sbuf->rings[bufid];
+	hd_op = &ring->header_op;
+	if (sbuf->state != SBUF_STATE_READY) {
+		pr_info("sbuf-%d-%d not ready to write!\n",
+			dst, channel);
+		return -ENODEV;
+	}
+
+	pr_debug("%s: dst=%d, channel=%d, bufid=%d, len=%d, timeout=%d\n",
+		 __func__,
+		 dst,
+		 channel,
+		 bufid,
+		 len,
+		 timeout);
+
+	rval = 0;
+	left = len;
+
+	if (timeout) {
+		rval = mutex_lock_interruptible(&ring->txlock);
+		if (rval)
+			return rval;
+	} else {
+		if (!mutex_trylock(&ring->txlock)) {
+			pr_debug("%s: busy!, dst=%d, channel=%d, bufid=%d\n",
+				 __func__, dst, channel, bufid);
+			return -EBUSY;
+		}
+	}
+
+#if defined(SIPC_DEBUG_SBUF_RDWT_OWNER)
+	sbuf_record_rdwt_owner(ring, 0);
+#endif
+
+	/* must request resource before read or write share memory */
+	rval = sipc_smem_request_resource(ring->tx_pms, sbuf->dst, -1);
+	if (rval < 0) {
+		mutex_unlock(&ring->txlock);
+		return rval;
+	}
+
+	pr_debug("%s: channel=%d, wrptr=%d, rdptr=%d\n",
+		 __func__,
+		 channel,
+		 BL_READL(hd_op->tx_wt_p),
+		 BL_READL(hd_op->tx_rd_p));
+	no_data = ((int)(BL_READL(hd_op->tx_wt_p) - BL_READL(hd_op->tx_rd_p)) >=
+		   hd_op->tx_size);
+
+	/* update write mask */
+	spin_lock_irqsave(&ring->poll_lock, flags);
+	if (no_data)
+		ring->poll_mask &= ~(POLLOUT | POLLWRNORM);
+	else
+		ring->poll_mask |= POLLOUT | POLLWRNORM;
+	spin_unlock_irqrestore(&ring->poll_lock, flags);
+
+	/* release resource */
+	sipc_smem_release_resource(ring->tx_pms, sbuf->dst);
+
+	if (no_data) {
+		if (timeout == 0) {
+			pr_info("%s: %d-%d ring %d txbuf is full!\n",
+				__func__, dst, channel, bufid);
+			rval = -EBUSY;
+		} else if (timeout < 0) {
+			/* wait forever */
+			rval = wait_event_interruptible(
+				ring->txwait,
+				sbuf_has_data(ring, dst, true) ||
+				sbuf->state == SBUF_STATE_IDLE);
+			if (rval < 0)
+				pr_debug("%s:
wait interrupted!\n", __func__); + + if (sbuf->state == SBUF_STATE_IDLE) { + pr_err("%s: sbuf state is idle!\n", __func__); + rval = -EIO; + } + } else { + /* wait timeout */ + rval = wait_event_interruptible_timeout( + ring->txwait, + sbuf_has_data(ring, dst, true) || + sbuf->state == SBUF_STATE_IDLE, + timeout); + if (rval < 0) { + pr_debug("%s: wait interrupted!\n", __func__); + } else if (rval == 0) { + pr_info("%s: wait timeout!\n", __func__); + rval = -ETIME; + } + + if (sbuf->state == SBUF_STATE_IDLE) { + pr_err("%s: sbuf state is idle!\n", __func__); + rval = -EIO; + } + } + } + + if (rval < 0) { + mutex_unlock(&ring->txlock); + return rval; + } + + /* must request resource before read or write share memory */ + rval = sipc_smem_request_resource(ring->tx_pms, sbuf->dst, -1); + if (rval < 0) { + mutex_unlock(&ring->txlock); + return rval; + } + + while (left && (int)(BL_READL(hd_op->tx_wt_p) - BL_READL(hd_op->tx_rd_p)) < + hd_op->tx_size && sbuf->state == SBUF_STATE_READY) { + /* calc txpos & txsize */ + txpos = ring->txbuf_virt + + BL_READL(hd_op->tx_wt_p) % hd_op->tx_size; + txsize = hd_op->tx_size - + (int)(BL_READL(hd_op->tx_wt_p) - BL_READL(hd_op->tx_rd_p)); + txsize = min(txsize, left); + + tail = txpos + txsize - (ring->txbuf_virt + hd_op->tx_size); + if (tail > 0) { + /* ring buffer is rounded */ + if ((uintptr_t)u_buf.buf > TASK_SIZE) { + unalign_memcpy(txpos, u_buf.buf, txsize - tail); + unalign_memcpy(ring->txbuf_virt, + u_buf.buf + txsize - tail, tail); + } else { + if (unalign_copy_from_user( + txpos, + u_buf.ubuf, + txsize - tail) || + unalign_copy_from_user( + ring->txbuf_virt, + u_buf.ubuf + txsize - tail, + tail)) { + pr_err("%s:failed to copy from user!\n", + __func__); + rval = -EFAULT; + break; + } + } + } else { + if ((uintptr_t)u_buf.buf > TASK_SIZE) { + unalign_memcpy(txpos, u_buf.buf, txsize); + } else { + /* handle the user space address */ + if (unalign_copy_from_user( + txpos, + u_buf.ubuf, + txsize)) { + pr_err("%s:failed to copy from user!\n", + __func__); + rval = -EFAULT; + break; + } + } + } + + pr_debug("%s: channel=%d, txpos=%p, txsize=%d\n", + __func__, channel, txpos, txsize); + + /* update tx wrptr */ + v = BL_READL(hd_op->tx_wt_p) + txsize; + BL_WRITEL(v, hd_op->tx_wt_p); + /* + * force send be true or tx ringbuf is empty, + * need to notify peer side + */ + if (sbuf->force_send || + BL_READL(hd_op->tx_wt_p) - BL_READL(hd_op->tx_rd_p) == txsize) { + smsg_set(&mevt, channel, + SMSG_TYPE_EVENT, + SMSG_EVENT_SBUF_WRPTR, + bufid); + smsg_send(dst, &mevt, -1); + } + + left -= txsize; + u_buf.buf += txsize; + } + + /* update write mask */ + spin_lock_irqsave(&ring->poll_lock, flags); + if ((int)(BL_READL(hd_op->tx_wt_p) - BL_READL(hd_op->tx_rd_p)) >= + hd_op->tx_size) + ring->poll_mask &= ~(POLLOUT | POLLWRNORM); + else + ring->poll_mask |= POLLOUT | POLLWRNORM; + spin_unlock_irqrestore(&ring->poll_lock, flags); + + /* release resource */ + sipc_smem_release_resource(ring->tx_pms, sbuf->dst); + if (ring->need_wake_lock) + sprd_pms_release_wakelock_later(ring->tx_pms, 20); + + mutex_unlock(&ring->txlock); + + pr_debug("%s: done, channel=%d, len=%d\n", + __func__, channel, len - left); + + if (len == left) + return rval; + else + return (len - left); +} +EXPORT_SYMBOL_GPL(sbuf_write); + +int sbuf_read(u8 dst, u8 channel, u32 bufid, + void *buf, u32 len, int timeout) +{ + struct sbuf_mgr *sbuf; + struct sbuf_ring *ring = NULL; + struct sbuf_ring_header_op *hd_op; + struct smsg mevt; + void *rxpos; + int rval, left, tail, rxsize; + u8 ch_index; + union 
sbuf_buf u_buf;
+	bool no_data;
+	unsigned long flags;
+	u32 v;
+
+	u_buf.buf = buf;
+	ch_index = sipc_channel2index(channel);
+	if (ch_index == INVALID_CHANEL_INDEX) {
+		pr_err("%s:channel %d invalid!\n", __func__, channel);
+		return -EINVAL;
+	}
+	sbuf = sbufs[dst][ch_index];
+	if (!sbuf)
+		return -ENODEV;
+	ring = &sbuf->rings[bufid];
+	hd_op = &ring->header_op;
+
+	if (sbuf->state != SBUF_STATE_READY) {
+		pr_debug("sbuf-%d-%d not ready to read!\n", dst, channel);
+		return -ENODEV;
+	}
+
+	pr_debug("%s:dst=%d, channel=%d, bufid=%d, len=%d, timeout=%d\n",
+		 __func__, dst, channel, bufid, len, timeout);
+
+	rval = 0;
+	left = len;
+
+	if (timeout) {
+		rval = mutex_lock_interruptible(&ring->rxlock);
+		if (rval)
+			return rval;
+	} else {
+		if (!mutex_trylock(&ring->rxlock)) {
+			pr_debug("%s: busy!,dst=%d, channel=%d, bufid=%d\n",
+				 __func__, dst, channel, bufid);
+			return -EBUSY;
+		}
+	}
+
+#if defined(SIPC_DEBUG_SBUF_RDWT_OWNER)
+	sbuf_record_rdwt_owner(ring, 1);
+#endif
+
+	/* must request resource before read or write share memory */
+	rval = sipc_smem_request_resource(ring->rx_pms, sbuf->dst, -1);
+	if (rval < 0) {
+		mutex_unlock(&ring->rxlock);
+		return rval;
+	}
+
+	pr_debug("%s: channel=%d, wrptr=%d, rdptr=%d\n",
+		 __func__,
+		 channel,
+		 BL_READL(hd_op->rx_wt_p),
+		 BL_READL(hd_op->rx_rd_p));
+	no_data = (BL_READL(hd_op->rx_wt_p) == BL_READL(hd_op->rx_rd_p));
+	/* update read mask */
+	spin_lock_irqsave(&ring->poll_lock, flags);
+	if (no_data)
+		ring->poll_mask &= ~(POLLIN | POLLRDNORM);
+	else
+		ring->poll_mask |= POLLIN | POLLRDNORM;
+	spin_unlock_irqrestore(&ring->poll_lock, flags);
+
+	/* release resource */
+	sipc_smem_release_resource(ring->rx_pms, sbuf->dst);
+
+	if (no_data) {
+		if (timeout == 0) {
+			/* no wait */
+			pr_debug("%s: %d-%d ring %d rxbuf is empty!\n",
+				 __func__, dst, channel, bufid);
+			rval = -ENODATA;
+		} else if (timeout < 0) {
+			/* wait forever */
+			rval = wait_event_interruptible(
+				ring->rxwait,
+				sbuf_has_data(ring, dst, false) ||
+				sbuf->state == SBUF_STATE_IDLE);
+			if (rval < 0)
+				pr_debug("%s: wait interrupted!\n", __func__);
+
+			if (sbuf->state == SBUF_STATE_IDLE) {
+				pr_err("%s: sbuf state is idle!\n", __func__);
+				rval = -EIO;
+			}
+		} else {
+			/* wait timeout */
+			rval = wait_event_interruptible_timeout(
+				ring->rxwait,
+				sbuf_has_data(ring, dst, false) ||
+				sbuf->state == SBUF_STATE_IDLE, timeout);
+			if (rval < 0) {
+				pr_debug("%s: wait interrupted!\n", __func__);
+			} else if (rval == 0) {
+				pr_info("%s: wait timeout!\n", __func__);
+				rval = -ETIME;
+			}
+
+			if (sbuf->state == SBUF_STATE_IDLE) {
+				pr_err("%s: state is idle!\n", __func__);
+				rval = -EIO;
+			}
+		}
+	}
+
+	if (rval < 0) {
+		mutex_unlock(&ring->rxlock);
+		return rval;
+	}
+
+	/* must request resource before read or write share memory */
+	rval = sipc_smem_request_resource(ring->rx_pms, sbuf->dst, -1);
+	if (rval < 0) {
+		mutex_unlock(&ring->rxlock);
+		return rval;
+	}
+
+	while (left &&
+	       (BL_READL(hd_op->rx_wt_p) != BL_READL(hd_op->rx_rd_p)) &&
+	       sbuf->state == SBUF_STATE_READY) {
+		/* calc rxpos & rxsize */
+		rxpos = ring->rxbuf_virt +
+			BL_READL(hd_op->rx_rd_p) % hd_op->rx_size;
+		rxsize = (int)(BL_READL(hd_op->rx_wt_p) - BL_READL(hd_op->rx_rd_p));
+		/* check overrun */
+		if (rxsize > hd_op->rx_size)
+			pr_err("%s: bufid = %d, channel=%d rxsize=0x%x, rdptr=%d, wrptr=%d\n",
+			       __func__,
+			       bufid,
+			       channel,
+			       rxsize,
+			       BL_READL(hd_op->rx_rd_p),
+			       BL_READL(hd_op->rx_wt_p));
+
+		rxsize = min(rxsize, left);
+
+		pr_debug("%s: channel=%d, buf=%p, rxpos=%p, rxsize=%d\n",
+			 __func__, channel, u_buf.buf, rxpos, rxsize);
+
+		tail =
rxpos + rxsize - (ring->rxbuf_virt + hd_op->rx_size); + + if (tail > 0) { + /* ring buffer is rounded */ + if ((uintptr_t)u_buf.buf > TASK_SIZE) { + unalign_memcpy(u_buf.buf, rxpos, rxsize - tail); + unalign_memcpy(u_buf.buf + rxsize - tail, + ring->rxbuf_virt, tail); + } else { + /* handle the user space address */ + if (unalign_copy_to_user(u_buf.ubuf, + rxpos, + rxsize - tail) || + unalign_copy_to_user(u_buf.ubuf + + rxsize - tail, + ring->rxbuf_virt, + tail)) { + pr_err("%s: failed to copy to user!\n", + __func__); + rval = -EFAULT; + break; + } + } + } else { + if ((uintptr_t)u_buf.buf > TASK_SIZE) { + unalign_memcpy(u_buf.buf, rxpos, rxsize); + } else { + /* handle the user space address */ + if (unalign_copy_to_user(u_buf.ubuf, + rxpos, rxsize)) { + pr_err("%s: failed to copy to user!\n", + __func__); + rval = -EFAULT; + break; + } + } + } + + /* update rx rdptr */ + v = BL_READL(hd_op->rx_rd_p) + rxsize; + BL_WRITEL(v, hd_op->rx_rd_p); + /* rx ringbuf is full ,so need to notify peer side */ + if (BL_READL(hd_op->rx_wt_p) - BL_READL(hd_op->rx_rd_p) == + hd_op->rx_size - rxsize) { + smsg_set(&mevt, channel, + SMSG_TYPE_EVENT, + SMSG_EVENT_SBUF_RDPTR, + bufid); + smsg_send(dst, &mevt, -1); + } + + left -= rxsize; + u_buf.buf += rxsize; + } + + /* update read mask */ + spin_lock_irqsave(&ring->poll_lock, flags); + if (BL_READL(hd_op->rx_wt_p) == BL_READL(hd_op->rx_rd_p)) + ring->poll_mask &= ~(POLLIN | POLLRDNORM); + else + ring->poll_mask |= POLLIN | POLLRDNORM; + spin_unlock_irqrestore(&ring->poll_lock, flags); + + /* release resource */ + sipc_smem_release_resource(ring->rx_pms, sbuf->dst); + if (ring->need_wake_lock) + sprd_pms_release_wakelock_later(ring->rx_pms, 20); + + mutex_unlock(&ring->rxlock); + + pr_debug("%s: done, channel=%d, len=%d", __func__, channel, len - left); + + if (len == left) + return rval; + else + return (len - left); +} +EXPORT_SYMBOL_GPL(sbuf_read); + +int sbuf_poll_wait(u8 dst, u8 channel, u32 bufid, + struct file *filp, poll_table *wait) +{ + struct sbuf_mgr *sbuf; + struct sbuf_ring *ring = NULL; + struct sbuf_ring_header_op *hd_op; + unsigned int mask = 0; + u8 ch_index; + + ch_index = sipc_channel2index(channel); + if (ch_index == INVALID_CHANEL_INDEX) { + pr_err("%s:channel %d invalid!\n", __func__, channel); + return mask; + } + sbuf = sbufs[dst][ch_index]; + if (!sbuf) + return mask; + ring = &sbuf->rings[bufid]; + hd_op = &ring->header_op; + if (sbuf->state != SBUF_STATE_READY) { + pr_err("sbuf-%d-%d not ready to poll !\n", dst, channel); + return mask; + } + + poll_wait(filp, &ring->txwait, wait); + poll_wait(filp, &ring->rxwait, wait); + + + if (sbuf_has_data(ring, dst, true)) + mask |= POLLOUT | POLLWRNORM; + + if (sbuf_has_data(ring, dst, false)) + mask |= POLLIN | POLLRDNORM; + + return mask; +} +EXPORT_SYMBOL_GPL(sbuf_poll_wait); + +int sbuf_status(u8 dst, u8 channel) +{ + struct sbuf_mgr *sbuf; + u8 ch_index; + + ch_index = sipc_channel2index(channel); + if (ch_index == INVALID_CHANEL_INDEX) { + pr_err("%s:channel %d invalid!\n", __func__, channel); + return -EINVAL; + } + sbuf = sbufs[dst][ch_index]; + + if (!sbuf) + return -ENODEV; + if (sbuf->state != SBUF_STATE_READY) + return -ENODEV; + + return 0; +} +EXPORT_SYMBOL_GPL(sbuf_status); + +int sbuf_register_notifier(u8 dst, u8 channel, u32 bufid, + void (*handler)(int event, void *data), void *data) +{ + struct sbuf_mgr *sbuf; + struct sbuf_ring *ring = NULL; + u8 ch_index; + + ch_index = sipc_channel2index(channel); + if (ch_index == INVALID_CHANEL_INDEX) { + pr_err("%s:channel %d 
invalid!\n", __func__, channel);
+		return -EINVAL;
+	}
+	sbuf = sbufs[dst][ch_index];
+	if (!sbuf)
+		return -ENODEV;
+	ring = &sbuf->rings[bufid];
+	ring->handler = handler;
+	ring->data = data;
+
+	if (sbuf->state == SBUF_STATE_READY)
+		handler(SBUF_NOTIFY_READ, data);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sbuf_register_notifier);
+
+void sbuf_get_status(u8 dst, char *status_info, int size)
+{
+	struct sbuf_mgr *sbuf = NULL;
+	struct sbuf_ring *ring = NULL;
+	struct sbuf_ring_header_op *hd_op;
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 4,10,0 ))
+	wait_queue_t *pos;
+#else
+	wait_queue_entry_t *pos;
+#endif
+	struct task_struct *task;
+	unsigned long flags;
+	int i, n, len, cnt;
+	u32 b_select;
+	char *phead;
+#if defined(SIPC_DEBUG_SBUF_RDWT_OWNER)
+	struct name_node *node = NULL;
+#endif
+
+	/* validate the buffer before touching it with strlen() */
+	if (!status_info || size < 0 || dst >= SIPC_ID_NR)
+		return;
+	len = strlen(status_info);
+
+	for (i = 0; i < SMSG_VALID_CH_NR; i++) {
+		sbuf = sbufs[dst][i];
+		if (!sbuf)
+			continue;
+
+		ring = &sbuf->rings[0];
+		/* must request resource before read or write share memory */
+		if (sipc_smem_request_resource(ring->rx_pms, dst, 1000) < 0)
+			continue;
+
+		for (n = 0; n < sbuf->ringnr && len < size; n++) {
+			ring = &sbuf->rings[n];
+			hd_op = &ring->header_op;
+
+			if ((BL_READL(hd_op->rx_wt_p) - BL_READL(hd_op->rx_rd_p))
+					< hd_op->rx_size)
+				continue;
+
+			snprintf(status_info + len,
+				 size - len,
+				 "ch-%d-ring-%d is full.\n",
+				 sbuf->channel,
+				 n);
+			len = strlen(status_info);
+
+			/* show all rxwait tasks */
+			spin_lock_irqsave(&ring->rxwait.lock, flags);
+			cnt = 0;
+
+			list_for_each_entry(pos,
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 4,10,0 ))
+					    &ring->rxwait.task_list,
+					    task_list
#else
+					    &ring->rxwait.head,
+					    entry
#endif
+					    ) {
+				task = sbuf_wait_get_task(pos, &b_select);
+				if (!task)
+					continue;
+
+				if (b_select)
+					phead = "select task";
+				else
+					phead = "rxwait task";
+
+				snprintf(
+					status_info + len,
+					size - len,
+					"%s %d: %s, state=0x%lx, pid=%d.\n",
+					phead,
+					cnt, task->comm,
+					task->state, task->pid);
+				cnt++;
+				len = strlen(status_info);
+			}
+			spin_unlock_irqrestore(&ring->rxwait.lock, flags);
+
+			/* only show the latest ever-read task */
+#if defined(SIPC_DEBUG_SBUF_RDWT_OWNER)
+			spin_lock_irqsave(&ring->rxwait.lock, flags);
+			list_for_each_entry(node, &ring->rx_list, list) {
+				if (node->latest) {
+					snprintf(
+						status_info + len,
+						size - len,
+						"read task: %s, pid = %d.\n",
+						node->comm,
+						node->pid);
+					break;
+				}
+			}
+			spin_unlock_irqrestore(&ring->rxwait.lock, flags);
+#endif
+		}
+		ring = &sbuf->rings[0];
+		/* release resource */
+		sipc_smem_release_resource(ring->rx_pms, sbuf->dst);
+	}
+}
+EXPORT_SYMBOL_GPL(sbuf_get_status);
+
+#if defined(CONFIG_DEBUG_FS)
+static void sbuf_debug_task_show(struct seq_file *m,
+				 struct sbuf_mgr *sbuf, int task_type)
+{
+	int n, cnt;
+	u32 b_select;
+	unsigned long flags;
+	struct sbuf_ring *ring = NULL;
+	wait_queue_head_t *phead;
+	char *buf;
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 4,10,0 ))
+	wait_queue_t *pos;
+#else
+	wait_queue_entry_t *pos;
+#endif
+	struct task_struct *task;
+
+	for (n = 0; n < sbuf->ringnr; n++) {
+		ring = &sbuf->rings[n];
+		cnt = 0;
+
+		if (task_type == TASK_RXWAIT) {
+			phead = &ring->rxwait;
+			buf = "rxwait task";
+		} else if (task_type == TASK_TXWAIT) {
+			phead = &ring->txwait;
+			buf = "txwait task";
+		} else {
+			phead = &ring->rxwait;
+			buf = "select task";
+		}
+
+		spin_lock_irqsave(&phead->lock, flags);
+
+		list_for_each_entry(pos,
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 4,10,0 ))
+				    &phead->task_list,
task_list +#else + &phead->head, entry +#endif + ){ + task = sbuf_wait_get_task(pos, &b_select); + if (!task) + continue; + + if (b_select && (task_type != TASK_SELECT)) + continue; + + seq_printf(m, " ring[%2d]: %s %d ", + n, + buf, + cnt); + seq_printf(m, ": %s, state = 0x%lx, pid = %d\n", + task->comm, + task->state, + task->pid); + cnt++; + } + spin_unlock_irqrestore( + &phead->lock, + flags); + } +} + +#if defined(SIPC_DEBUG_SBUF_RDWT_OWNER) +static void sbuf_debug_list_show(struct seq_file *m, + struct sbuf_mgr *sbuf, int b_rx) +{ + int n, cnt; + struct sbuf_ring *ring = NULL; + struct list_head *plist; + char *buf; + struct name_node *node = NULL; + unsigned long flags; + + /* list all sbuf task list */ + for (n = 0; n < sbuf->ringnr; n++) { + ring = &sbuf->rings[n]; + cnt = 0; + + if (b_rx) { + plist = &ring->rx_list; + buf = "read task"; + } else { + plist = &ring->tx_list; + buf = "write task"; + } + + spin_lock_irqsave(&ring->rxwait.lock, flags); + list_for_each_entry(node, plist, list) { + seq_printf(m, " ring[%2d]: %s %d : %s, pid = %d, latest = %d\n", + n, + buf, + cnt, + node->comm, + node->pid, + node->latest); + cnt++; + } + spin_unlock_irqrestore(&ring->rxwait.lock, flags); + } +} +#endif + +static int sbuf_debug_show(struct seq_file *m, void *private) +{ + struct sbuf_mgr *sbuf = NULL; + struct sbuf_ring *ring = NULL; + struct sbuf_ring_header_op *hd_op; + int i, j, n, cnt; + struct smsg_ipc *sipc = NULL; + + for (i = 0; i < SIPC_ID_NR; i++) { + sipc = smsg_ipcs[i]; + if (!sipc) + continue; + + /* must request resource before read or write share memory */ + if (sipc_smem_request_resource(sipc->sipc_pms, + sipc->dst, 1000) < 0) + continue; + + sipc_debug_putline(m, '*', 120); + seq_printf(m, "dst: 0x%0x, sipc: %s:\n", i, sipc->name); + sipc_debug_putline(m, '*', 120); + + for (j = 0; j < SMSG_VALID_CH_NR; j++) { + sbuf = sbufs[i][j]; + if (!sbuf) + continue; + /* list a sbuf channel */ + sipc_debug_putline(m, '-', 100); + seq_printf(m, "sbuf_%d_%03d, state: %d, force: %d", + sbuf->dst, + sbuf->channel, + sbuf->state, + sbuf->force_send); + seq_printf(m, "virt: 0x%lx, phy: 0x%0x, map: 0x%x", + (unsigned long)sbuf->smem_virt, + sbuf->smem_addr, + sbuf->dst_smem_addr); + seq_printf(m, " size: 0x%0x, ringnr: %d\n", + sbuf->smem_size, + sbuf->ringnr); + sipc_debug_putline(m, '-', 100); + + /* list all sbuf ring info list in a chanel */ + sipc_debug_putline(m, '-', 80); + seq_puts(m, " 1. all sbuf ring info list:\n"); + for (n = 0; n < sbuf->ringnr; n++) { + ring = &sbuf->rings[n]; + hd_op = &ring->header_op; + if (BL_READL(hd_op->tx_wt_p) == 0 && + BL_READL(hd_op->rx_wt_p) == 0) + continue; + + seq_printf(m, " rx ring[%2d]: addr: 0x%0x, mask: 0x%x", + n, ring->header->rxbuf_addr, + ring->poll_mask); + seq_printf(m, "rp: 0x%0x, wp: 0x%0x, size: 0x%0x\n", + BL_READL(hd_op->rx_rd_p), + BL_READL(hd_op->rx_wt_p), + hd_op->rx_size); + + seq_printf(m, " tx ring[%2d]: addr: 0x%0x, ", + n, ring->header->txbuf_addr); + seq_printf(m, "rp: 0x%0x, wp: 0x%0x, size: 0x%0x\n", + BL_READL(hd_op->tx_rd_p), + BL_READL(hd_op->tx_wt_p), + hd_op->tx_size); + } + + /* list all sbuf rxwait/txwait in a chanel */; + sipc_debug_putline(m, '-', 80); + seq_puts(m, " 2. all waittask list:\n"); + sbuf_debug_task_show(m, sbuf, TASK_RXWAIT); + sbuf_debug_task_show(m, sbuf, TASK_TXWAIT); + sbuf_debug_task_show(m, sbuf, TASK_SELECT); + +#ifdef SIPC_DEBUG_SBUF_RDWT_OWNER + /* list all sbuf ever read task list in a chanel */; + sipc_debug_putline(m, '-', 80); + seq_puts(m, " 3. 
all ever rdwt list:\n"); + sbuf_debug_list_show(m, sbuf, 1); + sbuf_debug_list_show(m, sbuf, 0); +#endif + + /* list all rx full ring list in a chanel */ + cnt = 0; + for (n = 0; n < sbuf->ringnr; n++) { + ring = &sbuf->rings[n]; + hd_op = &ring->header_op; + if ((BL_READL(hd_op->rx_wt_p) - BL_READL(hd_op->rx_rd_p)) + == hd_op->rx_size) { + if (cnt == 0) { + sipc_debug_putline(m, '-', 80); + seq_puts(m, " x. all rx full ring list:\n"); + } + cnt++; + seq_printf(m, " ring[%2d]\n", n); + } + } + } + /* release resource */ + sipc_smem_release_resource(sipc->sipc_pms, sipc->dst); + } + + return 0; +} + +static int sbuf_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, sbuf_debug_show, inode->i_private); +} + +static const struct file_operations sbuf_debug_fops = { + .open = sbuf_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +int sbuf_init_debugfs(void *root) +{ + if (!root) + return -ENXIO; + debugfs_create_file("sbuf", 0444, + (struct dentry *)root, + NULL, &sbuf_debug_fops); + return 0; +} +EXPORT_SYMBOL_GPL(sbuf_init_debugfs); + +#endif /* CONFIG_DEBUG_FS */ + +MODULE_AUTHOR("Chen Gaopeng"); +MODULE_DESCRIPTION("SIPC/SBUF driver"); +MODULE_LICENSE("GPL v2"); diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/sbuf.h b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/sbuf.h new file mode 100644 index 000000000..80104f0a6 --- /dev/null +++ b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/sbuf.h @@ -0,0 +1,126 @@ +/* + * Copyright (C) 2019 Spreadtrum Communications Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */
+
+#ifndef __SBUF_H
+#define __SBUF_H
+
+/* flag for CMD/DONE msg type */
+#define SMSG_CMD_SBUF_INIT	0x0001
+#define SMSG_DONE_SBUF_INIT	0x0002
+
+/* flag for EVENT msg type */
+#define SMSG_EVENT_SBUF_WRPTR	0x0001
+#define SMSG_EVENT_SBUF_RDPTR	0x0002
+
+#if defined(CONFIG_DEBUG_FS)
+#define SIPC_DEBUG_SBUF_RDWT_OWNER
+#define MAX_RECORD_CNT	0x10
+#endif
+
+/* ring buf header */
+struct sbuf_ring_header {
+	/* send-buffer info */
+	u32 txbuf_addr;
+	u32 txbuf_size;
+	u32 txbuf_rdptr;
+	u32 txbuf_wrptr;
+
+	/* recv-buffer info */
+	u32 rxbuf_addr;
+	u32 rxbuf_size;
+	u32 rxbuf_rdptr;
+	u32 rxbuf_wrptr;
+};
+
+struct sbuf_ring_header_op {
+	/*
+	 * these pointers point into share memory and are
+	 * used to update the rdptr and wrptr in share memory
+	 */
+	volatile u32 *rx_rd_p;
+	volatile u32 *rx_wt_p;
+	volatile u32 *tx_rd_p;
+	volatile u32 *tx_wt_p;
+
+	/*
+	 * these members are copied from share memory,
+	 * because their contents never change there
+	 */
+	u32 rx_size;	/* rxbuf_size */
+	u32 tx_size;	/* txbuf_size */
+};
+
+/* sbuf_smem_header is the structure of smem for rings */
+struct sbuf_smem_header {
+	u32 ringnr;
+
+	struct sbuf_ring_header headers[0];
+};
+
+struct sbuf_ring {
+	/* tx/rx buffer info */
+	volatile struct sbuf_ring_header *header;
+	struct sbuf_ring_header_op header_op;
+
+	void *txbuf_virt;
+	void *rxbuf_virt;
+
+	/* send/recv wait queue */
+	wait_queue_head_t txwait;
+	wait_queue_head_t rxwait;
+
+#if defined(SIPC_DEBUG_SBUF_RDWT_OWNER)
+	/* record all task history */
+	struct list_head tx_list;
+	struct list_head rx_list;
+#endif
+
+	/* send/recv mutex */
+	struct mutex txlock;
+	struct mutex rxlock;
+
+	struct sprd_pms *tx_pms;
+	struct sprd_pms *rx_pms;
+	char tx_pms_name[20];
+	char rx_pms_name[20];
+
+	bool need_wake_lock;
+	unsigned int poll_mask;
+	/* protect the poll_mask member */
+	spinlock_t poll_lock;
+
+	void (*handler)(int event, void *data);
+	void *data;
+};
+
+#define SBUF_STATE_IDLE		0
+#define SBUF_STATE_READY	1
+
+struct sbuf_mgr {
+	u8 dst;
+	u8 channel;
+	bool force_send;
+	u32 state;
+
+	void *smem_virt;
+	u32 smem_addr;
+	u32 smem_size;
+
+	u32 smem_addr_debug;
+	u32 dst_smem_addr;
+	u32 ringnr;
+
+	struct sbuf_ring *rings;
+	struct task_struct *thread;
+};
+#endif
diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/sipc.c b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/sipc.c
new file mode 100644
index 000000000..ee256fab8
--- /dev/null
+++ b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/sipc.c
@@ -0,0 +1,524 @@
+/*
+ * Copyright (C) 2019 Spreadtrum Communications Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "../include/sipc.h"
+#include "sipc_priv.h"
+
+#define MBOX_BAMK	"mbox"
+#define PCIE_BAMK	"pcie"
+
+enum {
+	NORMAL_MODE = 0,
+	CHARGE_MODE,
+	CALI_MODE
+};
+
+#define CALI_LATENCY	(10000 * 1000)
+#define NORMAL_LATENCY	(1 * 1000)
+
+/*
+ * In charge mode, only the pm system will boot,
+ * so just create the pm system sipc.
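+ * (sipc_probe() below enforces this by returning -ENODEV for any
+ * dst other than SIPC_ID_PM_SYS while g_boot_mode == CHARGE_MODE.)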
+ */ +static u8 g_boot_mode = NORMAL_MODE; +/* +static int __init sipc_early_mode(char *str) +{ + if (!memcmp(str, "charger", 7)) + g_boot_mode = CHARGE_MODE; + else if (!memcmp(str, "cali", 4)) + g_boot_mode = CALI_MODE; + else + g_boot_mode = NORMAL_MODE; + + return 0; +} + +early_param("androidboot.mode", sipc_early_mode); +*/ + +#if defined(CONFIG_DEBUG_FS) +void sipc_debug_putline(struct seq_file *m, char c, int n) +{ + char buf[300]; + int i, max, len; + + /* buf will end with '\n' and 0 */ + max = ARRAY_SIZE(buf) - 2; + len = (n > max) ? max : n; + + for (i = 0; i < len; i++) + buf[i] = c; + + buf[i] = '\n'; + buf[i + 1] = 0; + + seq_puts(m, buf); +} +EXPORT_SYMBOL_GPL(sipc_debug_putline); +#endif + +static u32 sipc_rxirq_status(u8 dst) +{ + return 0; +} + +static void sipc_rxirq_clear(u8 dst) +{ + +} + +static void sipc_txirq_trigger(u8 dst, u64 msg) +{ + struct smsg_ipc *ipc; + + ipc = smsg_ipcs[dst]; + + if (ipc) { +#ifdef CONFIG_SPRD_MAILBOX + if (ipc->type == SIPC_BASE_MBOX) { + mbox_raw_sent(ipc->core_id, msg); + return; + } +#endif + + if (ipc->type == SIPC_BASE_PCIE) { +#ifdef CONFIG_SPRD_PCIE_EP_DEVICE + sprd_ep_dev_raise_irq(ipc->ep_dev, PCIE_DBELL_SIPC_IRQ); +#endif + +#ifdef CONFIG_PCIE_EPF_SPRD + sprd_pci_epf_raise_irq(ipc->ep_fun, PCIE_MSI_SIPC_IRQ); +#endif + return; + } + } +} + +#ifdef SPRD_PCIE_USE_DTS +static int sipc_parse_dt(struct smsg_ipc *ipc, + struct device_node *np, struct device *dev) +{ + u32 val[3]; + int ret; + const char *type; + + /* get name */ + ret = of_property_read_string(np, "sprd,name", &ipc->name); + if (ret) + return ret; + + pr_info("sipc: name=%s\n", ipc->name); + + /* get sipc type, optional */ + if (of_property_read_string(np, "sprd,type", &type) == 0) { + pr_info("sipc: type=%s\n", type); + if (strcmp(MBOX_BAMK, type) == 0) + ipc->type = SIPC_BASE_MBOX; + else if (strcmp(PCIE_BAMK, type) == 0) + ipc->type = SIPC_BASE_PCIE; + } + + /* get sipc client, optional */ + if (of_property_read_u32_array(np, "sprd,client", val, 1) == 0) { + ipc->client = (u8)val[0]; + pr_info("sipc: client=%d\n", ipc->client); + } + + /* get sipc dst */ + ret = of_property_read_u32_array(np, "sprd,dst", val, 1); + if (!ret) { + ipc->dst = (u8)val[0]; + pr_info("sipc: dst =%d\n", ipc->dst); + } + + if (ret || ipc->dst >= SIPC_ID_NR) { + pr_err("sipc: dst err, ret =%d.\n", ret); + return ret; + } + +#ifdef CONFIG_SPRD_MAILBOX + if (ipc->type == SIPC_BASE_MBOX) { + /* get core id */ + ipc->core_id = (u8)MBOX_INVALID_CORE; + ret = of_property_read_u32_array(np, "sprd,core", val, 1); + if (!ret) { + ipc->core_id = (u8)val[0]; + pr_info("sipc: core=%d\n", ipc->core_id); + } else { + pr_err("sipc: core err, ret =%d.\n", ret); + return ret; + } + + /* get core sensor id, optional*/ + ipc->core_sensor_id = (u8)MBOX_INVALID_CORE; + if (of_property_read_u32_array(np, "sprd,core_sensor", + val, 1) == 0) { + ipc->core_sensor_id = (u8)val[0]; + pr_info("sipc: core_sensor=%d\n", ipc->core_sensor_id); + } + } +#endif + +#ifdef CONFIG_SPRD_PCIE_EP_DEVICE + if (ipc->type == SIPC_BASE_PCIE) { +#ifdef CONFIG_SPRD_PCIE + struct device_node *pdev_node; +#endif + + ret = of_property_read_u32_array(np, + "sprd,ep-dev", + &ipc->ep_dev, + 1); + pr_info("sipc: ep_dev=%d\n", ipc->ep_dev); + if (ret || ipc->ep_dev >= PCIE_EP_NR) { + pr_err("sipc: ep_dev err, ret =%d.\n", ret); + return ret; + } + +#ifdef CONFIG_SPRD_PCIE + /* get pcie rc ctrl device */ + pdev_node = of_parse_phandle(np, "sprd,rc-ctrl", 0); + if (!pdev_node) { + pr_err("sipc: sprd,rc-ctrl err.\n"); + return -ENODEV; + } + 
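		/*
		 * of_find_device_by_node() below takes a reference on the
		 * platform device; the of_node_put() that follows only drops
		 * the node reference taken by of_parse_phandle() above.
		 */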
+		ipc->pcie_dev = of_find_device_by_node(pdev_node);
+		of_node_put(pdev_node);
+
+		if (!ipc->pcie_dev) {
+			pr_err("sipc: find pcie_dev err.\n");
+			return -ENODEV;
+		}
+#endif
+	}
+#endif
+
+#ifdef CONFIG_PCIE_EPF_SPRD
+	if (ipc->type == SIPC_BASE_PCIE) {
+		ret = of_property_read_u32_array(np,
+						 "sprd,ep-fun",
+						 &ipc->ep_fun,
+						 1);
+		pr_info("sipc: ep_fun=%d\n", ipc->ep_fun);
+		if (ret || ipc->ep_fun >= SPRD_FUNCTION_MAX) {
+			pr_err("sipc: ep_fun err, ret =%d.\n", ret);
+			return ret;
+		}
+
+		/* parse doorbell irq */
+		ret = of_irq_get(np, 0);
+		if (ret < 0) {
+			pr_err("sipc: doorbell irq err, ret=%d\n", ret);
+			return -EINVAL;
+		}
+		ipc->irq = ret;
+		pr_info("sipc: irq=%d\n", ipc->irq);
+	}
+#endif
+
+	/* get smem type */
+	ret = of_property_read_u32_array(np,
+					 "sprd,smem-type",
+					 &val[0],
+					 1);
+	if (!ret)
+		ipc->smem_type = (enum smem_type)val[0];
+	else
+		ipc->smem_type = SMEM_LOCAL;
+
+	pr_info("sipc: smem_type = %d, ret =%d\n", ipc->smem_type, ret);
+
+	/* get smem info */
+	ret = of_property_read_u32_array(np,
+					 "sprd,smem-info",
+					 val,
+					 3);
+	if (ret) {
+		pr_err("sipc: parse smem info failed.\n");
+		return ret;
+	}
+	ipc->smem_base = val[0];
+	ipc->dst_smem_base = val[1];
+	ipc->smem_size = val[2];
+	pr_info("sipc: smem_base=0x%x, dst_smem_base=0x%x, smem_size=0x%x\n",
+		ipc->smem_base, ipc->dst_smem_base, ipc->smem_size);
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+	/* try to get high_offset */
+	ret = of_property_read_u32(np,
+				   "sprd,high-offset",
+				   val);
+	if (!ret) {
+		ipc->high_offset = val[0];
+		pr_info("sipc: high_offset=0x%x\n", ipc->high_offset);
+	}
+#endif
+
+	if (ipc->type == SIPC_BASE_PCIE) {
+		/* pcie sipc: the host must use local smem (SMEM_LOCAL) */
+		if (!ipc->client && ipc->smem_type != SMEM_LOCAL) {
+			pr_err("sipc: host must use local smem!");
+			return -EINVAL;
+		}
+
+		if (ipc->client && ipc->smem_type != SMEM_PCIE) {
+			pr_err("sipc: client must use pcie smem!");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+#else
+static u32 sipc_get_smem_base(size_t size)
+{
+	unsigned long order = get_order(size);
+	struct page *page, *p, *e;
+
+	page = alloc_pages(GFP_KERNEL, order);
+	if (page == NULL) {
+		printk("sipc alloc pages fail\n");
+		return 0;
+	}
+	split_page(page, order);
+	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
+		__free_page(p);
+
+	if (PageHighMem(page)) {
+		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
+
+		while (size > 0) {
+			void *ptr = kmap_atomic(page);
+
+			memset(ptr, 0, PAGE_SIZE);
+			kunmap_atomic(ptr);
+			page++;
+			size -= PAGE_SIZE;
+		}
+
+		return base;
+	} else {
+		void *ptr = page_address(page);
+
+		memset(ptr, 0, size);
+		return __pa(ptr);
+	}
+}
+
+static int sipc_parse_dt(struct smsg_ipc *ipc,
+			 struct device_node *np, struct device *dev)
+{
+	u32 val[3];
+	int ret = 0;
+
+	/* get name */
+	ipc->name = "sprd,sipc";
+	pr_info("sipc: name=%s\n", ipc->name);
+
+	/* get sipc type, optional */
+	ipc->type = SIPC_BASE_PCIE;
+	pr_info("sipc: type=%d\n", ipc->type);
+
+	/* get sipc dst */
+	ipc->dst = 1;
+	pr_info("sipc: dst =%d\n", ipc->dst);
+
+	if (ipc->dst >= SIPC_ID_NR) {
+		pr_err("sipc: dst err\n");
+		return -EINVAL;
+	}
+
+#ifdef CONFIG_SPRD_PCIE_EP_DEVICE
+	if (ipc->type == SIPC_BASE_PCIE) {
+		ipc->ep_dev = 0;
+		pr_info("sipc: ep_dev=%d\n", ipc->ep_dev);
+		if (ipc->ep_dev >= PCIE_EP_NR) {
+			pr_err("sipc: ep_dev err\n");
+			return -1;
+		}
+	}
+#endif
+	/* get smem type */
+	ipc->smem_type = SMEM_LOCAL;
+
+	pr_info("sipc: smem_type = %d\n", ipc->smem_type);
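+
+	/*
+	 * Without DTS the layout is fixed: a 3 MB pool is carved out of
+	 * kernel pages below, and the same address serves as both local
+	 * and dst base because the non-DTS host always runs on local smem.
+	 */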
+
+	/* get smem info */
+	val[0] = sipc_get_smem_base(0x0300000);
+	val[1] = val[0];
+	val[2] = 0x0300000;
+	ipc->smem_base = val[0];
+	ipc->dst_smem_base = val[1];
+	ipc->smem_size = val[2];
+	pr_info("sipc: smem_base=0x%x, dst_smem_base=0x%x, smem_size=0x%x\n",
+		ipc->smem_base, ipc->dst_smem_base, ipc->smem_size);
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+	/* try to get high_offset */
+	ipc->high_offset = 0;
+	pr_info("sipc: high_offset=0x%x\n", ipc->high_offset);
+#endif
+
+	if (ipc->type == SIPC_BASE_PCIE) {
+		/* pcie sipc: the host must use local smem (SMEM_LOCAL) */
+		if (!ipc->client && ipc->smem_type != SMEM_LOCAL) {
+			pr_err("sipc: host must use local smem!");
+			return -EINVAL;
+		}
+
+		if (ipc->client && ipc->smem_type != SMEM_PCIE) {
+			pr_err("sipc: client must use pcie smem!");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+#endif
+
+static int sipc_probe(struct platform_device *pdev)
+{
+	struct smsg_ipc *ipc;
+	struct device_node *np;
+
+	if (1) {
+		np = pdev->dev.of_node;
+		ipc = devm_kzalloc(&pdev->dev,
+				   sizeof(struct smsg_ipc),
+				   GFP_KERNEL);
+		if (!ipc)
+			return -ENOMEM;
+
+		if (sipc_parse_dt(ipc, np, &pdev->dev)) {
+			pr_err("%s: failed to parse dt!\n", __func__);
+			return -ENODEV;
+		}
+
+		/*
+		 * In charge mode, only the pm system will boot,
+		 * so just create the pm system sipc.
+		 */
+		if (g_boot_mode == CHARGE_MODE && ipc->dst != SIPC_ID_PM_SYS)
+			return -ENODEV;
+
+		ipc->rxirq_status = sipc_rxirq_status;
+		ipc->rxirq_clear = sipc_rxirq_clear;
+		ipc->txirq_trigger = sipc_txirq_trigger;
+		spin_lock_init(&ipc->txpinlock);
+
+		if (ipc->type == SIPC_BASE_PCIE) {
+			/* init mpm delay-enter-idle time for pcie */
+			if (g_boot_mode == CALI_MODE)
+				ipc->latency = CALI_LATENCY;
+			else
+				ipc->latency = NORMAL_LATENCY;
+		}
+
+		smsg_ipc_create(ipc);
+		platform_set_drvdata(pdev, ipc);
+	}
+	return 0;
+}
+
+static int sipc_remove(struct platform_device *pdev)
+{
+	struct smsg_ipc *ipc = platform_get_drvdata(pdev);
+
+	smsg_ipc_destroy(ipc);
+
+	devm_kfree(&pdev->dev, ipc);
+	return 0;
+}
+
+#ifdef SPRD_PCIE_USE_DTS
+static const struct of_device_id sipc_match_table[] = {
+	{ .compatible = "sprd,sipc", },
+	{ },
+};
+#endif
+
+static struct platform_driver sipc_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "sipc",
+#ifdef SPRD_PCIE_USE_DTS
+		.of_match_table = sipc_match_table,
+#endif
+	},
+	.probe = sipc_probe,
+	.remove = sipc_remove,
+};
+
+#ifndef SPRD_PCIE_USE_DTS
+static void sipc_platform_device_release(struct device *dev) {}
+static struct platform_device sipc_device = {
+	.name = "sipc",
+	.id = -1,
+	.dev = {
+		.release = sipc_platform_device_release,
+	}
+};
+#endif
+
+int sipc_init(void)
+{
+	int ret;
+
+	smsg_init_channel2index();
+#ifndef SPRD_PCIE_USE_DTS
+	if ((ret = platform_device_register(&sipc_device)))
+		return ret;
+#endif
+
+	if ((ret = platform_driver_register(&sipc_driver))) {
+#ifndef SPRD_PCIE_USE_DTS
+		platform_device_unregister(&sipc_device);
+#endif
+		return ret;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(sipc_init);
+
+void sipc_exit(void)
+{
+	platform_driver_unregister(&sipc_driver);
+#ifndef SPRD_PCIE_USE_DTS
+	platform_device_unregister(&sipc_device);
+#endif
+}
+EXPORT_SYMBOL_GPL(sipc_exit);
\ No newline at end of file
diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/sipc_debugfs.c b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/sipc_debugfs.c
new file mode 100644
index 000000000..135f13984
--- /dev/null
+++ b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/sipc_debugfs.c
@@ -0,0 +1,51 @@
+/*
+ *
Copyright (C) 2019 Spreadtrum Communications Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include + +#include "../include/sipc.h" +#if defined(CONFIG_DEBUG_FS) +#include "sipc_debugfs.h" + +int sipc_init_debugfs(void) +{ + struct dentry *root = debugfs_create_dir("sipc", NULL); + + if (!root) + return -ENXIO; + + smsg_init_debugfs(root); +#if defined(CONFIG_SPRD_SIPC_SMSGC) + smsgc_init_debugfs(root); +#endif + sbuf_init_debugfs(root); + sblock_init_debugfs(root); +#ifdef CONFIG_SPRD_SIPC_ZERO_COPY_SIPX + sipx_init_debugfs(root); +#endif +#ifdef CONFIG_SPRD_SIPC_SWCNBLK + swcnblk_init_debugfs(root); +#endif + smem_init_debugfs(root); +#ifdef CONFIG_SPRD_MAILBOX + mbox_init_debugfs(root); +#endif + return 0; +} +EXPORT_SYMBOL_GPL(sipc_init_debugfs); +#endif /* CONFIG_DEBUG_FS */ diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/sipc_debugfs.h b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/sipc_debugfs.h new file mode 100644 index 000000000..e7849f515 --- /dev/null +++ b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/sipc_debugfs.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2019 Spreadtrum Communications Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef __SIPC_DEBUGFS_H +#define __SIPC_DEBUGFS_H + +int sbuf_init_debugfs(void *root); +int smsg_init_debugfs(void *root); +int sblock_init_debugfs(void *root); +int smem_init_debugfs(void *root); + +#ifdef CONFIG_SPRD_SIPC_ZERO_COPY_SIPX +int sipx_init_debugfs(void *root); +#endif + +#ifdef CONFIG_SPRD_SIPC_SWCNBLK +int swcnblk_init_debugfs(void *root); +#endif + +#if defined(CONFIG_SPRD_SIPC_SMSGC) +int smsgc_init_debugfs(void *root); +#endif + +#ifdef CONFIG_SPRD_MAILBOX +int mbox_init_debugfs(void *root); +#endif +#endif /* !__SIPC_DEBUGFS_H */ + diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/sipc_priv.h b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/sipc_priv.h new file mode 100644 index 000000000..0702faf39 --- /dev/null +++ b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/sipc_priv.h @@ -0,0 +1,188 @@ +/* + * Copyright (C) 2019 Spreadtrum Communications Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __SIPC_PRIV_H +#define __SIPC_PRIV_H +#include +#include +#include + +#include "../include/sprd_mpm.h" +#ifdef CONFIG_SPRD_MAILBOX +#include +#endif + +#ifdef CONFIG_SPRD_PCIE_EP_DEVICE +#include "../include/sprd_pcie_ep_device.h" +#endif + +#ifdef CONFIG_PCIE_EPF_SPRD +#include +#endif + +#ifdef CONFIG_PCI +#include "../include/sprd_pcie_resource.h" +#endif + +#include "../include/sipc_big_to_little.h" + +#ifndef SZ_1K +#define SZ_1K 0x00000400 +#define SZ_4K 0x00001000 +#endif + +enum { + SIPC_BASE_MBOX = 0, + SIPC_BASE_PCIE, + SIPC_BASE_IPI, + SIPC_BASE_NR +}; + +enum smem_type { + SMEM_LOCAL = 0, + SMEM_PCIE +}; + +extern struct smsg_ipc *smsg_ipcs[]; +#define SMSG_CACHE_NR 256 + +struct smsg_channel { + /* wait queue for recv-buffer */ + wait_queue_head_t rxwait; + struct mutex rxlock; + struct sprd_pms *tx_pms; + struct sprd_pms *rx_pms; + char tx_name[16]; + char rx_name[16]; + + /* cached msgs for recv */ + uintptr_t wrptr[1]; + uintptr_t rdptr[1]; + struct smsg caches[SMSG_CACHE_NR]; +}; + +/* smsg ring-buffer between AP/CP ipc */ +struct smsg_ipc { + const char *name; + struct sprd_pms *sipc_pms; + + u8 dst; + u8 client; /* sipc is client mode */ + /* target core_id over mailbox */ + u8 core_id; + u8 core_sensor_id; + u32 type; /* sipc type, mbox, ipi, pcie */ + + void __iomem *write_addr; + +#ifdef CONFIG_SPRD_PCIE_EP_DEVICE + u32 ep_dev; + struct platform_device *pcie_dev; +#endif +#ifdef CONFIG_PCIE_EPF_SPRD + u32 ep_fun; +#endif + u32 latency; + + /* send-buffer info */ + uintptr_t txbuf_addr; + u32 txbuf_size; /* must be 2^n */ + uintptr_t txbuf_rdptr; + uintptr_t txbuf_wrptr; + + /* recv-buffer info */ + uintptr_t rxbuf_addr; + u32 rxbuf_size; /* must be 2^n */ + uintptr_t rxbuf_rdptr; + uintptr_t rxbuf_wrptr; + + /* sipc irq related */ + int irq; + u32 (*rxirq_status)(u8 id); + void (*rxirq_clear)(u8 id); + void (*txirq_trigger)(u8 id, u64 msg); + + u32 ring_base; + u32 ring_size; + void *smem_vbase; + u32 smem_base; + u32 smem_size; + enum smem_type smem_type; + u32 dst_smem_base; +#ifdef CONFIG_PHYS_ADDR_T_64BIT + u32 high_offset; +#endif + /* lock for send-buffer */ + spinlock_t txpinlock; + /* all fixed channels receivers */ + struct smsg_channel *channels[SMSG_VALID_CH_NR]; + /* record the runtime status of smsg channel */ + atomic_t busy[SMSG_VALID_CH_NR]; + /* all channel states: 0 unused, 1 be opened by other core, 2 opend */ + u8 states[SMSG_VALID_CH_NR]; +}; + +#define CHAN_STATE_UNUSED 0 +#define CHAN_STATE_CLIENT_OPENED 1 +#define CHAN_STATE_HOST_OPENED 2 +#define CHAN_STATE_OPENED 3 +#define CHAN_STATE_FREE 4 + +void smsg_init_channel2index(void); +void smsg_ipc_create(struct smsg_ipc *ipc); +void smsg_ipc_destroy(struct smsg_ipc *ipc); + +/*smem alloc size align*/ +#define SMEM_ALIGN_POOLSZ 0x40000 /*256KB*/ + +#ifdef CONFIG_64BIT +#define SMEM_ALIGN_BYTES 8 +#define SMEM_MIN_ORDER 3 +#else +#define SMEM_ALIGN_BYTES 4 +#define SMEM_MIN_ORDER 2 +#endif + +/* initialize smem pool for AP/CP */ +int smem_init(u32 addr, u32 size, u32 dst, u32 mem_type); +void sbuf_get_status(u8 dst, char *status_info, int size); + +#if defined(CONFIG_DEBUG_FS) +void sipc_debug_putline(struct seq_file *m, char c, int n); +#endif + +#ifdef CONFIG_SPRD_MAILBOX +#define MBOX_INVALID_CORE 0xff +#endif + +/* sipc_smem_request_resource + * local smem no need request resource, just return 0. 
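+ *
+ * A typical caller brackets every share-memory access with the pair,
+ * e.g.:
+ *	rval = sipc_smem_request_resource(ring->tx_pms, sbuf->dst, -1);
+ *	if (rval < 0)
+ *		return rval;
+ *	... read or write the ring ...
+ *	sipc_smem_release_resource(ring->tx_pms, sbuf->dst);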
+ */ +static inline int sipc_smem_request_resource(struct sprd_pms *pms, + u8 dst, int timeout) +{ + if (smsg_ipcs[dst]->smem_type == SMEM_LOCAL) + return 0; + + return sprd_pms_request_resource(pms, timeout); +} + +/* sipc_smem_release_resource + * local smem no need release resource, do nothing. + */ +static inline void sipc_smem_release_resource(struct sprd_pms *pms, u8 dst) +{ + if (smsg_ipcs[dst]->smem_type != SMEM_LOCAL) + sprd_pms_release_resource(pms); +} +#endif diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/smem.c b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/smem.c new file mode 100644 index 000000000..12640c2e3 --- /dev/null +++ b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/smem.c @@ -0,0 +1,559 @@ +/* + * Copyright (C) 2019 Spreadtrum Communications Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../include/sipc.h" +#include "sipc_priv.h" +#define CONFIG_SPRD_IPA_PCIE_WORKROUND +/* + * workround: Due to orca ipa hardware limitations + * the sipc share memory must map from + * 0x2x0000000(orca side) to 0xx0000000(roc1 + * side), and the size must be 256M + */ +#ifdef CONFIG_SPRD_IPA_PCIE_WORKROUND +#define IPA_GET_SRC_BASE(addr) (((addr) & 0xf0000000) + 0x200000000) +#define IPA_GET_DST_BASE(addr) ((addr) & 0xf0000000) +#define IPA_SIZE 0x10000000 +#endif + +struct smem_phead { + struct list_head smem_phead; + spinlock_t lock; + u32 poolnum; +}; + +struct smem_pool { + struct list_head smem_head; + struct list_head smem_plist; + spinlock_t lock; + + void *pcie_base; + u32 addr; + u32 size; + u32 dst; + u32 mem_type; + + atomic_t used; + struct gen_pool *gen; +}; + +struct smem_record { + struct list_head smem_list; + struct task_struct *task; + u32 size; + u32 addr; +}; + +struct smem_map_list { + struct list_head map_head; + spinlock_t lock; + u32 inited; +}; + +struct smem_map { + struct list_head map_list; + struct task_struct *task; + const void *mem; + unsigned int count; +}; + +static struct smem_phead sipc_smem_phead; +static struct smem_map_list mem_mp; + +static struct smem_pool *shmem_find_pool(u8 dst) +{ + struct smem_phead *phead = &sipc_smem_phead; + struct smem_pool *spool = NULL; + struct smem_pool *pos; + unsigned long flags; + + /* The num of one pool is 0, means the poll is not ready */ + if (!phead->poolnum) + return NULL; + + spin_lock_irqsave(&phead->lock, flags); + list_for_each_entry(pos, &phead->smem_phead, smem_plist) { + if (pos->dst == dst) { + spool = pos; + break; + } + } + spin_unlock_irqrestore(&phead->lock, flags); + return spool; +} + +static void *soc_modem_ram_vmap(phys_addr_t start, size_t size, int noncached) +{ + struct page **pages; + phys_addr_t page_start; + unsigned int page_count; + pgprot_t prot; + unsigned int i; + void *vaddr; + phys_addr_t addr; + unsigned long flags; + struct smem_map *map; + struct smem_map_list *smem = &mem_mp; + + map = kzalloc(sizeof(struct smem_map), GFP_KERNEL); + if (!map) + return NULL; + + page_start = start - offset_in_page(start); + 
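	/*
	 * page_start above rounds down to a page boundary; page_count
	 * below counts every page touched, e.g. start = 0x80001200,
	 * size = 0x3000 gives DIV_ROUND_UP(0x3200, 0x1000) = 4 pages.
	 */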
page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE); + if (noncached) + prot = pgprot_noncached(PAGE_KERNEL); + else + prot = PAGE_KERNEL; + + pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL); + if (!pages) { + kfree(map); + return NULL; + } + + for (i = 0; i < page_count; i++) { + addr = page_start + i * PAGE_SIZE; + pages[i] = pfn_to_page(addr >> PAGE_SHIFT); + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 5,10,0 )) + vaddr = vm_map_ram(pages, page_count, -1, prot); +#else + vaddr = vmap(pages, page_count, -1, prot); + //vaddr = vm_map_ram(pages, page_count, -1); +#endif + + kfree(pages); + + if (!vaddr) { + pr_err("smem: vm map failed.\n"); + kfree(map); + return NULL; + } + + vaddr += offset_in_page(start); + map->count = page_count; + map->mem = vaddr; + map->task = current; + + if (smem->inited) { + spin_lock_irqsave(&smem->lock, flags); + list_add_tail(&map->map_list, &smem->map_head); + spin_unlock_irqrestore(&smem->lock, flags); + } + return vaddr; +} + +static void *pcie_modem_ram_vmap(phys_addr_t start, size_t size, int noncached) +{ + if (noncached == 0) { + pr_err("%s: cache not support!\n", __func__); + return NULL; + } + +#ifdef CONFIG_SPRD_PCIE_EP_DEVICE + return sprd_ep_map_memory(PCIE_EP_MODEM, start, size); +#endif + +#ifdef CONFIG_PCIE_EPF_SPRD + return sprd_pci_epf_map_memory(SPRD_FUNCTION_0, start, size); +#endif + + return NULL; +} + +static void pcie_modem_ram_unmap(const void *mem) +{ +#ifdef CONFIG_SPRD_PCIE_EP_DEVICE + return sprd_ep_unmap_memory(PCIE_EP_MODEM, mem); +#endif + +#ifdef CONFIG_PCIE_EPF_SPRD + return sprd_pci_epf_unmap_memory(SPRD_FUNCTION_0, mem); +#endif +} + +static void soc_modem_ram_unmap(const void *mem) +{ + struct smem_map *map, *next; + unsigned long flags; + struct smem_map_list *smem = &mem_mp; + bool found = false; + + if (smem->inited) { + spin_lock_irqsave(&smem->lock, flags); + list_for_each_entry_safe(map, next, &smem->map_head, map_list) { + if (map->mem == mem) { + list_del(&map->map_list); + found = true; + break; + } + } + spin_unlock_irqrestore(&smem->lock, flags); + + if (found) { + vm_unmap_ram(mem - offset_in_page(mem), map->count); + kfree(map); + } + } +} + +static void *shmem_ram_vmap(u8 dst, phys_addr_t start, + size_t size, + int noncached) +{ + struct smem_pool *spool; + + spool = shmem_find_pool(dst); + if (spool == NULL) { + pr_err("%s: pool dst %d is not existed!\n", __func__, dst); + return NULL; + } + + if (spool->mem_type == SMEM_PCIE) { + if (start < spool->addr + || start + size > spool->addr + spool->size) { + pr_info("%s: error, start = 0x%lx, size = 0x%lx.\n", + __func__, + (unsigned long)start, + (unsigned long)size); + return NULL; + } + + pr_info("%s: succ, start = 0x%lx, size = 0x%lx.\n", + __func__, (unsigned long)start, (unsigned long)size); + return (spool->pcie_base + start - spool->addr); + } + + return soc_modem_ram_vmap(start, size, noncached); + +} + +int smem_init(u32 addr, u32 size, u32 dst, u32 mem_type) +{ + struct smem_phead *phead = &sipc_smem_phead; + struct smem_map_list *smem = &mem_mp; + struct smem_pool *spool; + unsigned long flags; + + /* fisrt init, create the pool head */ + if (!phead->poolnum) { + spin_lock_init(&phead->lock); + INIT_LIST_HEAD(&phead->smem_phead); + } + + if (shmem_find_pool(dst)) + return 0; + + spool = kzalloc(sizeof(struct smem_pool), GFP_KERNEL); + if (!spool) + return -1; + + spin_lock_irqsave(&phead->lock, flags); + list_add_tail(&spool->smem_plist, &phead->smem_phead); + phead->poolnum++; + 
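	/*
	 * note: the pool becomes visible to shmem_find_pool() as soon as
	 * poolnum is non-zero, before addr/size below are filled in
	 */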
spin_unlock_irqrestore(&phead->lock, flags); + + spool->addr = addr; + spool->dst = dst; + spool->mem_type = mem_type; + + if (size >= SMEM_ALIGN_POOLSZ) + size = PAGE_ALIGN(size); + else + size = ALIGN(size, SMEM_ALIGN_BYTES); + + spool->size = size; + atomic_set(&spool->used, 0); + spin_lock_init(&spool->lock); + INIT_LIST_HEAD(&spool->smem_head); + + spin_lock_init(&smem->lock); + INIT_LIST_HEAD(&smem->map_head); + smem->inited = 1; + + /* allocator block size is times of pages */ + if (spool->size >= SMEM_ALIGN_POOLSZ) + spool->gen = gen_pool_create(PAGE_SHIFT, -1); + else + spool->gen = gen_pool_create(SMEM_MIN_ORDER, -1); + + if (!spool->gen) { + pr_err("Failed to create smem gen pool!\n"); + return -1; + } + + if (gen_pool_add(spool->gen, spool->addr, spool->size, -1) != 0) { + pr_err("Failed to add smem gen pool!\n"); + return -1; + } + pr_info("%s: pool addr = 0x%x, size = 0x%x added.\n", + __func__, spool->addr, spool->size); + + if (mem_type == SMEM_PCIE) { +#ifdef CONFIG_SPRD_IPA_PCIE_WORKROUND +#ifdef CONFIG_PCIE_EPF_SPRD + spool->pcie_base = sprd_epf_ipa_map(IPA_GET_SRC_BASE(addr), + IPA_GET_DST_BASE(addr), + IPA_SIZE); + if (!spool->pcie_base) + return -ENOMEM; + + spool->pcie_base += (addr - IPA_GET_DST_BASE(addr)); +#else + pr_err("Failed to pcie map, can't run here!\n"); + return -ENOMEM; +#endif +#else + spool->pcie_base = pcie_modem_ram_vmap(addr, size, 1); +#endif + } + return 0; +} + +/* ****************************************************************** */ + +int smem_get_area(u8 dst, u32 *base, u32 *size) +{ + struct smem_pool *spool; + + if (!base || !size) + return -EINVAL; + + spool = shmem_find_pool(dst); + if (!spool) { + pr_err("%s: err, dst = %d!\n", __func__, dst); + return -EINVAL; + } + + pr_info("%s: addr = 0x%x, size = 0x%x.\n", + __func__, spool->addr, spool->size); + + *base = spool->addr; + *size = spool->size; + + return 0; +} +EXPORT_SYMBOL_GPL(smem_get_area); + +u32 smem_alloc(u8 dst, u32 size) +{ + struct smem_pool *spool; + struct smem_record *recd; + unsigned long flags; + u32 addr = 0; + + spool = shmem_find_pool(dst); + if (spool == NULL) { + pr_err("%s: pool dst %d is not existed!\n", __func__, dst); + return 0; + } + + recd = kzalloc(sizeof(struct smem_record), GFP_KERNEL); + if (!recd) + return 0; + + if (spool->size >= SMEM_ALIGN_POOLSZ) + size = PAGE_ALIGN(size); + else + size = ALIGN(size, SMEM_ALIGN_BYTES); + + addr = gen_pool_alloc(spool->gen, size); + if (!addr) { + pr_err("%s:pool dst=%d, size=0x%x failed to alloc smem!\n", + __func__, dst, size); + kfree(recd); + return 0; + } + + /* record smem alloc info */ + atomic_add(size, &spool->used); + recd->size = size; + recd->task = current; + recd->addr = addr; + spin_lock_irqsave(&spool->lock, flags); + list_add_tail(&recd->smem_list, &spool->smem_head); + spin_unlock_irqrestore(&spool->lock, flags); + + return addr; +} +EXPORT_SYMBOL_GPL(smem_alloc); + +void smem_free(u8 dst, u32 addr, u32 size) +{ + struct smem_pool *spool; + struct smem_record *recd, *next; + unsigned long flags; + + spool = shmem_find_pool(dst); + if (spool == NULL) { + pr_err("%s: pool dst %d is not existed!\n", __func__, dst); + return; + } + + if (size >= SMEM_ALIGN_POOLSZ) + size = PAGE_ALIGN(size); + else + size = ALIGN(size, SMEM_ALIGN_BYTES); + + atomic_sub(size, &spool->used); + gen_pool_free(spool->gen, addr, size); + /* delete record node from list */ + spin_lock_irqsave(&spool->lock, flags); + list_for_each_entry_safe(recd, next, &spool->smem_head, smem_list) { + if (recd->addr == addr) { + 
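			/* matching allocation record: unlink and free it */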
list_del(&recd->smem_list); + kfree(recd); + break; + } + } + spin_unlock_irqrestore(&spool->lock, flags); +} +EXPORT_SYMBOL_GPL(smem_free); + +void *shmem_ram_vmap_nocache(u8 dst, phys_addr_t start, size_t size) +{ + return shmem_ram_vmap(dst, start, size, 1); +} +EXPORT_SYMBOL_GPL(shmem_ram_vmap_nocache); + + +void *shmem_ram_vmap_cache(u8 dst, phys_addr_t start, size_t size) +{ + return shmem_ram_vmap(dst, start, size, 0); +} +EXPORT_SYMBOL_GPL(shmem_ram_vmap_cache); + +void shmem_ram_unmap(u8 dst, const void *mem) +{ + struct smem_pool *spool; + + spool = shmem_find_pool(dst); + if (spool == NULL) { + pr_err("%s: pool dst %d is not existed!\n", __func__, dst); + return; + } + + if (spool->mem_type == SMEM_PCIE) + /* do nothing, because it also do nothing in shmem_ram_vmap */ + return; + else + return soc_modem_ram_unmap(mem); +} +EXPORT_SYMBOL_GPL(shmem_ram_unmap); + +void *modem_ram_vmap_nocache(u32 modem_type, phys_addr_t start, size_t size) +{ + if (modem_type == PCIE_MODEM) + return pcie_modem_ram_vmap(start, size, 1); + else + return soc_modem_ram_vmap(start, size, 1); +} +EXPORT_SYMBOL_GPL(modem_ram_vmap_nocache); + + +void *modem_ram_vmap_cache(u32 modem_type, phys_addr_t start, size_t size) +{ + if (modem_type == PCIE_MODEM) + return pcie_modem_ram_vmap(start, size, 0); + else + return soc_modem_ram_vmap(start, size, 0); +} +EXPORT_SYMBOL_GPL(modem_ram_vmap_cache); + +void modem_ram_unmap(u32 modem_type, const void *mem) +{ + if (modem_type == PCIE_MODEM) + return pcie_modem_ram_unmap(mem); + else + return soc_modem_ram_unmap(mem); +} +EXPORT_SYMBOL_GPL(modem_ram_unmap); + +#ifdef CONFIG_DEBUG_FS +static int smem_debug_show(struct seq_file *m, void *private) +{ + struct smem_phead *phead = &sipc_smem_phead; + struct smem_pool *spool, *pos; + struct smem_record *recd; + u32 fsize; + unsigned long flags; + u32 cnt = 1; + + spin_lock_irqsave(&phead->lock, flags); + list_for_each_entry(pos, &phead->smem_phead, smem_plist) { + spool = pos; + fsize = gen_pool_avail(spool->gen); + + sipc_debug_putline(m, '*', 80); + seq_printf(m, "%d, dst:%d, name: %s, smem pool info:\n", + cnt++, spool->dst, + (smsg_ipcs[spool->dst])->name); + seq_printf(m, "phys_addr=0x%x, total=0x%x, used=0x%x, free=0x%x\n", + spool->addr, spool->size, spool->used.counter, fsize); + seq_puts(m, "smem record list:\n"); + + list_for_each_entry(recd, &spool->smem_head, smem_list) { + seq_printf(m, "task %s: pid=%u, addr=0x%x, size=0x%x\n", + recd->task->comm, + recd->task->pid, + recd->addr, + recd->size); + } + } + spin_unlock_irqrestore(&phead->lock, flags); + return 0; +} + +static int smem_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, smem_debug_show, inode->i_private); +} + +static const struct file_operations smem_debug_fops = { + .open = smem_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +int smem_init_debugfs(void *root) +{ + if (!root) + return -ENXIO; + debugfs_create_file("smem", 0444, + (struct dentry *)root, NULL, + &smem_debug_fops); + return 0; +} +EXPORT_SYMBOL_GPL(smem_init_debugfs); + +#endif /* endof CONFIG_DEBUG_FS */ + + +MODULE_AUTHOR("Chen Gaopeng"); +MODULE_DESCRIPTION("SIPC/SMEM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/smsg.c b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/smsg.c new file mode 100644 index 000000000..051800d8f --- /dev/null +++ b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/smsg.c @@ -0,0 +1,1103 @@ +/* + * Copyright (C) 2019 Spreadtrum 
Communications Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef CONFIG_SPRD_MAILBOX
+#include
+#endif
+
+#include "../include/sipc.h"
+#include "sipc_priv.h"
+#include "../include/sprd_pcie_resource.h"
+
+#if defined(CONFIG_DEBUG_FS)
+#include "sipc_debugfs.h"
+#endif
+
+#define SMSG_TXBUF_ADDR		(0)
+#define SMSG_TXBUF_SIZE		(SZ_1K)
+#define SMSG_RXBUF_ADDR		(SMSG_TXBUF_SIZE)
+#define SMSG_RXBUF_SIZE		(SZ_1K)
+
+#define SMSG_RINGHDR		(SMSG_TXBUF_SIZE + SMSG_RXBUF_SIZE)
+#define SMSG_TXBUF_RDPTR	(SMSG_RINGHDR + 0)
+#define SMSG_TXBUF_WRPTR	(SMSG_RINGHDR + 4)
+#define SMSG_RXBUF_RDPTR	(SMSG_RINGHDR + 8)
+#define SMSG_RXBUF_WRPTR	(SMSG_RINGHDR + 12)
+
+#define SMSG_RESERVE_BASE	(SMSG_RINGHDR + SZ_1K)
+#define SMSG_PCIE_WRPTR		(SMSG_RESERVE_BASE + 0)
+#define SMSG_PCIE_IRQPTR	(SMSG_RESERVE_BASE + 4)
+
+#define SIPC_READL(addr)	readl((__force void __iomem *)(addr))
+#define SIPC_WRITEL(b, addr)	writel(b, (__force void __iomem *)(addr))
+
+static u8 g_wakeup_flag;
+
+struct smsg_ipc *smsg_ipcs[SIPC_ID_NR];
+EXPORT_SYMBOL_GPL(smsg_ipcs);
+
+static ushort debug_enable;
+
+module_param_named(debug_enable, debug_enable, ushort, 0644);
+static u8 channel2index[SMSG_CH_NR + 1];
+
+static int smsg_ipc_smem_init(struct smsg_ipc *ipc);
+
+void smsg_init_channel2index(void)
+{
+	u16 i, j;
+
+	for (i = 0; i < ARRAY_SIZE(channel2index); i++) {
+		for (j = 0; j < SMSG_VALID_CH_NR; j++) {
+			/* find the index of channel i */
+			if (sipc_cfg[j].channel == i)
+				break;
+		}
+
+		/*
+		 * if not found, init with INVALID_CHANEL_INDEX,
+		 * else init with j
+		 */
+		if (j == SMSG_VALID_CH_NR)
+			channel2index[i] = INVALID_CHANEL_INDEX;
+		else
+			channel2index[i] = j;
+	}
+}
+
+static void get_channel_status(u8 dst, char *status, int size)
+{
+	int i, len;
+	struct smsg_channel *ch;
+
+	len = strlen(status);
+	for (i = 0; i < SMSG_VALID_CH_NR && len < size; i++) {
+		ch = smsg_ipcs[dst]->channels[i];
+		if (!ch)
+			continue;
+		if (SIPC_READL(ch->rdptr) < SIPC_READL(ch->wrptr))
+			snprintf(
+				status + len,
+				size - len,
+				"dst-%d-ch-%d: rd = %u, wr = %u.\n",
+				dst,
+				i,
+				SIPC_READL(ch->rdptr),
+				SIPC_READL(ch->wrptr));
+	}
+}
+
+static void smsg_wakeup_print(struct smsg_ipc *ipc, struct smsg *msg)
+{
+	/*
+	 * if the first msg comes after the irq wakeup by sipc,
+	 * use pr_info to output the log
+	 */
+	if (g_wakeup_flag) {
+		g_wakeup_flag = 0;
+		pr_info("irq read smsg: dst=%d, channel=%d,type=%d, flag=0x%04x, value=0x%08x\n",
+			ipc->dst,
+			msg->channel,
+			msg->type,
+			msg->flag,
+			msg->value);
+	} else {
+		pr_debug("irq read smsg: dst=%d, channel=%d,type=%d, flag=0x%04x, value=0x%08x\n",
+			 ipc->dst,
+			 msg->channel,
+			 msg->type,
+			 msg->flag,
+			 msg->value);
+	}
+}
+
+static void smsg_die_process(struct smsg_ipc *ipc, struct smsg *msg)
+{
+	if (msg->type == SMSG_TYPE_DIE) {
+		if (debug_enable) {
+			char sipc_status[100] = {0};
+
+			get_channel_status(ipc->dst,
+					   sipc_status,
+					   sizeof(sipc_status));
+			sbuf_get_status(ipc->dst,
+					sipc_status,
+					sizeof(sipc_status));
+			panic("cpcrash: %s", sipc_status);
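+			/* panic() should not return; spin as a safety net */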
while (1) + ; + } + } +} + +static void smsg_msg_process(struct smsg_ipc *ipc, + struct smsg *msg, bool wake_lock) +{ + struct smsg_channel *ch = NULL; + u32 wr; + u8 ch_index; + + smsg_wakeup_print(ipc, msg); + smsg_die_process(ipc, msg); + + ch_index = channel2index[msg->channel]; + atomic_inc(&ipc->busy[ch_index]); + + pr_debug("smsg:get dst=%d msg channel=%d, type=%d, flag=0x%04x, value=0x%08x\n", + ipc->dst, msg->channel, + msg->type, msg->flag, + msg->value); + + if (msg->type >= SMSG_TYPE_NR) { + /* invalid msg */ + pr_err("invalid smsg: channel=%d, type=%d, flag=0x%04x, value=0x%08x\n", + msg->channel, msg->type, msg->flag, msg->value); + goto exit_msg_proc; + } + + ch = ipc->channels[ch_index]; + if (!ch) { + if (ipc->states[ch_index] == CHAN_STATE_UNUSED && + msg->type == SMSG_TYPE_OPEN && + msg->flag == SMSG_OPEN_MAGIC) + ipc->states[ch_index] = CHAN_STATE_CLIENT_OPENED; + else + /* drop this bad msg since channel + * is not opened + */ + pr_info("smsg channel %d not opened! drop smsg: type=%d, flag=0x%04x, value=0x%08x\n", + msg->channel, msg->type, + msg->flag, msg->value); + + goto exit_msg_proc; + } + + if ((int)(SIPC_READL(ch->wrptr) - SIPC_READL(ch->rdptr)) >= + SMSG_CACHE_NR) { + /* msg cache is full, drop this msg */ + pr_info("smsg channel %d recv cache is full! drop smsg: type=%d, flag=0x%04x, value=0x%08x\n", + msg->channel, msg->type, msg->flag, msg->value); + } else { + /* write smsg to cache */ + wr = SIPC_READL(ch->wrptr) & (SMSG_CACHE_NR - 1); + memcpy(&ch->caches[wr], msg, sizeof(struct smsg)); + SIPC_WRITEL(SIPC_READL(ch->wrptr) + 1, ch->wrptr); + } + + wake_up_interruptible_all(&ch->rxwait); + + if (wake_lock) + sprd_pms_request_wakelock_period(ch->rx_pms, 500); + +exit_msg_proc: + atomic_dec(&ipc->busy[ch_index]); +} + +#ifdef CONFIG_SPRD_MAILBOX +static irqreturn_t smsg_mbox_irq_handler(void *ptr, void *private) +{ + struct smsg_ipc *ipc = (struct smsg_ipc *)private; + struct smsg *msg; + + msg = ptr; + smsg_msg_process(ipc, msg, true); + + return IRQ_HANDLED; +} + +static irqreturn_t smsg_mbox_sensor_irq_handler(void *ptr, void *private) +{ + struct smsg_ipc *ipc = (struct smsg_ipc *)private; + struct smsg *msg; + + msg = ptr; + smsg_msg_process(ipc, msg, false); + + return IRQ_HANDLED; +} + +#endif + +static int sipc_process_all_msg(struct smsg_ipc *ipc) +{ + struct smsg *msg; + struct smsg msg_recv; + uintptr_t rxpos; + + /* msg coming, means resource ok, don't wait */ + sipc_smem_request_resource(ipc->sipc_pms, ipc->dst, 0); + + while (BL_READL(ipc->rxbuf_wrptr) != BL_READL(ipc->rxbuf_rdptr)) { + rxpos = (BL_READL(ipc->rxbuf_rdptr) & (ipc->rxbuf_size - 1)) * + sizeof(struct smsg) + ipc->rxbuf_addr; + msg = (struct smsg *)rxpos; + + /* share memory smsg to ipc msg */ + msg_recv.channel = msg->channel; + msg_recv.type = msg->type; + msg_recv.flag = BL_GETW(msg->flag); + msg_recv.value= BL_GETL(msg->value); + smsg_msg_process(ipc, &msg_recv, true); + /* update smsg rdptr */ + BL_WRITEL(BL_READL(ipc->rxbuf_rdptr) + 1, ipc->rxbuf_rdptr); + } + + sipc_smem_release_resource(ipc->sipc_pms, ipc->dst); + + return 0; +} + + +static irqreturn_t smsg_irq_handler(int irq, void *private) +{ + struct smsg_ipc *ipc = (struct smsg_ipc *)private; + + if (ipc->rxirq_status(ipc->dst)) + ipc->rxirq_clear(ipc->dst); + + sipc_process_all_msg(ipc); + + return IRQ_HANDLED; +} + +static void smsg_ipc_init_smsg_irq_callback(struct smsg_ipc *ipc) +{ +#ifdef CONFIG_SPRD_MAILBOX + if (ipc->type == SIPC_BASE_MBOX) { + mbox_register_irq_handle(ipc->core_id, + smsg_mbox_irq_handler, 
ipc); + + if ((ipc->dst == SIPC_ID_PM_SYS) && + (ipc->core_sensor_id != MBOX_INVALID_CORE)) + mbox_register_irq_handle(ipc->core_sensor_id, + smsg_mbox_sensor_irq_handler, + ipc); + return; + } +#endif + +#ifdef CONFIG_SPRD_PCIE_EP_DEVICE + if (ipc->type == SIPC_BASE_PCIE) { + sprd_ep_dev_register_irq_handler(ipc->ep_dev, + PCIE_MSI_SIPC_IRQ, + smsg_irq_handler, ipc); + sprd_ep_dev_set_irq_addr(ipc->ep_dev, ipc->write_addr + 4); + return; + } +#endif + +#ifdef CONFIG_PCIE_EPF_SPRD + if (ipc->type == SIPC_BASE_PCIE) { + sprd_pci_epf_register_irq_handler(ipc->ep_fun, + PCIE_DBELL_SIPC_IRQ, + smsg_irq_handler, + ipc); + sprd_pci_epf_set_write_addr(ipc->ep_fun, ipc->write_addr); + return; + } +#endif + + if (ipc->type == SIPC_BASE_IPI) { + int ret; + + /* explicitly call irq handler in case of missing irq on boot */ + smsg_irq_handler(ipc->irq, ipc); + + /* register IPI irq */ + ret = request_irq(ipc->irq, + smsg_irq_handler, + IRQF_NO_SUSPEND, + ipc->name, + ipc); + if (ret) + pr_info("%s: request irq err = %d!\n", ipc->name, ret); + } +} + +static int smsg_ipc_smem_init(struct smsg_ipc *ipc) +{ + void __iomem *base, *p; + phys_addr_t offset = 0; + int ret; + + pr_debug("%s: %s, smem_type = %d!\n", + __func__, ipc->name, ipc->smem_type); + + ret = smem_init(ipc->smem_base, ipc->smem_size, + ipc->dst, ipc->smem_type); + if (ret) { + pr_err("%s: %s err = %d!\n", __func__, ipc->name, ret); + return ret; + } + + if (ipc->type != SIPC_BASE_MBOX) { + ipc->ring_base = smem_alloc(ipc->dst, SZ_4K); + ipc->ring_size = SZ_4K; + pr_info("%s: ring_base = 0x%x, ring_size = 0x%x\n", + __func__, + ipc->ring_base, + ipc->ring_size); + } + +#ifdef CONFIG_PHYS_ADDR_T_64BIT + offset = ipc->high_offset; + offset = offset << 32; +#endif + + if (ipc->ring_base) { + base = (void __iomem *)shmem_ram_vmap_nocache(ipc->dst, + ipc->ring_base + offset, + ipc->ring_size); + if (!base) { + pr_err("%s: ioremap failed!\n", __func__); + smem_free(ipc->dst, ipc->ring_base, SZ_4K); + ipc->ring_base = 0; + return -ENOMEM; + } + + /* assume client is boot later than host */ + if (!ipc->client) { + /** + * memset(base, 0, ipc->ring_size); + * the instruction dc avz + * will abort for nocache memory + */ + for (p = base; p < base + ipc->ring_size;) { +#ifdef CONFIG_64BIT + *(uint64_t *)p = 0x0; + p += sizeof(uint64_t); +#else + *(u32 *)p = 0x0; + p += sizeof(u32); +#endif + } + } + + if (ipc->client) { + /* clent mode, tx is host rx , rx is host tx*/ + ipc->smem_vbase = (void *)base; + ipc->txbuf_size = SMSG_RXBUF_SIZE / + sizeof(struct smsg); + ipc->txbuf_addr = (uintptr_t)base + + SMSG_RXBUF_ADDR; + ipc->txbuf_rdptr = (uintptr_t)base + + SMSG_RXBUF_RDPTR; + ipc->txbuf_wrptr = (uintptr_t)base + + SMSG_RXBUF_WRPTR; + ipc->rxbuf_size = SMSG_TXBUF_SIZE / + sizeof(struct smsg); + ipc->rxbuf_addr = (uintptr_t)base + + SMSG_TXBUF_ADDR; + ipc->rxbuf_rdptr = (uintptr_t)base + + SMSG_TXBUF_RDPTR; + ipc->rxbuf_wrptr = (uintptr_t)base + + SMSG_TXBUF_WRPTR; + } else { + ipc->smem_vbase = (void *)base; + ipc->txbuf_size = SMSG_TXBUF_SIZE / + sizeof(struct smsg); + ipc->txbuf_addr = (uintptr_t)base + + SMSG_TXBUF_ADDR; + ipc->txbuf_rdptr = (uintptr_t)base + + SMSG_TXBUF_RDPTR; + ipc->txbuf_wrptr = (uintptr_t)base + + SMSG_TXBUF_WRPTR; + ipc->rxbuf_size = SMSG_RXBUF_SIZE / + sizeof(struct smsg); + ipc->rxbuf_addr = (uintptr_t)base + + SMSG_RXBUF_ADDR; + ipc->rxbuf_rdptr = (uintptr_t)base + + SMSG_RXBUF_RDPTR; + ipc->rxbuf_wrptr = (uintptr_t)base + + SMSG_RXBUF_WRPTR; + } + ipc->write_addr = base + SMSG_PCIE_WRPTR; + } + + /* after smem_init 
complete, regist msg irq */ + smsg_ipc_init_smsg_irq_callback(ipc); + + return 0; +} + +#ifdef CONFIG_PCIE_EPF_SPRD +static void smsg_pcie_first_ready(void *data) +{ + struct smsg_ipc *ipc = (struct smsg_ipc *)data; + + if (ipc->smem_type == SMEM_PCIE) + smsg_ipc_smem_init(ipc); + else + pr_err("%s: pcie first ready, smem_type =%d!\n", + ipc->name, ipc->smem_type); +} +#endif + +static void smsg_ipc_mpm_init(struct smsg_ipc *ipc) +{ + /* create modem power manger instance for this sipc */ + sprd_mpm_create(ipc->dst, ipc->name, ipc->latency); + + /* init a power manager source */ + ipc->sipc_pms = sprd_pms_create(ipc->dst, ipc->name, true); + if (!ipc->sipc_pms) + pr_warn("create pms %s failed!\n", ipc->name); + + if (ipc->type == SIPC_BASE_PCIE) { + /* int mpm resource ops */ + sprd_mpm_init_resource_ops(ipc->dst, + sprd_pcie_wait_resource, + sprd_pcie_request_resource, + sprd_pcie_release_resource); + +#ifdef CONFIG_SPRD_PCIE_EP_DEVICE + /* in pcie host side, init pcie host resource */ + sprd_pcie_resource_host_init(ipc->dst, + ipc->ep_dev, ipc->pcie_dev); +#endif + +#ifdef CONFIG_PCIE_EPF_SPRD + /* in pcie ep side, init pcie client resource */ + sprd_pcie_resource_client_init(ipc->dst, ipc->ep_fun); +#endif + } +} + +void smsg_ipc_create(struct smsg_ipc *ipc) +{ + pr_info("%s: %s\n", __func__, ipc->name); + + smsg_ipcs[ipc->dst] = ipc; + + smsg_ipc_mpm_init(ipc); + + + if (ipc->type == SIPC_BASE_PCIE) { +#ifdef CONFIG_PCIE_EPF_SPRD + /* set epf door bell irq number */ + sprd_pci_epf_set_irq_number(ipc->ep_fun, ipc->irq); + + /* register first pcie ready notify */ + sprd_register_pcie_resource_first_ready(ipc->dst, + smsg_pcie_first_ready, + ipc); +#endif + } + + /* if SMEM_PCIE, must init after pcie ready */ + if (ipc->smem_type != SMEM_PCIE) + smsg_ipc_smem_init(ipc); +} + +void smsg_ipc_destroy(struct smsg_ipc *ipc) +{ + shmem_ram_unmap(ipc->dst, ipc->smem_vbase); + smem_free(ipc->dst, ipc->ring_base, SZ_4K); + +#ifdef CONFIG_SPRD_MAILBOX + if (ipc->type == SIPC_BASE_MBOX) { + mbox_unregister_irq_handle(ipc->core_id); + + if ((ipc->dst == SIPC_ID_PM_SYS) && + (ipc->core_sensor_id != MBOX_INVALID_CORE)) + mbox_unregister_irq_handle(ipc->core_sensor_id); + } +#endif + + if (ipc->type == SIPC_BASE_PCIE) { +#ifdef CONFIG_SPRD_PCIE_EP_DEVICE + sprd_ep_dev_unregister_irq_handler(ipc->ep_dev, ipc->irq); +#endif + +#ifdef CONFIG_PCIE_EPF_SPRD + sprd_pci_epf_unregister_irq_handler(ipc->ep_fun, ipc->irq); +#endif + sprd_pcie_resource_trash(ipc->dst); + } else { + free_irq(ipc->irq, ipc); + } + + smsg_ipcs[ipc->dst] = NULL; +} + +int sipc_get_wakeup_flag(void) +{ + return (int)g_wakeup_flag; +} +EXPORT_SYMBOL_GPL(sipc_get_wakeup_flag); + +void sipc_set_wakeup_flag(void) +{ + g_wakeup_flag = 1; +} +EXPORT_SYMBOL_GPL(sipc_set_wakeup_flag); + +void sipc_clear_wakeup_flag(void) +{ + g_wakeup_flag = 0; +} +EXPORT_SYMBOL_GPL(sipc_clear_wakeup_flag); + +int smsg_ch_wake_unlock(u8 dst, u8 channel) +{ + struct smsg_ipc *ipc = smsg_ipcs[dst]; + struct smsg_channel *ch; + u8 ch_index; + + ch_index = channel2index[channel]; + if (ch_index == INVALID_CHANEL_INDEX) { + pr_err("%s:channel %d invalid!\n", __func__, channel); + return -EINVAL; + } + + if (!ipc) + return -ENODEV; + + ch = ipc->channels[ch_index]; + if (!ch) + return -ENODEV; + + sprd_pms_release_wakelock(ch->rx_pms); + return 0; +} +EXPORT_SYMBOL_GPL(smsg_ch_wake_unlock); + +int smsg_ch_open(u8 dst, u8 channel, int timeout) +{ + struct smsg_ipc *ipc = smsg_ipcs[dst]; + struct smsg_channel *ch; + struct smsg mopen; + struct smsg mrecv; + int rval = 
0; + u8 ch_index; + + ch_index = channel2index[channel]; + if (ch_index == INVALID_CHANEL_INDEX) { + pr_err("%s:channel %d invalid!\n", __func__, channel); + return -EINVAL; + } + + if (!ipc) + return -ENODEV; + + ch = kzalloc(sizeof(*ch), GFP_KERNEL); + if (!ch) + return -ENOMEM; + + sprintf(ch->tx_name, "smsg-%d-%d-tx", dst, channel); + ch->tx_pms = sprd_pms_create(dst, ch->tx_name, true); + if (!ch->tx_pms) + pr_warn("create pms %s failed!\n", ch->tx_name); + + sprintf(ch->rx_name, "smsg-%d-%d-rx", dst, channel); + ch->rx_pms = sprd_pms_create(dst, ch->rx_name, true); + if (!ch->rx_pms) + pr_warn("create pms %s failed!\n", ch->rx_name); + + atomic_set(&ipc->busy[ch_index], 1); + init_waitqueue_head(&ch->rxwait); + mutex_init(&ch->rxlock); + ipc->channels[ch_index] = ch; + + pr_info("%s: channel %d-%d send open msg!\n", + __func__, dst, channel); + + smsg_set(&mopen, channel, SMSG_TYPE_OPEN, SMSG_OPEN_MAGIC, 0); + rval = smsg_send(dst, &mopen, timeout); + if (rval != 0) { + pr_err("%s: channel %d-%d send open msg error = %d!\n", + __func__, dst, channel, rval); + ipc->states[ch_index] = CHAN_STATE_UNUSED; + ipc->channels[ch_index] = NULL; + atomic_dec(&ipc->busy[ch_index]); + /* guarantee that channel resource isn't used in irq handler */ + while (atomic_read(&ipc->busy[ch_index])) + ; + + kfree(ch); + + return rval; + } + + /* open msg might be got before */ + if (ipc->states[ch_index] == CHAN_STATE_CLIENT_OPENED) + goto open_done; + + ipc->states[ch_index] = CHAN_STATE_HOST_OPENED; + + do { + smsg_set(&mrecv, channel, 0, 0, 0); + rval = smsg_recv(dst, &mrecv, timeout); + if (rval != 0) { + pr_err("%s: channel %d-%d smsg receive error = %d!\n", + __func__, dst, channel, rval); + ipc->states[ch_index] = CHAN_STATE_UNUSED; + ipc->channels[ch_index] = NULL; + atomic_dec(&ipc->busy[ch_index]); + /* guarantee that channel resource isn't used + * in irq handler + */ + while (atomic_read(&ipc->busy[ch_index])) + ; + + kfree(ch); + return rval; + } + } while (mrecv.type != SMSG_TYPE_OPEN || mrecv.flag != SMSG_OPEN_MAGIC); + + pr_info("%s: channel %d-%d receive open msg!\n", + __func__, dst, channel); + +open_done: + pr_info("%s: channel %d-%d success\n", __func__, dst, channel); + ipc->states[ch_index] = CHAN_STATE_OPENED; + atomic_dec(&ipc->busy[ch_index]); + + return 0; +} +EXPORT_SYMBOL_GPL(smsg_ch_open); + +int smsg_ch_close(u8 dst, u8 channel, int timeout) +{ + struct smsg_ipc *ipc = smsg_ipcs[dst]; + struct smsg_channel *ch; + struct smsg mclose; + u8 ch_index; + + ch_index = channel2index[channel]; + if (ch_index == INVALID_CHANEL_INDEX) { + pr_err("%s:channel %d invalid!\n", __func__, channel); + return -EINVAL; + } + + ch = ipc->channels[ch_index]; + if (!ch) + return 0; + + smsg_set(&mclose, channel, SMSG_TYPE_CLOSE, SMSG_CLOSE_MAGIC, 0); + smsg_send(dst, &mclose, timeout); + + ipc->states[ch_index] = CHAN_STATE_FREE; + wake_up_interruptible_all(&ch->rxwait); + + /* wait for the channel being unused */ + while (atomic_read(&ipc->busy[ch_index])) + ; + + /* maybe channel has been free for smsg_ch_open failed */ + if (ipc->channels[ch_index]) { + ipc->channels[ch_index] = NULL; + /* guarantee that channel resource isn't used in irq handler */ + while (atomic_read(&ipc->busy[ch_index])) + ; + sprd_pms_destroy(ch->rx_pms); + sprd_pms_destroy(ch->tx_pms); + kfree(ch); + } + + /* finally, update the channel state*/ + ipc->states[ch_index] = CHAN_STATE_UNUSED; + + return 0; +} +EXPORT_SYMBOL_GPL(smsg_ch_close); + +static void smsg_bl_cpoy_msg(struct smsg *dst, struct smsg *src) +{ + 
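/*
+	 * channel/type are plain bytes; flag and value go through the
+	 * BL_SETW()/BL_SETL() accessors, which (judging by their use for
+	 * all shared-memory words in this file) handle byte order and
+	 * alignment for the shared-memory ring.
+	 */
+	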
dst->channel = src->channel; + dst->type = src->type; + BL_SETW(dst->flag, src->flag); + BL_SETL(dst->value, src->value); +} + +int smsg_senddie(u8 dst) +{ + struct smsg msg; + struct smsg_ipc *ipc = smsg_ipcs[dst]; + uintptr_t txpos; + int rval = 0; + + if (!ipc) + return -ENODEV; + + msg.channel = SMSG_CH_CTRL; + msg.type = SMSG_TYPE_DIE; + msg.flag = 0; + msg.value = 0; + +#ifdef CONFIG_SPRD_MAILBOX + if (ipc->type == SIPC_BASE_MBOX) { + mbox_just_sent(ipc->core_id, *((u64 *)&msg)); + return 0; + } +#endif + + if (ipc->ring_base) { + /* must wait resource before read or write share memory */ + rval = sprd_pms_request_resource(ipc->sipc_pms, 0); + if (rval < 0) + return rval; + + if (((int)(BL_READL(ipc->txbuf_wrptr) - + BL_READL(ipc->txbuf_rdptr)) >= + ipc->txbuf_size)) { + pr_info("%s: smsg txbuf is full!\n", __func__); + rval = -EBUSY; + } else { + /* calc txpos and write smsg */ + txpos = (BL_READL(ipc->txbuf_wrptr) & + (ipc->txbuf_size - 1)) * + sizeof(struct smsg) + ipc->txbuf_addr; + smsg_bl_cpoy_msg((void *)txpos, &msg); + + /* update wrptr */ + BL_WRITEL(BL_READL(ipc->txbuf_wrptr) + 1, + ipc->txbuf_wrptr); + } + ipc->txirq_trigger(ipc->dst, *((u64 *)&msg)); + sprd_pms_release_resource(ipc->sipc_pms); + } + + return rval; +} +EXPORT_SYMBOL_GPL(smsg_senddie); + +int smsg_send(u8 dst, struct smsg *msg, int timeout) +{ + struct smsg_ipc *ipc = smsg_ipcs[dst]; + struct smsg_channel *ch; + uintptr_t txpos; + int rval = 0; + unsigned long flags; + u8 ch_index; + + ch_index = channel2index[msg->channel]; + if (ch_index == INVALID_CHANEL_INDEX) { + pr_err("%s:channel %d invalid!\n", __func__, msg->channel); + return -EINVAL; + } + + if (!ipc) + return -ENODEV; + + if (!ipc->channels[ch_index]) { + pr_err("%s: channel %d not inited!\n", __func__, msg->channel); + return -ENODEV; + } + + if (ipc->states[ch_index] != CHAN_STATE_OPENED && + msg->type != SMSG_TYPE_OPEN && + msg->type != SMSG_TYPE_CLOSE) { + pr_err("%s: channel %d not opened!\n", __func__, msg->channel); + return -EINVAL; + } + + ch = ipc->channels[ch_index]; + + pr_debug("send smsg: channel=%d, type=%d, flag=0x%04x, value=0x%08x\n", + msg->channel, msg->type, msg->flag, msg->value); + + /* + * Must wait resource before read or write share memory, + * and must wait resource before trigger irq, + * And it must before if (ipc->ring_base), + * because it be inited as the same time as resource ready. 
+ */ + rval = sprd_pms_request_resource(ch->tx_pms, timeout); + if (rval < 0) + return rval; + + if (ipc->ring_base) { + spin_lock_irqsave(&ipc->txpinlock, flags); + if (((int)(BL_READL(ipc->txbuf_wrptr) - + BL_READL(ipc->txbuf_rdptr)) >= + ipc->txbuf_size)) { + pr_err("write smsg: txbuf full, wrptr=0x%x, rdptr=0x%x\n", + BL_READL(ipc->txbuf_wrptr), + BL_READL(ipc->txbuf_rdptr)); + rval = -EBUSY; + } else { + /* calc txpos and write smsg */ + txpos = (BL_READL(ipc->txbuf_wrptr) & + (ipc->txbuf_size - 1)) * + sizeof(struct smsg) + ipc->txbuf_addr; + smsg_bl_cpoy_msg((void *)txpos, msg); + + /* update wrptr */ + BL_WRITEL(BL_READL(ipc->txbuf_wrptr) + 1, + ipc->txbuf_wrptr); + } + spin_unlock_irqrestore(&ipc->txpinlock, flags); + } else if (ipc->type != SIPC_BASE_MBOX) { + pr_err("send smsg:ring_base is NULL"); + sprd_pms_release_resource(ch->tx_pms); + return -EINVAL; + } + + ipc->txirq_trigger(ipc->dst, *(u64 *)msg); + sprd_pms_release_resource(ch->tx_pms); + + return rval; +} +EXPORT_SYMBOL_GPL(smsg_send); + +int smsg_recv(u8 dst, struct smsg *msg, int timeout) +{ + struct smsg_ipc *ipc = smsg_ipcs[dst]; + struct smsg_channel *ch; + u32 rd; + int rval = 0; + u8 ch_index; + + ch_index = channel2index[msg->channel]; + if (ch_index == INVALID_CHANEL_INDEX) { + pr_err("%s:channel %d invalid!\n", __func__, msg->channel); + return -EINVAL; + } + + if (!ipc) + return -ENODEV; + + atomic_inc(&ipc->busy[ch_index]); + + ch = ipc->channels[ch_index]; + + if (!ch) { + pr_err("%s: channel %d not opened!\n", __func__, msg->channel); + atomic_dec(&ipc->busy[ch_index]); + return -ENODEV; + } + + pr_debug("%s: dst=%d, channel=%d, timeout=%d, ch_index = %d\n", + __func__, dst, msg->channel, timeout, ch_index); + + if (timeout == 0) { + if (!mutex_trylock(&ch->rxlock)) { + pr_err("dst=%d, channel=%d recv smsg busy!\n", + dst, msg->channel); + atomic_dec(&ipc->busy[ch_index]); + + return -EBUSY; + } + + /* no wait */ + if (SIPC_READL(ch->wrptr) == SIPC_READL(ch->rdptr)) { + pr_info("dst=%d, channel=%d smsg rx cache is empty!\n", + dst, msg->channel); + + rval = -ENODATA; + + goto recv_failed; + } + } else if (timeout < 0) { + mutex_lock_interruptible(&ch->rxlock); + /* wait forever */ + rval = wait_event_interruptible( + ch->rxwait, + (SIPC_READL(ch->wrptr) != + SIPC_READL(ch->rdptr)) || + (ipc->states[ch_index] == CHAN_STATE_FREE)); + if (rval < 0) { + pr_debug("%s: dst=%d, channel=%d wait interrupted!\n", + __func__, dst, msg->channel); + + goto recv_failed; + } + + if (ipc->states[ch_index] == CHAN_STATE_FREE) { + pr_info("%s: dst=%d, channel=%d channel is free!\n", + __func__, dst, msg->channel); + + rval = -EIO; + + goto recv_failed; + } + } else { + mutex_lock_interruptible(&ch->rxlock); + /* wait timeout */ + rval = wait_event_interruptible_timeout( + ch->rxwait, + (SIPC_READL(ch->wrptr) != SIPC_READL(ch->rdptr)) || + (ipc->states[ch_index] == CHAN_STATE_FREE), + timeout); + if (rval < 0) { + pr_debug("%s: dst=%d, channel=%d wait interrupted!\n", + __func__, dst, msg->channel); + + goto recv_failed; + } else if (rval == 0) { + pr_debug("%s: dst=%d, channel=%d wait timeout!\n", + __func__, dst, msg->channel); + + rval = -ETIME; + + goto recv_failed; + } + + if (ipc->states[ch_index] == CHAN_STATE_FREE) { + pr_info("%s: dst=%d, channel=%d channel is free!\n", + __func__, dst, msg->channel); + + rval = -EIO; + + goto recv_failed; + } + } + + /* read smsg from cache */ + rd = SIPC_READL(ch->rdptr) & (SMSG_CACHE_NR - 1); + memcpy(msg, &ch->caches[rd], sizeof(struct smsg)); + 
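/*
+	 * rdptr/wrptr are free-running counters masked with
+	 * (SMSG_CACHE_NR - 1) at use, so SMSG_CACHE_NR must be a power of
+	 * two; advancing rdptr only after the copy hands the slot back to
+	 * the writer.
+	 */
+	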
SIPC_WRITEL(SIPC_READL(ch->rdptr) + 1, ch->rdptr); + + if (ipc->ring_base) + pr_debug("read smsg: dst=%d, channel=%d, wrptr=%d, rdptr=%d, rd=%d\n", + dst, + msg->channel, + SIPC_READL(ch->wrptr), + SIPC_READL(ch->rdptr), + rd); + + pr_debug("recv smsg: dst=%d, channel=%d, type=%d, flag=0x%04x, value=0x%08x, rval = %d\n", + dst, msg->channel, msg->type, msg->flag, msg->value, rval); + +recv_failed: + mutex_unlock(&ch->rxlock); + atomic_dec(&ipc->busy[ch_index]); + return rval; +} +EXPORT_SYMBOL_GPL(smsg_recv); + +u8 sipc_channel2index(u8 channel) +{ + return channel2index[channel]; +} +EXPORT_SYMBOL_GPL(sipc_channel2index); + +#if defined(CONFIG_DEBUG_FS) +static int smsg_debug_show(struct seq_file *m, void *private) +{ + struct smsg_ipc *ipc = NULL; + struct smsg_channel *ch; + + int i, j, cnt; + + for (i = 0; i < SIPC_ID_NR; i++) { + ipc = smsg_ipcs[i]; + if (!ipc) + continue; + + sipc_debug_putline(m, '*', 120); + seq_printf(m, "sipc: %s:\n", ipc->name); + seq_printf(m, "dst: 0x%0x, irq: 0x%0x\n", + ipc->dst, ipc->irq); + if (ipc->ring_base) { + /* + * must wait resource before + * read or write share memory. + */ + if (sipc_smem_request_resource(ipc->sipc_pms, + ipc->dst, 1000) < 0) + continue; + + seq_printf(m, "txbufAddr: 0x%p, txbufsize: 0x%x, txbufrdptr: [0x%p]=%d, txbufwrptr: [0x%p]=%d\n", + (void *)ipc->txbuf_addr, + ipc->txbuf_size, + (void *)ipc->txbuf_rdptr, + BL_READL(ipc->txbuf_rdptr), + (void *)ipc->txbuf_wrptr, + BL_READL(ipc->txbuf_wrptr)); + seq_printf(m, "rxbufAddr: 0x%p, rxbufsize: 0x%x, rxbufrdptr: [0x%p]=%d, rxbufwrptr: [0x%p]=%d\n", + (void *)ipc->rxbuf_addr, + ipc->rxbuf_size, + (void *)ipc->rxbuf_rdptr, + BL_READL(ipc->rxbuf_rdptr), + (void *)ipc->rxbuf_wrptr, + BL_READL(ipc->rxbuf_wrptr)); + + /* release resource */ + sipc_smem_release_resource(ipc->sipc_pms, ipc->dst); + } + sipc_debug_putline(m, '-', 80); + seq_puts(m, "1. all channel state list:\n"); + + for (j = 0; j < SMSG_VALID_CH_NR; j++) + seq_printf(m, + "%2d. channel[%3d] states: %d, name: %s\n", + j, + sipc_cfg[j].channel, + ipc->states[j], + sipc_cfg[j].name); + + sipc_debug_putline(m, '-', 80); + seq_puts(m, "2. channel rdpt < wrpt list:\n"); + + cnt = 1; + for (j = 0; j < SMSG_VALID_CH_NR; j++) { + ch = ipc->channels[j]; + if (!ch) + continue; + + if (SIPC_READL(ch->rdptr) < SIPC_READL(ch->wrptr)) + seq_printf(m, "%2d. channel[%3d] rd: %d, wt: %d, name: %s\n", + cnt++, + sipc_cfg[j].channel, + SIPC_READL(ch->rdptr), + SIPC_READL(ch->wrptr), + sipc_cfg[j].name); + } + } + return 0; +} + +static int smsg_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, smsg_debug_show, inode->i_private); +} + +static const struct file_operations smsg_debug_fops = { + .open = smsg_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +int smsg_init_debugfs(void *root) +{ + if (!root) + return -ENXIO; + debugfs_create_file("smsg", 0444, + (struct dentry *)root, + NULL, + &smsg_debug_fops); + return 0; +} +EXPORT_SYMBOL_GPL(smsg_init_debugfs); + +#endif /* CONFIG_DEBUG_FS */ + + +MODULE_AUTHOR("Chen Gaopeng"); +MODULE_DESCRIPTION("SIPC/SMSG driver"); +MODULE_LICENSE("GPL v2"); diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/spipe.c b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/spipe.c new file mode 100644 index 000000000..7fbd7bc83 --- /dev/null +++ b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/spipe.c @@ -0,0 +1,480 @@ +/* + * Copyright (C) 2018 Spreadtrum Communications Inc. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "../include/sipc.h" +#include "sipc_priv.h" +#include "spipe.h" + +#define SPIPE_NR_BASE_NUM MKDEV(254, 0) +#define SDIAG_NR_BASE_NUM MKDEV(253, 0) +#define STTY_NR_BASE_NUM MKDEV(252, 0) +#define SNV_NR_BASE_NUM MKDEV(251, 0) + +struct spipe_device { + struct spipe_init_data *init; + int major; + int minor; + struct cdev cdev; +}; + +struct spipe_sbuf { + u8 dst; + u8 channel; + u32 bufid; +}; + +static struct class *spipe_class; + +static int spipe_open(struct inode *inode, struct file *filp) +{ + int minor = iminor(filp->f_path.dentry->d_inode); + struct spipe_device *spipe; + struct spipe_sbuf *sbuf; + + spipe = container_of(inode->i_cdev, struct spipe_device, cdev); + if (sbuf_status(spipe->init->dst, spipe->init->channel) != 0) { + printk("spipe %d-%d not ready to open!\n", + spipe->init->dst, spipe->init->channel); + filp->private_data = NULL; + return -ENODEV; + } + + sbuf = kmalloc(sizeof(struct spipe_sbuf), GFP_KERNEL); + if (!sbuf) + return -ENOMEM; + filp->private_data = sbuf; + + sbuf->dst = spipe->init->dst; + sbuf->channel = spipe->init->channel; + sbuf->bufid = minor - spipe->minor; + + return 0; +} + +static int spipe_release(struct inode *inode, struct file *filp) +{ + struct spipe_sbuf *sbuf = filp->private_data; + + kfree(sbuf); + + return 0; +} + +static ssize_t spipe_read(struct file *filp, + char __user *buf, size_t count, loff_t *ppos) +{ + struct spipe_sbuf *sbuf = filp->private_data; + int timeout = -1; + + if (filp->f_flags & O_NONBLOCK) + timeout = 0; + + return sbuf_read(sbuf->dst, sbuf->channel, sbuf->bufid, + (void *)buf, count, timeout); +} + +static ssize_t spipe_write(struct file *filp, + const char __user *buf, size_t count, loff_t *ppos) +{ + struct spipe_sbuf *sbuf = filp->private_data; + int timeout = -1; + + if (filp->f_flags & O_NONBLOCK) + timeout = 0; + + return sbuf_write(sbuf->dst, sbuf->channel, sbuf->bufid, + (void *)buf, count, timeout); +} + +static unsigned int spipe_poll(struct file *filp, poll_table *wait) +{ + struct spipe_sbuf *sbuf = filp->private_data; + + return sbuf_poll_wait(sbuf->dst, sbuf->channel, sbuf->bufid, + filp, wait); +} + +static long spipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + return 0; +} + +static const struct file_operations spipe_fops = { + .open = spipe_open, + .release = spipe_release, + .read = spipe_read, + .write = spipe_write, + .poll = spipe_poll, + .unlocked_ioctl = spipe_ioctl, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +#ifdef SPRD_PCIE_USE_DTS +static int spipe_parse_dt(struct spipe_init_data **init, + struct device_node *np, struct device *dev, dev_t *devid) +{ + struct spipe_init_data *pdata = NULL; + int ret; + u32 data; + + pdata = devm_kzalloc(dev, sizeof(struct spipe_init_data), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + ret = of_property_read_string(np, + "sprd,name", + (const char **)&pdata->name); + if (ret) + goto error; + + if (!strcmp(pdata->name, "spipe_nr")) + *devid = SPIPE_NR_BASE_NUM; + else if 
(!strcmp(pdata->name, "sdiag_nr")) + *devid = SDIAG_NR_BASE_NUM; + else if (!strcmp(pdata->name, "stty_nr")) + *devid = STTY_NR_BASE_NUM; + else if (!strcmp(pdata->name, "snv_nr")) + *devid = SNV_NR_BASE_NUM; + + ret = of_property_read_u32(np, "sprd,dst", (u32 *)&data); + if (ret) + goto error; + pdata->dst = (u8)data; + + ret = of_property_read_u32(np, "sprd,channel", (u32 *)&data); + if (ret) + goto error; + pdata->channel = (u8)data; + + ret = of_property_read_u32(np, + "sprd,ringnr", + (u32 *)&pdata->ringnr); + if (ret) + goto error; + + ret = of_property_read_u32(np, + "sprd,size-rxbuf", + (u32 *)&pdata->rxbuf_size); + if (ret) + goto error; + + ret = of_property_read_u32(np, + "sprd,size-txbuf", + (u32 *)&pdata->txbuf_size); + if (ret) + goto error; + + *init = pdata; + return ret; +error: + devm_kfree(dev, pdata); + *init = NULL; + return ret; +} +#else +static int spipe_parse_dt(struct spipe_init_data **init, + struct device_node *np, struct device *dev, dev_t *devid) +{ + struct spipe_init_data *pdata = *init; + + if (!strcmp(pdata->name, "spipe_nr")) + *devid = SPIPE_NR_BASE_NUM; + else if (!strcmp(pdata->name, "sdiag_nr")) + *devid = SDIAG_NR_BASE_NUM; + else if (!strcmp(pdata->name, "stty_nr")) + *devid = STTY_NR_BASE_NUM; + else if (!strcmp(pdata->name, "snv_nr")) + *devid = SNV_NR_BASE_NUM; + + return 0; +} +#endif + +static inline void spipe_destroy_pdata(struct spipe_init_data **init, + struct device *dev) +{ + *init = NULL; +} + +static int spipe_probe(struct platform_device *pdev) +{ + struct spipe_init_data *init = pdev->dev.platform_data; + struct spipe_device *spipe; + dev_t devid; + int i, rval; + struct device_node *np; + + printk("%s!\n", __func__); + + if (1) { + np = pdev->dev.of_node; + rval = spipe_parse_dt(&init, np, &pdev->dev, &devid); + if (rval) { + pr_err("Failed to parse spipe device tree, ret=%d\n", rval); + return rval; + } + + printk("spipe: after parse device tree, name=%s, dst=%u, channel=%u, ringnr=%u, rxbuf_size=0x%x, txbuf_size=0x%x\n", + init->name, + init->dst, + init->channel, + init->ringnr, + init->rxbuf_size, + init->txbuf_size); + + rval = sbuf_create(init->dst, init->channel, init->ringnr, + init->txbuf_size, init->rxbuf_size); + if (rval != 0) { + printk("Failed to create sbuf: %d\n", rval); + spipe_destroy_pdata(&init, &pdev->dev); + return rval; + } + + spipe = devm_kzalloc(&pdev->dev, + sizeof(struct spipe_device), + GFP_KERNEL); + if (spipe == NULL) { + sbuf_destroy(init->dst, init->channel); + spipe_destroy_pdata(&init, &pdev->dev); + printk("Failed to allocate spipe_device\n"); + return -ENOMEM; + } + + rval = alloc_chrdev_region(&devid, 0, init->ringnr, init->name); + //rval = register_chrdev_region(devid, init->ringnr, init->name); + if (rval != 0) { + sbuf_destroy(init->dst, init->channel); + devm_kfree(&pdev->dev, spipe); + spipe_destroy_pdata(&init, &pdev->dev); + printk("Failed to alloc spipe chrdev\n"); + return rval; + } + + cdev_init(&(spipe->cdev), &spipe_fops); + rval = cdev_add(&(spipe->cdev), devid, init->ringnr); + if (rval != 0) { + sbuf_destroy(init->dst, init->channel); + devm_kfree(&pdev->dev, spipe); + unregister_chrdev_region(devid, init->ringnr); + spipe_destroy_pdata(&init, &pdev->dev); + printk("Failed to add spipe cdev\n"); + return rval; + } + + spipe->major = MAJOR(devid); + spipe->minor = MINOR(devid); + if (init->ringnr > 1) { + for (i = 0; i < init->ringnr; i++) { + device_create(spipe_class, NULL, + MKDEV(spipe->major, spipe->minor + i), + NULL, "%s%d", init->name, i); + } + } else { + 
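/* single ring: create one device node without an index suffix */
+			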
device_create(spipe_class, NULL,
+			      MKDEV(spipe->major, spipe->minor),
+			      NULL, "%s", init->name);
+		}
+
+		spipe->init = init;
+
+		platform_set_drvdata(pdev, spipe);
+	}
+
+	return 0;
+}
+
+static int spipe_remove(struct platform_device *pdev)
+{
+	struct spipe_device *spipe = platform_get_drvdata(pdev);
+	int i;
+
+	if (spipe) {
+		for (i = 0; i < spipe->init->ringnr; i++)
+			device_destroy(spipe_class,
+				       MKDEV(spipe->major, spipe->minor + i));
+
+		cdev_del(&spipe->cdev);
+		unregister_chrdev_region(MKDEV(spipe->major, spipe->minor),
+					 spipe->init->ringnr);
+
+		sbuf_destroy(spipe->init->dst, spipe->init->channel);
+
+		spipe_destroy_pdata(&spipe->init, &pdev->dev);
+
+		devm_kfree(&pdev->dev, spipe);
+
+		platform_set_drvdata(pdev, NULL);
+	}
+
+	return 0;
+}
+
+#ifdef SPRD_PCIE_USE_DTS
+static const struct of_device_id spipe_match_table[] = {
+	{ .compatible = "sprd,spipe", },
+	{ },
+};
+#endif
+
+static struct platform_driver spipe_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "spipe",
+#ifdef SPRD_PCIE_USE_DTS
+		.of_match_table = spipe_match_table,
+#endif
+	},
+	.probe = spipe_probe,
+	.remove = spipe_remove,
+};
+
+static struct platform_device *spipe_pdev[MAX_SPIPE_CHN_NUM];
+
+/* default channel layout, used when the platform is not described in DT */
+static struct spipe_init_data spipe_data[MAX_SPIPE_CHN_NUM] = {
+	{
+		.sipc_name = SPIPE_DRIVER_NAME,
+		.name = "spipe_nr",
+		.dst = 1,
+		.channel = 4,
+		.ringnr = 15,
+		.txbuf_size = 0x1000,
+		.rxbuf_size = 0x1000
+	},
+	{
+		.sipc_name = SPIPE_DRIVER_NAME,
+		.name = "sdiag_nr",
+		.dst = 1,
+		.channel = 21,
+		.ringnr = 1,
+		.txbuf_size = 0x40000,
+		.rxbuf_size = 0x8000
+	},
+	{
+		.sipc_name = SPIPE_DRIVER_NAME,
+		.name = "stty_nr",
+		.dst = 1,
+		.channel = 6,
+		.ringnr = 32,
+		.txbuf_size = 0x0800,
+		.rxbuf_size = 0x0800
+	},
+	{
+		.sipc_name = SPIPE_DRIVER_NAME,
+		.name = "snv_nr",
+		.dst = 1,
+		.channel = 40,
+		.ringnr = 1,
+		.txbuf_size = 0x40400,
+		.rxbuf_size = 0x1000
+	}
+};
+
+static int spipe_platform_device_register(void)
+{
+	int retval = -ENOMEM;
+	int i;
+
+	for (i = 0; i < MAX_SPIPE_CHN_NUM; i++) {
+		spipe_pdev[i] = platform_device_alloc(SPIPE_DRIVER_NAME, i);
+		if (!spipe_pdev[i]) {
+			i--;
+			while (i >= 0)
+				platform_device_put(spipe_pdev[i--]);
+			return retval;
+		}
+	}
+
+	for (i = 0; i < MAX_SPIPE_CHN_NUM; i++) {
+		retval = platform_device_add_data(spipe_pdev[i], &spipe_data[i],
+						  sizeof(struct spipe_init_data));
+		if (retval)
+			goto err_add_pdata;
+	}
+
+	for (i = 0; i < MAX_SPIPE_CHN_NUM; i++) {
+		retval = platform_device_add(spipe_pdev[i]);
+		if (retval < 0) {
+			i--;
+			/* unwind only the devices that were already added */
+			while (i >= 0)
+				platform_device_del(spipe_pdev[i--]);
+			goto err_add_pdata;
+		}
+	}
+
+	return retval;
+
+err_add_pdata:
+	for (i = 0; i < MAX_SPIPE_CHN_NUM; i++)
+		platform_device_put(spipe_pdev[i]);
+	return retval;
+}
+
+static void spipe_platform_device_unregister(void)
+{
+	int i;
+
+	for (i = 0; i < MAX_SPIPE_CHN_NUM; i++)
+		platform_device_unregister(spipe_pdev[i]);
+}
+
+int spipe_init(void)
+{
+	int ret;
+
+	spipe_class = class_create(THIS_MODULE, "spipe");
+	if (IS_ERR(spipe_class))
+		return PTR_ERR(spipe_class);
+#ifndef SPRD_PCIE_USE_DTS
+	ret = spipe_platform_device_register();
+	if (ret)
+		return ret;
+#endif
+
+	ret = platform_driver_register(&spipe_driver);
+	if (ret) {
+#ifndef SPRD_PCIE_USE_DTS
+		spipe_platform_device_unregister();
+#endif
+		return ret;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(spipe_init);
+
+void spipe_exit(void)
+{
+	platform_driver_unregister(&spipe_driver);
+#ifndef SPRD_PCIE_USE_DTS
+	spipe_platform_device_unregister();
+#endif
+	class_destroy(spipe_class);
+}
+EXPORT_SYMBOL_GPL(spipe_exit);
+
+void spipe_device_down(void)
+{
+	int i;
+
+	for (i = 0; i < MAX_SPIPE_CHN_NUM; i++)
+		sbuf_down(spipe_data[i].dst, spipe_data[i].channel);
+}
+EXPORT_SYMBOL_GPL(spipe_device_down);
diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/spipe.h b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/spipe.h
new file mode 100644
index 000000000..eacca6c67
--- /dev/null
+++ b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/spipe.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2019 Spreadtrum Communications Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SPIPE_H
+#define __SPIPE_H
+
+#define MAX_SPIPE_CHN_NUM 4
+#define SPIPE_DRIVER_NAME "spipe"
+
+struct spipe_init_data {
+	char *name;
+	char *sipc_name;
+	u8 dst;
+	u8 channel;
+	u32 ringnr;
+	u32 txbuf_size;
+	u32 rxbuf_size;
+};
+#endif
diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/spool.c b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/spool.c
new file mode 100644
index 000000000..724dc9648
--- /dev/null
+++ b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/spool.c
@@ -0,0 +1,538 @@
+/*
+ * Copyright (C) 2018 Spreadtrum Communications Inc.
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "../include/sipc.h" +#include "spool.h" + +#define SLOG_NR_BASE_NUM MKDEV(156, 0) + +struct spool_device; + +struct spool_device { + struct spool_init_data *init; + int major; + int minor; + struct cdev cdev; + struct platform_device *plt_dev; + + struct device *sys_dev; /* Device object in sysfs */ +}; + +struct spool_sblock { + u8 dst; + u8 channel; + bool is_hold; + struct sblock hold; +}; + +static struct class *spool_class; + +static int spool_open(struct inode *inode, struct file *filp) +{ + struct spool_device *spool; + struct spool_sblock *sblock; + int ret; + + spool = container_of(inode->i_cdev, struct spool_device, cdev); + ret = sblock_query(spool->init->dst, spool->init->channel); + if (ret) + return ret; + sblock = kmalloc(sizeof(struct spool_sblock), GFP_KERNEL); + if (!sblock) + return -ENOMEM; + filp->private_data = sblock; + + sblock->dst = spool->init->dst; + sblock->channel = spool->init->channel; + sblock->is_hold = 0; + + return 0; +} + +static int spool_release(struct inode *inode, struct file *filp) +{ + struct spool_sblock *sblock = filp->private_data; + + if (sblock->is_hold) { + if (sblock_release(sblock->dst, sblock->channel, &sblock->hold)) + pr_debug("failed to release block!\n"); + } + kfree(sblock); + + return 0; +} + +static ssize_t spool_read(struct file *filp, + char __user *buf, size_t count, loff_t *ppos) +{ + struct spool_sblock *sblock = filp->private_data; + int timeout = -1; + int ret = 0; + int rdsize = 0; + struct sblock blk = {0}; + + if (filp->f_flags & O_NONBLOCK) + timeout = 0; + + if (sblock->is_hold) { + if (count < sblock->hold.length - *ppos) { + rdsize = count; + } else { + rdsize = sblock->hold.length - *ppos; + sblock->is_hold = 0; + } + blk = sblock->hold; + } else{ + *ppos = 0; + ret = sblock_receive(sblock->dst, + sblock->channel, &blk, timeout); + if (ret < 0) { + pr_debug("%s: failed to receive block!\n", __func__); + return ret; + } + if (blk.length <= count) + rdsize = blk.length; + else { + rdsize = count; + sblock->is_hold = 1; + sblock->hold = blk; + } + } + + if (unalign_copy_to_user(buf, blk.addr + *ppos, rdsize)) { + pr_err("%s: failed to copy to user!\n", __func__); + sblock->is_hold = 0; + *ppos = 0; + ret = -EFAULT; + } else { + ret = rdsize; + *ppos += rdsize; + } + + if (sblock->is_hold == 0) { + if (sblock_release(sblock->dst, sblock->channel, &blk)) + pr_err("%s: failed to release block!\n", __func__); + } + + return ret; +} + +static ssize_t spool_write(struct file *filp, + const char __user *buf, size_t count, loff_t *ppos) +{ + struct spool_sblock *sblock = filp->private_data; + int timeout = -1; + int ret = 0; + int wrsize = 0; + int pos = 0; + struct sblock blk = {0}; + size_t len = count; + + if (filp->f_flags & O_NONBLOCK) + timeout = 0; + + do { + ret = sblock_get(sblock->dst, sblock->channel, &blk, timeout); + if (ret < 0) { + pr_info("%s: failed to get block!\n", __func__); + return ret; + } + + wrsize = (blk.length > len ? 
len : blk.length); + if (unalign_copy_from_user(blk.addr, buf + pos, wrsize)) { + pr_info("%s: failed to copy from user!\n", __func__); + ret = -EFAULT; + } else { + blk.length = wrsize; + len -= wrsize; + pos += wrsize; + } + + if (sblock_send(sblock->dst, sblock->channel, &blk)) + pr_debug("%s: failed to send block!", __func__); + } while (len > 0 && ret == 0); + + return count - len; +} + +static unsigned int spool_poll(struct file *filp, poll_table *wait) +{ + struct spool_sblock *sblock = filp->private_data; + + return sblock_poll_wait(sblock->dst, sblock->channel, filp, wait); +} + +static long spool_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + return 0; +} + +static const struct file_operations spool_fops = { + .open = spool_open, + .release = spool_release, + .read = spool_read, + .write = spool_write, + .poll = spool_poll, + .unlocked_ioctl = spool_ioctl, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +#ifdef SPRD_PCIE_USE_DTS +static int spool_parse_dt(struct spool_init_data **init, struct device *dev, + struct device_node *np, dev_t *dev_no) +{ + struct spool_init_data *pdata = NULL; + int ret; + u32 data; + + pdata = devm_kzalloc(dev, sizeof(struct spool_init_data), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + ret = of_property_read_string(np, "sprd,name", + (const char **)&pdata->name); + if (ret) + goto error; + + if (!strcmp(pdata->name, "slog_nr")) + *dev_no = SLOG_NR_BASE_NUM; + + ret = of_property_read_u32(np, "sprd,dst", (u32 *)&data); + if (ret) + goto error; + pdata->dst = (u8)data; + + ret = of_property_read_u32(np, "sprd,channel", (u32 *)&data); + if (ret) + goto error; + pdata->channel = (u8)data; + + ret = of_property_read_u32(np, "sprd,preconfigured", (u32 *)&data); + if (!ret) + pdata->pre_cfg = (int)data; + + ret = of_property_read_u32(np, "sprd,tx-blksize", + (u32 *)&pdata->txblocksize); + if (ret) + goto error; + ret = of_property_read_u32(np, "sprd,tx-blknum", + (u32 *)&pdata->txblocknum); + if (ret) + goto error; + ret = of_property_read_u32(np, "sprd,rx-blksize", + (u32 *)&pdata->rxblocksize); + if (ret) + goto error; + ret = of_property_read_u32(np, "sprd,rx-blknum", + (u32 *)&pdata->rxblocknum); + if (ret) + goto error; + + if (!of_property_read_u32(np, "sprd,nodev", (u32 *)&data)) + pdata->nodev = (u8)data; + + *init = pdata; + return ret; +error: + devm_kfree(dev, pdata); + *init = NULL; + return ret; +} +#else +static int spool_parse_dt(struct spool_init_data **init, struct device *dev, + struct device_node *np, dev_t *dev_no) +{ + struct spool_init_data *pdata = NULL; + + pdata = devm_kzalloc(dev, sizeof(struct spool_init_data), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + pdata->name = "slog_nr"; + if (!strcmp(pdata->name, "slog_nr")) + *dev_no = SLOG_NR_BASE_NUM; + pdata->dst = 1; + pdata->channel = 5; + pdata->txblocksize = 0; + pdata->txblocknum = 0; + pdata->rxblocksize = 0x10000; + pdata->rxblocknum = 32; + *init = pdata; + + return 0; +} +#endif + + +static ssize_t base_addr_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct spool_device *spool = (struct spool_device *) + dev_get_drvdata(dev); + struct spool_init_data *init = spool->init; + uint32_t addr; + int ret; + + ret = sblock_get_smem_cp_addr(init->dst, init->channel, + &addr); + if (ret < 0) + return ret; + + return snprintf(buf, PAGE_SIZE, "%u %u 0x%08X %d %u %u %u %u\n", + (unsigned int)init->dst, + (unsigned int)init->channel, + addr, + init->pre_cfg, + (unsigned int)init->txblocknum, + (unsigned 
int)init->txblocksize, + (unsigned int)init->rxblocknum, + (unsigned int)init->rxblocksize); +} + +static DEVICE_ATTR(base_addr, 0440, + base_addr_show, NULL); + +static int create_spool(struct platform_device *pdev, + struct spool_init_data *init, + struct spool_device **out, dev_t dev_no) +{ + int rval; + struct spool_device *spool; + //dev_t dev_no; + char sp_name[16]; + + snprintf(sp_name, sizeof(sp_name), "spool-%u-%u", + (unsigned int)init->dst, + (unsigned int)init->channel); + //rval = alloc_chrdev_region(&dev_no, 0, 1, sp_name); + rval = register_chrdev_region(dev_no, 1, sp_name); + if (rval) + return rval; + + if (init->pre_cfg) + rval = sblock_pcfg_create(init->dst, + init->channel, + init->txblocknum, + init->txblocksize, + init->rxblocknum, + init->rxblocksize); + else + rval = sblock_create(init->dst, + init->channel, + init->txblocknum, + init->txblocksize, + init->rxblocknum, + init->rxblocksize); + if (rval) { + pr_info("Failed to create sblock: %d\n", rval); + goto free_devno; + } + + spool = devm_kzalloc(&pdev->dev, + sizeof(struct spool_device), + GFP_KERNEL); + if (!spool) { + pr_info("Failed to allocate spool_device\n"); + rval = -ENOMEM; + goto free_sblock; + } + + spool->init = init; + spool->major = MAJOR(dev_no); + spool->minor = MINOR(dev_no); + spool->plt_dev = pdev; + + if (!init->nodev) { + cdev_init(&spool->cdev, &spool_fops); + + rval = cdev_add(&spool->cdev, dev_no, 1); + if (rval) { + pr_info("Failed to add spool cdev\n"); + goto free_spool; + } + } + + spool->sys_dev = device_create(spool_class, NULL, + dev_no, + spool, "%s", init->name); + device_create_file(&pdev->dev, &dev_attr_base_addr); + + platform_set_drvdata(pdev, spool); + + *out = spool; + + return 0; + +free_spool: + devm_kfree(&pdev->dev, spool); + +free_sblock: + sblock_destroy(init->dst, init->channel); + +free_devno: + unregister_chrdev_region(dev_no, 1); + return rval; +} + +static int destroy_spool(struct spool_device *spool) +{ + dev_t dev_no = MKDEV(spool->major, spool->minor); + struct spool_init_data *init = spool->init; + + if (spool->sys_dev) { + device_destroy(spool_class, dev_no); + spool->sys_dev = NULL; + } + if (!init->nodev) + cdev_del(&spool->cdev); + sblock_destroy(init->dst, init->channel); + unregister_chrdev_region(dev_no, 1); + devm_kfree(&spool->plt_dev->dev, init); + devm_kfree(&spool->plt_dev->dev, spool); + + return 0; +} + +static int spool_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + int rval; + struct spool_init_data *init; + struct spool_device *spool; + dev_t dev_no; + +#ifdef SPRD_PCIE_USE_DTS + if (!np) + return -ENODEV; +#endif + + rval = spool_parse_dt(&init, &pdev->dev, np, &dev_no); + if (rval) { + pr_err("Failed to parse spool device tree, ret=%d\n", + rval); + return rval; + } + + pr_info("spool: name=%s, dst=%u, channel=%u, pre_cfg=%u\n", + init->name, + init->dst, + init->channel, + init->pre_cfg); + + pr_info("spool: tx_num=%u, tx_size=%u, rx_num=%u, rx_size=%u\n", + init->txblocknum, + init->txblocksize, + init->rxblocknum, + init->rxblocksize); + + rval = create_spool(pdev, init, &spool, dev_no); + if (rval) { + pr_err("Failed to create spool device %u:%u, ret=%d\n", + (unsigned int)init->dst, + (unsigned int)init->channel, rval); + devm_kfree(&pdev->dev, init); + } + + return 0; +} + +static int spool_remove(struct platform_device *pdev) +{ + struct spool_device *priv = (struct spool_device *) + platform_get_drvdata(pdev); + + destroy_spool(priv); + + platform_set_drvdata(pdev, NULL); + + return 0; +} + 
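+/*
+ * Illustrative usage (not part of the driver): with the non-DT
+ * platform data from spool_parse_dt() above (name "slog_nr", dst 1,
+ * channel 5, 32 rx blocks of 0x10000 bytes), the module exposes
+ * /dev/slog_nr. spool_read() may return one sblock fragment at a
+ * time and holds the remainder for the next read, so a consumer just
+ * loops; the output path and buffer size here are arbitrary:
+ *
+ *	int fd = open("/dev/slog_nr", O_RDONLY);
+ *	int out = open("/tmp/slog.bin", O_WRONLY | O_CREAT | O_APPEND, 0644);
+ *	char buf[0x10000];
+ *	ssize_t n;
+ *
+ *	while ((n = read(fd, buf, sizeof(buf))) > 0)
+ *		write(out, buf, n);
+ */
+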
+#ifdef SPRD_PCIE_USE_DTS +static const struct of_device_id spool_match_table[] = { + { .compatible = "sprd,spool", }, + { }, +}; +#endif + +static struct platform_driver spool_driver = { + .driver = { + .owner = THIS_MODULE, + .name = "spool", +#ifdef SPRD_PCIE_USE_DTS + .of_match_table = spool_match_table, +#endif + }, + .probe = spool_probe, + .remove = spool_remove, +}; + +#ifndef SPRD_PCIE_USE_DTS +static void spool_platform_device_release(struct device *dev) {} +static struct platform_device spool_device = { + .name = "spool", + .id = -1, + .dev = { + .release = spool_platform_device_release, + } +}; +#endif + +int spool_init(void) +{ + int ret; + + spool_class = class_create(THIS_MODULE, "spool"); + if (IS_ERR(spool_class)) + return PTR_ERR(spool_class); +#ifndef SPRD_PCIE_USE_DTS + if((ret = platform_device_register(&spool_device))) + return ret; +#endif + if((ret = platform_driver_register(&spool_driver))) { +#ifndef SPRD_PCIE_USE_DTS + platform_device_unregister(&spool_device); +#endif + return ret; + } + + return ret; +} +EXPORT_SYMBOL_GPL(spool_init); + +void spool_exit(void) +{ + platform_driver_unregister(&spool_driver); +#ifndef SPRD_PCIE_USE_DTS + platform_device_unregister(&spool_device); +#endif + class_destroy(spool_class); +} + + +void spool_device_down(void) +{ + sblock_down(1, 5); +} + +EXPORT_SYMBOL_GPL(spool_exit); +EXPORT_SYMBOL_GPL(spool_device_down); diff --git a/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/spool.h b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/spool.h new file mode 100644 index 000000000..c0c5d2eeb --- /dev/null +++ b/package/wwan/driver/quectel_SRPD_PCIE/src/sipc/spool.h @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2019 Spreadtrum Communications Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __SPOOL_H +#define __SPOOL_H + +struct spool_init_data { + char *name; + u8 dst; + u8 channel; + u8 nodev; + /* Preconfigured channel */ + int pre_cfg; + u32 txblocknum; + u32 txblocksize; + u32 rxblocknum; + u32 rxblocksize; +}; +#endif diff --git a/package/wwan/driver/quectel_cm_5G/Makefile b/package/wwan/driver/quectel_cm_5G/Makefile new file mode 100644 index 000000000..623cda979 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/Makefile @@ -0,0 +1,45 @@ +include $(TOPDIR)/rules.mk +include $(INCLUDE_DIR)/kernel.mk + +PKG_NAME:= quectel-CM-5G +PKG_RELEASE:=1 +PKG_VERSION:=1.0 + +include $(INCLUDE_DIR)/package.mk + +define Package/quectel-CM-5G + SECTION:=utils + CATEGORY:=Utilities + TITLE:=quectel-CM-5G app building test +endef + +define Package/quectel-CM-5G/description + quectel-CM-5G app building test +endef + +define Build/Prepare + mkdir -p $(PKG_BUILD_DIR) + $(CP) ${TOPDIR}/package/quectel_cm_5G/src/* $(PKG_BUILD_DIR)/ +endef + +define Build/Compile + $(MAKE) -C "$(PKG_BUILD_DIR)" \ + CROSS_COMPILE="$(TARGET_CROSS)" \ + ARCH="$(LINUX_KARCH)" \ + SUBDIRS="$(PKG_BUILD_DIR)" \ + EXTRA_CFLAGS="$(EXTRA_CFLAGS)" \ + CC="$(TARGET_CC)" \ + +endef + +define Package/quectel-CM-5G/install + $(INSTALL_DIR) $(1)/usr/bin $(1)/lib/netifd/proto $(1)/lib/netifd + $(INSTALL_BIN) $(PKG_BUILD_DIR)/quectel-CM $(1)/usr/bin/quectel-CM + $(INSTALL_BIN) ./files/rmnet_init.sh $(1)/usr/bin + $(INSTALL_BIN) ./files/rmnet.sh $(1)/lib/netifd/proto + $(INSTALL_BIN) ./files/rmnet.script $(1)/lib/netifd + $(INSTALL_BIN) ./files/rmnet6.sh $(1)/lib/netifd/proto + $(INSTALL_BIN) ./files/rmnet6.script $(1)/lib/netifd +endef + +$(eval $(call BuildPackage,quectel-CM-5G)) diff --git a/package/wwan/driver/quectel_cm_5G/files/dhcp b/package/wwan/driver/quectel_cm_5G/files/dhcp new file mode 100644 index 000000000..76739279b --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/files/dhcp @@ -0,0 +1,48 @@ + +config dnsmasq + option domainneeded '1' + option boguspriv '1' + option filterwin2k '0' + option localise_queries '1' + option rebind_protection '1' + option rebind_localhost '1' + option local '/lan/' + option domain 'lan' + option expandhosts '1' + option nonegcache '0' + option authoritative '1' + option readethers '1' + option leasefile '/tmp/dhcp.leases' + option resolvfile '/tmp/resolv.conf.auto' + option nonwildcard '1' + option localservice '1' + +config dhcp 'lan' + option interface 'lan' + option start '100' + option limit '150' + option leasetime '12h' + option ra 'relay' + option dhcpv6 'disabled' + option ndp 'relay' + +config dhcp 'wan' + option interface 'wan' + option ignore '1' + option ra 'relay' + option dhcpv6 'disabled' + option ndp 'relay' + option ndproxy_routing '0' + option master '1' + +config dhcp 'wan6' + option ra 'relay' + option dhcpv6 'disabled' + option ndp 'relay' + option ndproxy_routing '0' + option master '1' + option interface 'wan6' + +config odhcpd 'odhcpd' + option loglevel '7' + diff --git a/package/wwan/driver/quectel_cm_5G/files/rmnet.script b/package/wwan/driver/quectel_cm_5G/files/rmnet.script new file mode 100644 index 000000000..93d2cf05b --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/files/rmnet.script @@ -0,0 +1,65 @@ +#!/bin/sh +# Copyright (c) 2019 Qualcomm Technologies, Inc. +# All Rights Reserved. +# Confidential and Proprietary - Qualcomm Technologies, Inc. + + +[ -z "$1" ] && echo "Error: should be run by rmnet" && exit 1 +[ -z "$2" ] && echo "Error: should be run by rmnet" && exit 1 + +. /lib/functions.sh +. 
/lib/functions/network.sh +. /lib/netifd/netifd-proto.sh +setup_interface() { + INTERFACE=$1 + CONFIG=/tmp/rmnet_$2_ipv4config + logger "rmnet setup_interface $1 $2 here" + #Fetch information from lower. + [ -f ${CONFIG} ] || { + proto_notify_error "$INTERFACE" "RMNET data call Not ready" + proto_block_restart "$INTERFACE" + return + } + . ${CONFIG} + ip=$PUBLIC_IP + DNS=$DNSSERVERS + router=$GATEWAY + subnet=$NETMASK + interface=$IFNAME + #Send the information to the netifd + proto_init_update "$interface" 1 1 + #ip and subnet + proto_add_ipv4_address "$ip" "${subnet:-255.255.255.0}" + + #Any router? if not, remove below scripts + #router format should be separated by space + for i in $router; do + proto_add_ipv4_route "$i" 32 "" "$ip" + proto_add_ipv4_route 0.0.0.0 0 "$i" "$ip" + done + + #dns information tell the netifd. + for dns in $DNS; do + proto_add_dns_server "$dns" + done + + #Domain information tell the netifd + for domain in $domain; do + proto_add_dns_search "$domain" + done + + #proto_add_data + [ -n "$ZONE" ] && json_add_string zone "$ZONE" + proto_close_data + + proto_send_update "$INTERFACE" + +} + +case "$1" in + renew) + setup_interface $2 $3 + ;; +esac + +exit 0 diff --git a/package/wwan/driver/quectel_cm_5G/files/rmnet.sh b/package/wwan/driver/quectel_cm_5G/files/rmnet.sh new file mode 100644 index 000000000..f783561e8 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/files/rmnet.sh @@ -0,0 +1,31 @@ +#!/bin/sh +# Copyright (c) 2019 Qualcomm Technologies, Inc. +# All Rights Reserved. +# Confidential and Proprietary - Qualcomm Technologies, Inc. + +. /lib/functions.sh +. /lib/functions/network.sh +. ../netifd-proto.sh +init_proto "$@" + +proto_rmnet_setup() { + local cfg="$1" + local iface="$2" + + logger "rmnet started" + #Call rmnet management script below!! + logger "rmnet updated ${cfg} ${iface}" + /lib/netifd/rmnet.script renew $cfg $iface +} + +proto_rmnet_teardown() { + local cfg="$1" + #Tear down rmnet manager script here.*/ +} + +proto_rmnet_init_config() { + #ddno_device=1 + available=1 +} + +add_protocol rmnet diff --git a/package/wwan/driver/quectel_cm_5G/files/rmnet6.script b/package/wwan/driver/quectel_cm_5G/files/rmnet6.script new file mode 100644 index 000000000..d252c2c37 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/files/rmnet6.script @@ -0,0 +1,60 @@ +#!/bin/sh +# Copyright (c) 2019 Qualcomm Technologies, Inc. +# All Rights Reserved. +# Confidential and Proprietary - Qualcomm Technologies, Inc. + + +[ -z "$1" ] && echo "Error: should be run by rmnet" && exit 1 +[ -z "$2" ] && echo "Error: should be run by rmnet" && exit 1 + +. /lib/functions.sh +. /lib/functions/network.sh +. /lib/netifd/netifd-proto.sh +setup_interface() { + INTERFACE=$1 + CONFIG=/tmp/rmnet_$2_ipv6config + logger "rmnet setup_interface $1 $2 here" + #Fetch information from lower. + [ -f ${CONFIG} ] || { + proto_notify_error "$INTERFACE" "RMNET data call NOT ready" + proto_block_restart "$INTERFACE" + return + } + . ${CONFIG} + ADDRESSES=$PUBLIC_IP + interface=$IFNAME + #Send the information to the netifd + proto_init_update "$interface" 1 1 + + #ip and subnet + proto_add_ipv6_address "${PUBLIC_IP}" "128" + proto_add_ipv6_prefix "${PUBLIC_IP}/${PrefixLength}" + + #router format should be separated by space + proto_add_ipv6_route "$GATEWAY" 128 + proto_add_ipv6_route "::0" 0 "$GATEWAY" "" "" "${PUBLIC_IP}/${PrefixLength}" + + #dns information tell the netifd. 
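+	# DNSSERVERS, like PUBLIC_IP and GATEWAY, is set by the
+	# /tmp/rmnet_*_ipv6config file sourced above (presumably written
+	# by the dial tool before this interface is renewed)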
+ for dns in $DNSSERVERS; do + proto_add_dns_server "$dns" + done + + #Domain information tell the netifd + for domain in $domain; do + proto_add_dns_search "$domain" + done + + #proto_add_data + [ -n "$ZONE" ] && json_add_string zone "$ZONE" + proto_close_data + + proto_send_update "$INTERFACE" +} + +case "$1" in + renew|bound) + setup_interface $2 $3 + ;; +esac + +exit 0 diff --git a/package/wwan/driver/quectel_cm_5G/files/rmnet6.sh b/package/wwan/driver/quectel_cm_5G/files/rmnet6.sh new file mode 100644 index 000000000..94a9598b4 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/files/rmnet6.sh @@ -0,0 +1,31 @@ +#!/bin/sh +# Copyright (c) 2019 Qualcomm Technologies, Inc. +# All Rights Reserved. +# Confidential and Proprietary - Qualcomm Technologies, Inc. + +. /lib/functions.sh +. /lib/functions/network.sh +. ../netifd-proto.sh +init_proto "$@" + +proto_rmnet6_setup() { + local cfg="$1" + local iface="$2" + + logger "rmnet6 started" + #Call rmnet management script below!! + /lib/netifd/rmnet6.script renew $cfg $iface + logger "rmnet6 updated" +} + +proto_rmnet6_teardown() { + local cfg="$1" + #Tear down rmnet manager script here.*/ +} + +proto_rmnet6_init_config() { + #ddno_device=1 + available=1 +} + +add_protocol rmnet6 diff --git a/package/wwan/driver/quectel_cm_5G/files/rmnet_init.sh b/package/wwan/driver/quectel_cm_5G/files/rmnet_init.sh new file mode 100644 index 000000000..3d5543862 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/files/rmnet_init.sh @@ -0,0 +1,31 @@ +#!/bin/sh + +uci set network.wan='interface' +uci set network.wan.ifname='wwan0' +uci set network.wan.proto='rmnet' + +uci set network.wan6='interface' +uci set network.wan6.ifname='wwan0' +uci set network.wan6.proto='rmnet6' + +uci set dhcp.lan.ra='relay' +uci set dhcp.lan.dhcpv6='disabled' +uci set dhcp.lan.ndp='relay' + +uci set dhcp.wan.ra='relay' +uci set dhcp.wan.dhcpv6='disabled' +uci set dhcp.wan.ndp='relay' +uci set dhcp.wan.ndproxy_routing='0' + +uci set dhcp.wan6=dhcp +uci set dhcp.wan6.interface='wan6' +uci set dhcp.wan6.ra='relay' +uci set dhcp.wan6.dhcpv6='disabled' +uci set dhcp.wan6.ndp='relay' +uci set dhcp.wan6.ndproxy_routing='0' +uci set dhcp.wan6.master='1' + +uci set dhcp.odhcpd=odhcpd +uci set dhcp.odhcpd.loglevel='7' + +uci commit diff --git a/package/wwan/driver/quectel_cm_5G/src/CMakeLists.txt b/package/wwan/driver/quectel_cm_5G/src/CMakeLists.txt new file mode 100644 index 000000000..8ce3f5a70 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/CMakeLists.txt @@ -0,0 +1,36 @@ +cmake_minimum_required(VERSION 2.4) + +project(quectel-CM) +add_definitions(-Wall -Wextra -Werror -O1) +option(USE_QRTR "Enable QRTR" OFF) + +set( QL_CM_SRC + QmiWwanCM.c GobiNetCM.c main.c MPQMUX.c QMIThread.c util.c qmap_bridge_mode.c mbim-cm.c device.c + atc.c atchannel.c at_tok.c + udhcpc.c + ) + +if(USE_QRTR) +add_definitions(-DCONFIG_QRTR) +set( QRTR_SRC qrtr.c rmnetctl.c) +endif() + +add_executable(quectel-CM ${QL_CM_SRC} ${QRTR_SRC}) +target_link_libraries(quectel-CM PUBLIC pthread) +install (TARGETS quectel-CM DESTINATION bin) + +add_executable(quectel-qmi-proxy quectel-qmi-proxy.c) +target_link_libraries(quectel-qmi-proxy PUBLIC pthread) +install (TARGETS quectel-qmi-proxy DESTINATION bin) + +add_executable(quectel-mbim-proxy quectel-mbim-proxy.c) +target_link_libraries(quectel-mbim-proxy PUBLIC pthread) +install (TARGETS quectel-mbim-proxy DESTINATION bin) + +add_executable(quectel-atc-proxy quectel-atc-proxy.c atchannel.c at_tok.c util.c) +target_link_libraries(quectel-atc-proxy PUBLIC 
pthread) +install (TARGETS quectel-atc-proxy DESTINATION bin) + +add_executable(quectel-qrtr-proxy quectel-qrtr-proxy.c) +target_link_libraries(quectel-qrtr-proxy PUBLIC pthread) +install (TARGETS quectel-qrtr-proxy DESTINATION bin) diff --git a/package/wwan/driver/quectel_cm_5G/src/GobiNetCM.c b/package/wwan/driver/quectel_cm_5G/src/GobiNetCM.c new file mode 100644 index 000000000..253a4f1d1 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/GobiNetCM.c @@ -0,0 +1,246 @@ +/****************************************************************************** + @file GobiNetCM.c + @brief GobiNet driver. + + DESCRIPTION + Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules. + + INITIALIZATION AND SEQUENCING REQUIREMENTS + None. + + --------------------------------------------------------------------------- + Copyright (c) 2016 - 2020 Quectel Wireless Solution, Co., Ltd. All Rights Reserved. + Quectel Wireless Solution Proprietary and Confidential. + --------------------------------------------------------------------------- +******************************************************************************/ +#include +#include +#include +#include +#include +#include "QMIThread.h" + +#ifdef CONFIG_GOBINET +static int qmiclientId[QMUX_TYPE_ALL]; + +// IOCTL to generate a client ID for this service type +#define IOCTL_QMI_GET_SERVICE_FILE 0x8BE0 + 1 + +// IOCTL to get the VIDPID of the device +#define IOCTL_QMI_GET_DEVICE_VIDPID 0x8BE0 + 2 + +// IOCTL to get the MEID of the device +#define IOCTL_QMI_GET_DEVICE_MEID 0x8BE0 + 3 + +static int GobiNetSendQMI(PQCQMIMSG pRequest) { + int ret, fd; + + fd = qmiclientId[pRequest->QMIHdr.QMIType]; + pRequest->QMIHdr.ClientId = (fd&0xFF) ? fd&0xFF : pRequest->QMIHdr.QMIType; + + if (fd <= 0) { + dbg_time("%s QMIType: %d has no clientID", __func__, pRequest->QMIHdr.QMIType); + return -ENODEV; + } + + // Always ready to write + if (1 == 1) { + ssize_t nwrites = le16_to_cpu(pRequest->QMIHdr.Length) + 1 - sizeof(QCQMI_HDR); + ret = write(fd, &pRequest->MUXMsg, nwrites); + if (ret == nwrites) { + ret = 0; + } else { + dbg_time("%s write=%d, errno: %d (%s)", __func__, ret, errno, strerror(errno)); + } + } else { + dbg_time("%s poll=%d, errno: %d (%s)", __func__, ret, errno, strerror(errno)); + } + + return ret; +} + +static int GobiNetGetClientID(const char *qcqmi, UCHAR QMIType) { + int ClientId; + ClientId = cm_open_dev(qcqmi); + if (ClientId == -1) { + dbg_time("failed to open %s, errno: %d (%s)", qcqmi, errno, strerror(errno)); + return -1; + } + + if (ioctl(ClientId, IOCTL_QMI_GET_SERVICE_FILE, QMIType) != 0) { + dbg_time("failed to get ClientID for 0x%02x errno: %d (%s)", QMIType, errno, strerror(errno)); + close(ClientId); + ClientId = 0; + } + + switch (QMIType) { + case QMUX_TYPE_WDS: dbg_time("Get clientWDS = %d", ClientId); break; + case QMUX_TYPE_DMS: dbg_time("Get clientDMS = %d", ClientId); break; + case QMUX_TYPE_NAS: dbg_time("Get clientNAS = %d", ClientId); break; + case QMUX_TYPE_QOS: dbg_time("Get clientQOS = %d", ClientId); break; + case QMUX_TYPE_WMS: dbg_time("Get clientWMS = %d", ClientId); break; + case QMUX_TYPE_PDS: dbg_time("Get clientPDS = %d", ClientId); break; + case QMUX_TYPE_UIM: dbg_time("Get clientUIM = %d", ClientId); break; + case QMUX_TYPE_COEX: dbg_time("Get clientCOEX = %d", ClientId); break; + case QMUX_TYPE_WDS_ADMIN: dbg_time("Get clientWDA = %d", ClientId); + break; + default: break; + } + + return ClientId; +} + +static int GobiNetDeInit(void) { + unsigned int i; + for (i = 0; i < 
sizeof(qmiclientId)/sizeof(qmiclientId[0]); i++) + { + if (qmiclientId[i] != 0) + { + close(qmiclientId[i]); + qmiclientId[i] = 0; + } + } + + return 0; +} + +static void * GobiNetThread(void *pData) { + PROFILE_T *profile = (PROFILE_T *)pData; + const char *qcqmi = (const char *)profile->qmichannel; + int wait_for_request_quit = 0; + + qmiclientId[QMUX_TYPE_WDS] = GobiNetGetClientID(qcqmi, QMUX_TYPE_WDS); + if (profile->enable_ipv6) + qmiclientId[QMUX_TYPE_WDS_IPV6] = GobiNetGetClientID(qcqmi, QMUX_TYPE_WDS); + qmiclientId[QMUX_TYPE_DMS] = GobiNetGetClientID(qcqmi, QMUX_TYPE_DMS); + qmiclientId[QMUX_TYPE_NAS] = GobiNetGetClientID(qcqmi, QMUX_TYPE_NAS); + qmiclientId[QMUX_TYPE_UIM] = GobiNetGetClientID(qcqmi, QMUX_TYPE_UIM); +#ifdef CONFIG_COEX_WWAN_STATE + qmiclientId[QMUX_TYPE_COEX] = GobiNetGetClientID(qcqmi, QMUX_TYPE_COEX); +#endif + if (profile->qmap_mode == 0 || profile->loopback_state) {//when QMAP enabled, set data format in GobiNet Driver + qmiclientId[QMUX_TYPE_WDS_ADMIN] = GobiNetGetClientID(qcqmi, QMUX_TYPE_WDS_ADMIN); + profile->wda_client = qmiclientId[QMUX_TYPE_WDS_ADMIN]; + } + + //donot check clientWDA, there is only one client for WDA, if quectel-CM is killed by SIGKILL, i cannot get client ID for WDA again! + if (qmiclientId[QMUX_TYPE_WDS] == 0) /*|| (clientWDA == -1)*/ { + GobiNetDeInit(); + dbg_time("%s Failed to open %s, errno: %d (%s)", __func__, qcqmi, errno, strerror(errno)); + qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_DISCONNECTED); + pthread_exit(NULL); + return NULL; + } + + qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_CONNECTED); + + while (1) { + struct pollfd pollfds[16] = {{qmidevice_control_fd[1], POLLIN, 0}}; + int ne, ret, nevents = 1; + unsigned int i; + + for (i = 0; i < sizeof(qmiclientId)/sizeof(qmiclientId[0]); i++) + { + if (qmiclientId[i] != 0) + { + pollfds[nevents].fd = qmiclientId[i]; + pollfds[nevents].events = POLLIN; + pollfds[nevents].revents = 0; + nevents++; + } + } + + do { + ret = poll(pollfds, nevents, wait_for_request_quit ? 
1000: -1);
+        } while ((ret < 0) && (errno == EINTR));
+
+        if (ret == 0 && wait_for_request_quit) {
+            QmiThreadRecvQMI(NULL); //main thread may be pending on QmiThreadSendQMI()
+            continue;
+        }
+
+        if (ret <= 0) {
+            dbg_time("%s poll=%d, errno: %d (%s)", __func__, ret, errno, strerror(errno));
+            break;
+        }
+
+        for (ne = 0; ne < nevents; ne++) {
+            int fd = pollfds[ne].fd;
+            short revents = pollfds[ne].revents;
+
+            if (revents & (POLLERR | POLLHUP | POLLNVAL)) {
+                dbg_time("%s poll err/hup/inval", __func__);
+                dbg_time("poll fd = %d, events = 0x%04x", fd, revents);
+                if (fd == qmidevice_control_fd[1]) {
+                } else {
+                }
+                if (revents & (POLLERR | POLLHUP | POLLNVAL))
+                    goto __GobiNetThread_quit;
+            }
+
+            if ((revents & POLLIN) == 0)
+                continue;
+
+            if (fd == qmidevice_control_fd[1]) {
+                int triger_event;
+                if (read(fd, &triger_event, sizeof(triger_event)) == sizeof(triger_event)) {
+                    //DBG("triger_event = 0x%x", triger_event);
+                    switch (triger_event) {
+                        case RIL_REQUEST_QUIT:
+                            goto __GobiNetThread_quit;
+                        break;
+                        case SIG_EVENT_STOP:
+                            wait_for_request_quit = 1;
+                        break;
+                        default:
+                        break;
+                    }
+                }
+                continue;
+            }
+
+            {
+                ssize_t nreads;
+                PQCQMIMSG pResponse = (PQCQMIMSG)cm_recv_buf;
+
+                nreads = read(fd, &pResponse->MUXMsg, sizeof(cm_recv_buf) - sizeof(QCQMI_HDR));
+                if (nreads <= 0)
+                {
+                    dbg_time("%s read=%d errno: %d (%s)", __func__, (int)nreads, errno, strerror(errno));
+                    break;
+                }
+
+                for (i = 0; i < sizeof(qmiclientId)/sizeof(qmiclientId[0]); i++)
+                {
+                    if (qmiclientId[i] == fd)
+                    {
+                        pResponse->QMIHdr.QMIType = i;
+                    }
+                }
+
+                pResponse->QMIHdr.IFType = USB_CTL_MSG_TYPE_QMI;
+                pResponse->QMIHdr.Length = cpu_to_le16(nreads + sizeof(QCQMI_HDR) - 1);
+                pResponse->QMIHdr.CtlFlags = 0x00;
+                pResponse->QMIHdr.ClientId = (fd&0xFF) ? fd&0xFF : pResponse->QMIHdr.QMIType;
+
+                QmiThreadRecvQMI(pResponse);
+            }
+        }
+    }
+
+__GobiNetThread_quit:
+    GobiNetDeInit();
+    qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_DISCONNECTED);
+    QmiThreadRecvQMI(NULL); //main thread may be pending on QmiThreadSendQMI()
+    dbg_time("%s exit", __func__);
+    pthread_exit(NULL);
+    return NULL;
+}
+
+const struct qmi_device_ops gobi_qmidev_ops = {
+    .deinit = GobiNetDeInit,
+    .send = GobiNetSendQMI,
+    .read = GobiNetThread,
+};
+#endif
+
diff --git a/package/wwan/driver/quectel_cm_5G/src/MPQCTL.h b/package/wwan/driver/quectel_cm_5G/src/MPQCTL.h
new file mode 100644
index 000000000..c2faa3c46
--- /dev/null
+++ b/package/wwan/driver/quectel_cm_5G/src/MPQCTL.h
@@ -0,0 +1,390 @@
+/*===========================================================================
+
+                         M P Q C T L. H
+DESCRIPTION:
+
+  This module contains QMI QCTL module.
+
+INITIALIZATION AND SEQUENCING REQUIREMENTS:
+
+Copyright (C) 2011 by Qualcomm Technologies, Incorporated. All Rights Reserved.
+===========================================================================*/ + +#ifndef MPQCTL_H +#define MPQCTL_H + +#include "MPQMI.h" + +#pragma pack(push, 1) + +// ================= QMICTL ================== + +// QMICTL Control Flags +#define QMICTL_CTL_FLAG_CMD 0x00 +#define QMICTL_CTL_FLAG_RSP 0x01 +#define QMICTL_CTL_FLAG_IND 0x02 + +#if 0 +typedef struct _QMICTL_TRANSACTION_ITEM +{ + LIST_ENTRY List; + UCHAR TransactionId; // QMICTL transaction id + PVOID Context; // Adapter or IocDev + PIRP Irp; +} QMICTL_TRANSACTION_ITEM, *PQMICTL_TRANSACTION_ITEM; +#endif + +typedef struct _QCQMICTL_MSG_HDR +{ + UCHAR CtlFlags; // 00-cmd, 01-rsp, 10-ind + UCHAR TransactionId; + USHORT QMICTLType; + USHORT Length; +} __attribute__ ((packed)) QCQMICTL_MSG_HDR, *PQCQMICTL_MSG_HDR; + +#define QCQMICTL_MSG_HDR_SIZE sizeof(QCQMICTL_MSG_HDR) + +typedef struct _QCQMICTL_MSG_HDR_RESP +{ + UCHAR CtlFlags; // 00-cmd, 01-rsp, 10-ind + UCHAR TransactionId; + USHORT QMICTLType; + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT +} __attribute__ ((packed)) QCQMICTL_MSG_HDR_RESP, *PQCQMICTL_MSG_HDR_RESP; + +typedef struct _QCQMICTL_MSG +{ + UCHAR CtlFlags; // 00-cmd, 01-rsp, 10-ind + UCHAR TransactionId; + USHORT QMICTLType; + USHORT Length; + UCHAR Payload; +} __attribute__ ((packed)) QCQMICTL_MSG, *PQCQMICTL_MSG; + +// TLV Header +typedef struct _QCQMICTL_TLV_HDR +{ + UCHAR TLVType; + USHORT TLVLength; +} __attribute__ ((packed)) QCQMICTL_TLV_HDR, *PQCQMICTL_TLV_HDR; + +#define QCQMICTL_TLV_HDR_SIZE sizeof(QCQMICTL_TLV_HDR) + +// QMICTL Type +#define QMICTL_SET_INSTANCE_ID_REQ 0x0020 +#define QMICTL_SET_INSTANCE_ID_RESP 0x0020 +#define QMICTL_GET_VERSION_REQ 0x0021 +#define QMICTL_GET_VERSION_RESP 0x0021 +#define QMICTL_GET_CLIENT_ID_REQ 0x0022 +#define QMICTL_GET_CLIENT_ID_RESP 0x0022 +#define QMICTL_RELEASE_CLIENT_ID_REQ 0x0023 +#define QMICTL_RELEASE_CLIENT_ID_RESP 0x0023 +#define QMICTL_REVOKE_CLIENT_ID_IND 0x0024 +#define QMICTL_INVALID_CLIENT_ID_IND 0x0025 +#define QMICTL_SET_DATA_FORMAT_REQ 0x0026 +#define QMICTL_SET_DATA_FORMAT_RESP 0x0026 +#define QMICTL_SYNC_REQ 0x0027 +#define QMICTL_SYNC_RESP 0x0027 +#define QMICTL_SYNC_IND 0x0027 +#define QMI_MESSAGE_CTL_INTERNAL_PROXY_OPEN 0xFF00 + +#define QMICTL_FLAG_REQUEST 0x00 +#define QMICTL_FLAG_RESPONSE 0x01 +#define QMICTL_FLAG_INDICATION 0x02 + +// QMICTL Message Definitions + +typedef struct _QMICTL_SET_INSTANCE_ID_REQ_MSG +{ + UCHAR CtlFlags; // QMICTL_FLAG_REQUEST + UCHAR TransactionId; + USHORT QMICTLType; // QMICTL_SET_INSTANCE_ID_REQ + USHORT Length; // 4 + UCHAR TLVType; // QCTLV_TYPE_REQUIRED_PARAMETER + USHORT TLVLength; // 1 + UCHAR Value; // Host-unique QMI instance for this device driver +} __attribute__ ((packed)) QMICTL_SET_INSTANCE_ID_REQ_MSG, *PQMICTL_SET_INSTANCE_ID_REQ_MSG; + +typedef struct _QMICTL_SET_INSTANCE_ID_RESP_MSG +{ + UCHAR CtlFlags; // QMICTL_FLAG_RESPONSE + UCHAR TransactionId; + USHORT QMICTLType; // QMICTL_SET_INSTANCE_ID_RESP + USHORT Length; + UCHAR TLVType; // QCTLV_TYPE_RESULT_CODE + USHORT TLVLength; // 0x0004 + USHORT QMIResult; + USHORT QMIError; + UCHAR TLV2Type; // QCTLV_TYPE_REQUIRED_PARAMETER + USHORT TLV2Length; // 0x0002 + USHORT QMI_ID; // Upper byte is assigned by MSM, + // lower assigned by host +} __attribute__ ((packed)) QMICTL_SET_INSTANCE_ID_RESP_MSG, *PQMICTL_SET_INSTANCE_ID_RESP_MSG; + +typedef 
struct _QMICTL_GET_VERSION_REQ_MSG +{ + UCHAR CtlFlags; // QMICTL_FLAG_REQUEST + UCHAR TransactionId; + USHORT QMICTLType; // QMICTL_GET_VERSION_REQ + USHORT Length; // 0 + UCHAR TLVType; // QCTLV_TYPE_REQUIRED_PARAMETER + USHORT TLVLength; // var + UCHAR QMUXTypes; // List of one byte QMUX_TYPE values + // 0xFF returns a list of versions for all + // QMUX_TYPEs implemented on the device +} __attribute__ ((packed)) QMICTL_GET_VERSION_REQ_MSG, *PQMICTL_GET_VERSION_REQ_MSG; + +typedef struct _QMUX_TYPE_VERSION_STRUCT +{ + UCHAR QMUXType; + USHORT MajorVersion; + USHORT MinorVersion; +} __attribute__ ((packed)) QMUX_TYPE_VERSION_STRUCT, *PQMUX_TYPE_VERSION_STRUCT; + +typedef struct _ADDENDUM_VERSION_PREAMBLE +{ + UCHAR LabelLength; + UCHAR Label; +} __attribute__ ((packed)) ADDENDUM_VERSION_PREAMBLE, *PADDENDUM_VERSION_PREAMBLE; + +#define QMICTL_GET_VERSION_RSP_TLV_TYPE_VERSION 0x01 +#define QMICTL_GET_VERSION_RSP_TLV_TYPE_ADD_VERSION 0x10 + +typedef struct _QMICTL_GET_VERSION_RESP_MSG +{ + UCHAR CtlFlags; // QMICTL_FLAG_RESPONSE + UCHAR TransactionId; + USHORT QMICTLType; // QMICTL_GET_VERSION_RESP + USHORT Length; + UCHAR TLVType; // QCTLV_TYPE_RESULT_CODE + USHORT TLVLength; // 0x0004 + USHORT QMIResult; + USHORT QMIError; + UCHAR TLV2Type; // QCTLV_TYPE_REQUIRED_PARAMETER + USHORT TLV2Length; // var + UCHAR NumElements; // Num of QMUX_TYPE_VERSION_STRUCT + QMUX_TYPE_VERSION_STRUCT TypeVersion[0]; +} __attribute__ ((packed)) QMICTL_GET_VERSION_RESP_MSG, *PQMICTL_GET_VERSION_RESP_MSG; + +typedef struct _QMICTL_GET_CLIENT_ID_REQ_MSG +{ + UCHAR CtlFlags; // QMICTL_FLAG_REQUEST + UCHAR TransactionId; + USHORT QMICTLType; // QMICTL_GET_CLIENT_ID_REQ + USHORT Length; + UCHAR TLVType; // QCTLV_TYPE_REQUIRED_PARAMETER + USHORT TLVLength; // 1 + UCHAR QMIType; // QMUX type +} __attribute__ ((packed)) QMICTL_GET_CLIENT_ID_REQ_MSG, *PQMICTL_GET_CLIENT_ID_REQ_MSG; + +typedef struct _QMICTL_GET_CLIENT_ID_RESP_MSG +{ + UCHAR CtlFlags; // QMICTL_FLAG_RESPONSE + UCHAR TransactionId; + USHORT QMICTLType; // QMICTL_GET_CLIENT_ID_RESP + USHORT Length; + UCHAR TLVType; // QCTLV_TYPE_RESULT_CODE + USHORT TLVLength; // 0x0004 + USHORT QMIResult; // result code + USHORT QMIError; // error code + UCHAR TLV2Type; // QCTLV_TYPE_REQUIRED_PARAMETER + USHORT TLV2Length; // 2 + UCHAR QMIType; + UCHAR ClientId; +} __attribute__ ((packed)) QMICTL_GET_CLIENT_ID_RESP_MSG, *PQMICTL_GET_CLIENT_ID_RESP_MSG; + +typedef struct _QMICTL_RELEASE_CLIENT_ID_REQ_MSG +{ + UCHAR CtlFlags; // QMICTL_FLAG_REQUEST + UCHAR TransactionId; + USHORT QMICTLType; // QMICTL_RELEASE_CLIENT_ID_REQ + USHORT Length; + UCHAR TLVType; // QCTLV_TYPE_REQUIRED_PARAMETER + USHORT TLVLength; // 0x0002 + UCHAR QMIType; + UCHAR ClientId; +} __attribute__ ((packed)) QMICTL_RELEASE_CLIENT_ID_REQ_MSG, *PQMICTL_RELEASE_CLIENT_ID_REQ_MSG; + +typedef struct _QMICTL_RELEASE_CLIENT_ID_RESP_MSG +{ + UCHAR CtlFlags; // QMICTL_FLAG_RESPONSE + UCHAR TransactionId; + USHORT QMICTLType; // QMICTL_RELEASE_CLIENT_ID_RESP + USHORT Length; + UCHAR TLVType; // QCTLV_TYPE_RESULT_CODE + USHORT TLVLength; // 0x0004 + USHORT QMIResult; // result code + USHORT QMIError; // error code + UCHAR TLV2Type; // QCTLV_TYPE_REQUIRED_PARAMETER + USHORT TLV2Length; // 2 + UCHAR QMIType; + UCHAR ClientId; +} __attribute__ ((packed)) QMICTL_RELEASE_CLIENT_ID_RESP_MSG, *PQMICTL_RELEASE_CLIENT_ID_RESP_MSG; + +typedef struct _QMICTL_REVOKE_CLIENT_ID_IND_MSG +{ + UCHAR CtlFlags; // QMICTL_FLAG_INDICATION + UCHAR TransactionId; + USHORT QMICTLType; // QMICTL_REVOKE_CLIENT_ID_IND + USHORT Length; + 
UCHAR TLVType; // QCTLV_TYPE_REQUIRED_PARAMETER + USHORT TLVLength; // 0x0002 + UCHAR QMIType; + UCHAR ClientId; +} __attribute__ ((packed)) QMICTL_REVOKE_CLIENT_ID_IND_MSG, *PQMICTL_REVOKE_CLIENT_ID_IND_MSG; + +typedef struct _QMICTL_INVALID_CLIENT_ID_IND_MSG +{ + UCHAR CtlFlags; // QMICTL_FLAG_INDICATION + UCHAR TransactionId; + USHORT QMICTLType; // QMICTL_REVOKE_CLIENT_ID_IND + USHORT Length; + UCHAR TLVType; // QCTLV_TYPE_REQUIRED_PARAMETER + USHORT TLVLength; // 0x0002 + UCHAR QMIType; + UCHAR ClientId; +} __attribute__ ((packed)) QMICTL_INVALID_CLIENT_ID_IND_MSG, *PQMICTL_INVALID_CLIENT_ID_IND_MSG; + +typedef struct _QMICTL_SET_DATA_FORMAT_REQ_MSG +{ + UCHAR CtlFlags; // QMICTL_FLAG_REQUEST + UCHAR TransactionId; + USHORT QMICTLType; // QMICTL_SET_DATA_FORMAT_REQ + USHORT Length; + UCHAR TLVType; // QCTLV_TYPE_REQUIRED_PARAMETER + USHORT TLVLength; // 1 + UCHAR DataFormat; // 0-default; 1-QoS hdr present +} __attribute__ ((packed)) QMICTL_SET_DATA_FORMAT_REQ_MSG, *PQMICTL_SET_DATA_FORMAT_REQ_MSG; + +#ifdef QC_IP_MODE +#define SET_DATA_FORMAT_TLV_TYPE_LINK_PROTO 0x10 +#define SET_DATA_FORMAT_LINK_PROTO_ETH 0x0001 +#define SET_DATA_FORMAT_LINK_PROTO_IP 0x0002 +typedef struct _QMICTL_SET_DATA_FORMAT_TLV_LINK_PROT +{ + UCHAR TLVType; // Link-Layer Protocol + USHORT TLVLength; // 2 + USHORT LinkProt; // 0x1: ETH; 0x2: IP +} QMICTL_SET_DATA_FORMAT_TLV_LINK_PROT, *PQMICTL_SET_DATA_FORMAT_TLV_LINK_PROT; + +#ifdef QCMP_UL_TLP +#define SET_DATA_FORMAT_TLV_TYPE_UL_TLP 0x11 +typedef struct _QMICTL_SET_DATA_FORMAT_TLV_UL_TLP +{ + UCHAR TLVType; // 0x11, Uplink TLP Setting + USHORT TLVLength; // 1 + UCHAR UlTlpSetting; // 0x0: Disable; 0x01: Enable +} QMICTL_SET_DATA_FORMAT_TLV_UL_TLP, *PQMICTL_SET_DATA_FORMAT_TLV_UL_TLP; +#endif // QCMP_UL_TLP + +#ifdef QCMP_DL_TLP +#define SET_DATA_FORMAT_TLV_TYPE_DL_TLP 0x13 +typedef struct _QMICTL_SET_DATA_FORMAT_TLV_DL_TLP +{ + UCHAR TLVType; // 0x11, Uplink TLP Setting + USHORT TLVLength; // 1 + UCHAR DlTlpSetting; // 0x0: Disable; 0x01: Enable +} QMICTL_SET_DATA_FORMAT_TLV_DL_TLP, *PQMICTL_SET_DATA_FORMAT_TLV_DL_TLP; +#endif // QCMP_DL_TLP + +#endif // QC_IP_MODE + +#ifdef MP_QCQOS_ENABLED +#define SET_DATA_FORMAT_TLV_TYPE_QOS_SETTING 0x12 +typedef struct _QMICTL_SET_DATA_FORMAT_TLV_QOS_SETTING +{ + UCHAR TLVType; // 0x12, QoS setting + USHORT TLVLength; // 1 + UCHAR QosSetting; // 0x0: Disable; 0x01: Enable +} QMICTL_SET_DATA_FORMAT_TLV_QOS_SETTING, *PQMICTL_SET_DATA_FORMAT_TLV_QOS_SETTING; +#endif // MP_QCQOS_ENABLED + +typedef struct _QMICTL_SET_DATA_FORMAT_RESP_MSG +{ + UCHAR CtlFlags; // QMICTL_FLAG_RESPONSE + UCHAR TransactionId; + USHORT QMICTLType; // QMICTL_SET_DATA_FORMAT_RESP + USHORT Length; + UCHAR TLVType; // QCTLV_TYPE_RESULT_CODE + USHORT TLVLength; // 0x0004 + USHORT QMIResult; // result code + USHORT QMIError; // error code +} __attribute__ ((packed)) QMICTL_SET_DATA_FORMAT_RESP_MSG, *PQMICTL_SET_DATA_FORMAT_RESP_MSG; + +typedef struct _QMICTL_SYNC_REQ_MSG +{ + UCHAR CtlFlags; // QMICTL_FLAG_REQUEST + UCHAR TransactionId; + USHORT QMICTLType; // QMICTL_CTL_SYNC_REQ + USHORT Length; // 0 +} __attribute__ ((packed)) QMICTL_SYNC_REQ_MSG, *PQMICTL_SYNC_REQ_MSG; + +typedef struct _QMICTL_SYNC_RESP_MSG +{ + UCHAR CtlFlags; // QMICTL_FLAG_RESPONSE + UCHAR TransactionId; + USHORT QMICTLType; // QMICTL_CTL_SYNC_RESP + USHORT Length; + UCHAR TLVType; // QCTLV_TYPE_RESULT_CODE + USHORT TLVLength; // 0x0004 + USHORT QMIResult; + USHORT QMIError; +} __attribute__ ((packed)) QMICTL_SYNC_RESP_MSG, *PQMICTL_SYNC_RESP_MSG; + +typedef struct 
_QMICTL_SYNC_IND_MSG +{ + UCHAR CtlFlags; // QMICTL_FLAG_INDICATION + UCHAR TransactionId; + USHORT QMICTLType; // QMICTL_REVOKE_CLIENT_ID_IND + USHORT Length; +} __attribute__ ((packed)) QMICTL_SYNC_IND_MSG, *PQMICTL_SYNC_IND_MSG; + +typedef struct _QMICTL_LIBQMI_PROXY_OPEN_MSG +{ + UCHAR CtlFlags; // QMICTL_FLAG_RESPONSE + UCHAR TransactionId; + USHORT QMICTLType; // QMICTL_SET_DATA_FORMAT_RESP + USHORT Length; + UCHAR TLVType; // QCTLV_TYPE_RESULT_CODE + USHORT TLVLength; // 0x0004 + char device_path[0]; // result code +} __attribute__ ((packed)) QMICTL_LIBQMI_PROXY_OPEN_MSG, *PQMICTL_LIBQMI_PROXY_OPEN_MSG; + +typedef struct _QMICTL_MSG +{ + union + { + // Message Header + QCQMICTL_MSG_HDR QMICTLMsgHdr; + QCQMICTL_MSG_HDR_RESP QMICTLMsgHdrRsp; + + // QMICTL Message + QMICTL_SET_INSTANCE_ID_REQ_MSG SetInstanceIdReq; + QMICTL_SET_INSTANCE_ID_RESP_MSG SetInstanceIdRsp; + QMICTL_GET_VERSION_REQ_MSG GetVersionReq; + QMICTL_GET_VERSION_RESP_MSG GetVersionRsp; + QMICTL_GET_CLIENT_ID_REQ_MSG GetClientIdReq; + QMICTL_GET_CLIENT_ID_RESP_MSG GetClientIdRsp; + QMICTL_RELEASE_CLIENT_ID_REQ_MSG ReleaseClientIdReq; + QMICTL_RELEASE_CLIENT_ID_RESP_MSG ReleaseClientIdRsp; + QMICTL_REVOKE_CLIENT_ID_IND_MSG RevokeClientIdInd; + QMICTL_INVALID_CLIENT_ID_IND_MSG InvalidClientIdInd; + QMICTL_SET_DATA_FORMAT_REQ_MSG SetDataFormatReq; + QMICTL_SET_DATA_FORMAT_RESP_MSG SetDataFormatRsp; + QMICTL_SYNC_REQ_MSG SyncReq; + QMICTL_SYNC_RESP_MSG SyncRsp; + QMICTL_SYNC_IND_MSG SyncInd; + QMICTL_LIBQMI_PROXY_OPEN_MSG LibQmiProxyOpenReq; + }; +} __attribute__ ((packed)) QMICTL_MSG, *PQMICTL_MSG; +#pragma pack(pop) + +#endif // MPQCTL_H diff --git a/package/wwan/driver/quectel_cm_5G/src/MPQMI.h b/package/wwan/driver/quectel_cm_5G/src/MPQMI.h new file mode 100644 index 000000000..f5af61274 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/MPQMI.h @@ -0,0 +1,325 @@ +/*=========================================================================== + + M P Q M I. H +DESCRIPTION: + + This module contains forward references to the QMI module. + +INITIALIZATION AND SEQUENCING REQUIREMENTS: + +Copyright (C) 2011 by Qualcomm Technologies, Incorporated. All Rights Reserved. +===========================================================================*/ +/*=========================================================================== + + EDIT HISTORY FOR FILE + $Header: //depot/QMI/win/qcdrivers/ndis/MPQMI.h#3 $ + +when who what, where, why +-------- --- ---------------------------------------------------------- +11/20/04 hg Initial version. 
+===========================================================================*/ + +#ifndef USBQMI_H +#define USBQMI_H + +typedef uint8_t uint8; +typedef int8_t int8; +typedef uint16_t uint16; +typedef int16_t int16; +typedef uint32_t uint32; +typedef uint64_t uint64; + +typedef signed char CHAR; +typedef unsigned char UCHAR; +typedef short SHORT; +typedef unsigned short USHORT; +typedef int INT; +typedef unsigned int UINT; +typedef long LONG; +typedef unsigned int ULONG; +typedef unsigned long long ULONG64; +typedef signed char *PCHAR; +typedef unsigned char *PUCHAR; +typedef int *PINT; +typedef int BOOL; + +#define TRUE (1 == 1) +#define FALSE (1 != 1) + +#define QMICTL_SUPPORTED_MAJOR_VERSION 1 +#define QMICTL_SUPPORTED_MINOR_VERSION 0 + +#pragma pack(push, 1) + +// ========= USB Control Message ========== + +#define USB_CTL_MSG_TYPE_QMI 0x01 + +// USB Control Message +typedef struct _QCUSB_CTL_MSG_HDR +{ + UCHAR IFType; +} __attribute__ ((packed)) QCUSB_CTL_MSG_HDR, *PQCUSB_CTL_MSG_HDR; + +#define QCUSB_CTL_MSG_HDR_SIZE sizeof(QCUSB_CTL_MSG_HDR) + +typedef struct _QCUSB_CTL_MSG +{ + UCHAR IFType; + UCHAR Message; +} __attribute__ ((packed)) QCUSB_CTL_MSG, *PQCUSB_CTL_MSG; + +#define QCTLV_TYPE_REQUIRED_PARAMETER 0x01 +#define QCTLV_TYPE_RESULT_CODE 0x02 + +// ================= QMI ================== + +// Define QMI Type +typedef enum _QMI_SERVICE_TYPE +{ + QMUX_TYPE_CTL = 0x00, + QMUX_TYPE_WDS = 0x01, + QMUX_TYPE_DMS = 0x02, + QMUX_TYPE_NAS = 0x03, + QMUX_TYPE_QOS = 0x04, + QMUX_TYPE_WMS = 0x05, + QMUX_TYPE_PDS = 0x06, + QMUX_TYPE_UIM = 0x0B, + QMUX_TYPE_WDS_IPV6 = 0x11, + QMUX_TYPE_WDS_ADMIN = 0x1A, + QMUX_TYPE_COEX = 0x22, + QMUX_TYPE_MAX = 0xFF, + QMUX_TYPE_ALL = 0xFF +} QMI_SERVICE_TYPE; + +typedef enum _QMI_RESULT_CODE_TYPE +{ + QMI_RESULT_SUCCESS = 0x0000, + QMI_RESULT_FAILURE = 0x0001 +} QMI_RESULT_CODE_TYPE; + +typedef enum _QMI_ERROR_CODE_TYPE +{ + QMI_ERR_NONE = 0x0000 + ,QMI_ERR_MALFORMED_MSG = 0x0001 + ,QMI_ERR_NO_MEMORY = 0x0002 + ,QMI_ERR_INTERNAL = 0x0003 + ,QMI_ERR_ABORTED = 0x0004 + ,QMI_ERR_CLIENT_IDS_EXHAUSTED = 0x0005 + ,QMI_ERR_UNABORTABLE_TRANSACTION = 0x0006 + ,QMI_ERR_INVALID_CLIENT_ID = 0x0007 + ,QMI_ERR_NO_THRESHOLDS = 0x0008 + ,QMI_ERR_INVALID_HANDLE = 0x0009 + ,QMI_ERR_INVALID_PROFILE = 0x000A + ,QMI_ERR_INVALID_PINID = 0x000B + ,QMI_ERR_INCORRECT_PIN = 0x000C + ,QMI_ERR_NO_NETWORK_FOUND = 0x000D + ,QMI_ERR_CALL_FAILED = 0x000E + ,QMI_ERR_OUT_OF_CALL = 0x000F + ,QMI_ERR_NOT_PROVISIONED = 0x0010 + ,QMI_ERR_MISSING_ARG = 0x0011 + ,QMI_ERR_ARG_TOO_LONG = 0x0013 + ,QMI_ERR_INVALID_TX_ID = 0x0016 + ,QMI_ERR_DEVICE_IN_USE = 0x0017 + ,QMI_ERR_OP_NETWORK_UNSUPPORTED = 0x0018 + ,QMI_ERR_OP_DEVICE_UNSUPPORTED = 0x0019 + ,QMI_ERR_NO_EFFECT = 0x001A + ,QMI_ERR_NO_FREE_PROFILE = 0x001B + ,QMI_ERR_INVALID_PDP_TYPE = 0x001C + ,QMI_ERR_INVALID_TECH_PREF = 0x001D + ,QMI_ERR_INVALID_PROFILE_TYPE = 0x001E + ,QMI_ERR_INVALID_SERVICE_TYPE = 0x001F + ,QMI_ERR_INVALID_REGISTER_ACTION = 0x0020 + ,QMI_ERR_INVALID_PS_ATTACH_ACTION = 0x0021 + ,QMI_ERR_AUTHENTICATION_FAILED = 0x0022 + ,QMI_ERR_PIN_BLOCKED = 0x0023 + ,QMI_ERR_PIN_PERM_BLOCKED = 0x0024 + ,QMI_ERR_SIM_NOT_INITIALIZED = 0x0025 + ,QMI_ERR_MAX_QOS_REQUESTS_IN_USE = 0x0026 + ,QMI_ERR_INCORRECT_FLOW_FILTER = 0x0027 + ,QMI_ERR_NETWORK_QOS_UNAWARE = 0x0028 + ,QMI_ERR_INVALID_QOS_ID = 0x0029 + ,QMI_ERR_INVALID_ID = 0x0029 + ,QMI_ERR_REQUESTED_NUM_UNSUPPORTED = 0x002A + ,QMI_ERR_INTERFACE_NOT_FOUND = 0x002B + ,QMI_ERR_FLOW_SUSPENDED = 0x002C + ,QMI_ERR_INVALID_DATA_FORMAT = 0x002D + ,QMI_ERR_GENERAL = 0x002E + ,QMI_ERR_UNKNOWN = 
0x002F + ,QMI_ERR_INVALID_ARG = 0x0030 + ,QMI_ERR_INVALID_INDEX = 0x0031 + ,QMI_ERR_NO_ENTRY = 0x0032 + ,QMI_ERR_DEVICE_STORAGE_FULL = 0x0033 + ,QMI_ERR_DEVICE_NOT_READY = 0x0034 + ,QMI_ERR_NETWORK_NOT_READY = 0x0035 + ,QMI_ERR_CAUSE_CODE = 0x0036 + ,QMI_ERR_MESSAGE_NOT_SENT = 0x0037 + ,QMI_ERR_MESSAGE_DELIVERY_FAILURE = 0x0038 + ,QMI_ERR_INVALID_MESSAGE_ID = 0x0039 + ,QMI_ERR_ENCODING = 0x003A + ,QMI_ERR_AUTHENTICATION_LOCK = 0x003B + ,QMI_ERR_INVALID_TRANSITION = 0x003C + ,QMI_ERR_NOT_A_MCAST_IFACE = 0x003D + ,QMI_ERR_MAX_MCAST_REQUESTS_IN_USE = 0x003E + ,QMI_ERR_INVALID_MCAST_HANDLE = 0x003F + ,QMI_ERR_INVALID_IP_FAMILY_PREF = 0x0040 + ,QMI_ERR_SESSION_INACTIVE = 0x0041 + ,QMI_ERR_SESSION_INVALID = 0x0042 + ,QMI_ERR_SESSION_OWNERSHIP = 0x0043 + ,QMI_ERR_INSUFFICIENT_RESOURCES = 0x0044 + ,QMI_ERR_DISABLED = 0x0045 + ,QMI_ERR_INVALID_OPERATION = 0x0046 + ,QMI_ERR_INVALID_QMI_CMD = 0x0047 + ,QMI_ERR_TPDU_TYPE = 0x0048 + ,QMI_ERR_SMSC_ADDR = 0x0049 + ,QMI_ERR_INFO_UNAVAILABLE = 0x004A + ,QMI_ERR_SEGMENT_TOO_LONG = 0x004B + ,QMI_ERR_SEGMENT_ORDER = 0x004C + ,QMI_ERR_BUNDLING_NOT_SUPPORTED = 0x004D + ,QMI_ERR_OP_PARTIAL_FAILURE = 0x004E + ,QMI_ERR_POLICY_MISMATCH = 0x004F + ,QMI_ERR_SIM_FILE_NOT_FOUND = 0x0050 + ,QMI_ERR_EXTENDED_INTERNAL = 0x0051 + ,QMI_ERR_ACCESS_DENIED = 0x0052 + ,QMI_ERR_HARDWARE_RESTRICTED = 0x0053 + ,QMI_ERR_ACK_NOT_SENT = 0x0054 + ,QMI_ERR_INJECT_TIMEOUT = 0x0055 + ,QMI_ERR_INCOMPATIBLE_STATE = 0x005A + ,QMI_ERR_FDN_RESTRICT = 0x005B + ,QMI_ERR_SUPS_FAILURE_CAUSE = 0x005C + ,QMI_ERR_NO_RADIO = 0x005D + ,QMI_ERR_NOT_SUPPORTED = 0x005E + ,QMI_ERR_NO_SUBSCRIPTION = 0x005F + ,QMI_ERR_CARD_CALL_CONTROL_FAILED = 0x0060 + ,QMI_ERR_NETWORK_ABORTED = 0x0061 + ,QMI_ERR_MSG_BLOCKED = 0x0062 + ,QMI_ERR_INVALID_SESSION_TYPE = 0x0064 + ,QMI_ERR_INVALID_PB_TYPE = 0x0065 + ,QMI_ERR_NO_SIM = 0x0066 + ,QMI_ERR_PB_NOT_READY = 0x0067 + ,QMI_ERR_PIN_RESTRICTION = 0x0068 + ,QMI_ERR_PIN2_RESTRICTION = 0x0069 + ,QMI_ERR_PUK_RESTRICTION = 0x006A + ,QMI_ERR_PUK2_RESTRICTION = 0x006B + ,QMI_ERR_PB_ACCESS_RESTRICTED = 0x006C + ,QMI_ERR_PB_DELETE_IN_PROG = 0x006D + ,QMI_ERR_PB_TEXT_TOO_LONG = 0x006E + ,QMI_ERR_PB_NUMBER_TOO_LONG = 0x006F + ,QMI_ERR_PB_HIDDEN_KEY_RESTRICTION = 0x0070 +} QMI_ERROR_CODE_TYPE; + +#define QCQMI_CTL_FLAG_SERVICE 0x80 +#define QCQMI_CTL_FLAG_CTL_POINT 0x00 + +typedef struct _QCQMI_HDR +{ + UCHAR IFType; + USHORT Length; + UCHAR CtlFlags; // reserved + UCHAR QMIType; + UCHAR ClientId; +} __attribute__ ((packed)) QCQMI_HDR, *PQCQMI_HDR; + +#define QCQMI_HDR_SIZE (sizeof(QCQMI_HDR)-1) + +typedef struct _QCQMI +{ + UCHAR IFType; + USHORT Length; + UCHAR CtlFlags; // reserved + UCHAR QMIType; + UCHAR ClientId; + UCHAR SDU; +} __attribute__ ((packed)) QCQMI, *PQCQMI; + +typedef struct _QMI_SERVICE_VERSION +{ + USHORT Major; + USHORT Minor; + USHORT AddendumMajor; + USHORT AddendumMinor; +} __attribute__ ((packed)) QMI_SERVICE_VERSION, *PQMI_SERVICE_VERSION; + +// ================= QMUX ================== + +#define QMUX_MSG_OVERHEAD_BYTES 4 // Type(USHORT) Length(USHORT) -- header + +#define QMUX_BROADCAST_CID 0xFF + +typedef struct _QCQMUX_HDR +{ + UCHAR CtlFlags; // 0: single QMUX Msg; 1: + USHORT TransactionId; +} __attribute__ ((packed)) QCQMUX_HDR, *PQCQMUX_HDR; + +typedef struct _QCQMUX +{ + UCHAR CtlFlags; // 0: single QMUX Msg; 1: + USHORT TransactionId; + UCHAR Message; // Type(2), Length(2), Value +} __attribute__ ((packed)) QCQMUX, *PQCQMUX; + +#define QCQMUX_HDR_SIZE sizeof(QCQMUX_HDR) + +typedef struct _QCQMUX_MSG_HDR +{ + USHORT Type; + USHORT Length; +} 
__attribute__ ((packed)) QCQMUX_MSG_HDR, *PQCQMUX_MSG_HDR; + +#define QCQMUX_MSG_HDR_SIZE sizeof(QCQMUX_MSG_HDR) + +typedef struct _QCQMUX_MSG_HDR_RESP +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT +} __attribute__ ((packed)) QCQMUX_MSG_HDR_RESP, *PQCQMUX_MSG_HDR_RESP; + +typedef struct _QCQMUX_TLV +{ + UCHAR Type; + USHORT Length; + UCHAR Value; +} __attribute__ ((packed)) QCQMUX_TLV, *PQCQMUX_TLV; + +typedef struct _QMI_TLV_HDR +{ + UCHAR TLVType; + USHORT TLVLength; +} __attribute__ ((packed)) QMI_TLV_HDR, *PQMI_TLV_HDR; + +typedef struct _QMI_TLV +{ + UCHAR TLVType; + USHORT TLVLength; + union { + int8_t s8; + uint8_t u8; + int16_t s16; + uint16_t u16; + int32_t s32; + uint32_t u32; + uint64_t u64; + }; +} __attribute__ ((packed)) QMI_TLV, *PQMI_TLV; + +// QMUX Message Definitions -- QMI SDU +#define QMUX_CTL_FLAG_SINGLE_MSG 0x00 +#define QMUX_CTL_FLAG_COMPOUND_MSG 0x01 +#define QMUX_CTL_FLAG_TYPE_CMD 0x00 +#define QMUX_CTL_FLAG_TYPE_RSP 0x02 +#define QMUX_CTL_FLAG_TYPE_IND 0x04 +#define QMUX_CTL_FLAG_MASK_COMPOUND 0x01 +#define QMUX_CTL_FLAG_MASK_TYPE 0x06 // 00-cmd, 01-rsp, 10-ind + +#pragma pack(pop) + +#endif // USBQMI_H diff --git a/package/wwan/driver/quectel_cm_5G/src/MPQMUX.c b/package/wwan/driver/quectel_cm_5G/src/MPQMUX.c new file mode 100644 index 000000000..7c8dfb38e --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/MPQMUX.c @@ -0,0 +1,477 @@ +/****************************************************************************** + @file MPQMUX.c + @brief QMI mux. + + DESCRIPTION + Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules. + + INITIALIZATION AND SEQUENCING REQUIREMENTS + None. + + --------------------------------------------------------------------------- + Copyright (c) 2016 - 2020 Quectel Wireless Solution, Co., Ltd. All Rights Reserved. + Quectel Wireless Solution Proprietary and Confidential. + --------------------------------------------------------------------------- +******************************************************************************/ + +#include "QMIThread.h" +static char line[1024]; +static pthread_mutex_t dumpQMIMutex = PTHREAD_MUTEX_INITIALIZER; +#undef dbg +#define dbg( format, arg... 
) do {if (strlen(line) < sizeof(line)) snprintf(&line[strlen(line)], sizeof(line) - strlen(line), format, ## arg);} while (0) + +PQMI_TLV_HDR GetTLV (PQCQMUX_MSG_HDR pQMUXMsgHdr, int TLVType); + +typedef struct { + UINT type; + const char *name; +} QMI_NAME_T; + +#define qmi_name_item(type) {type, #type} + +#if 0 +static const QMI_NAME_T qmi_IFType[] = { +{USB_CTL_MSG_TYPE_QMI, "USB_CTL_MSG_TYPE_QMI"}, +}; + +static const QMI_NAME_T qmi_CtlFlags[] = { +qmi_name_item(QMICTL_CTL_FLAG_CMD), +qmi_name_item(QCQMI_CTL_FLAG_SERVICE), +}; + +static const QMI_NAME_T qmi_QMIType[] = { +qmi_name_item(QMUX_TYPE_CTL), +qmi_name_item(QMUX_TYPE_WDS), +qmi_name_item(QMUX_TYPE_DMS), +qmi_name_item(QMUX_TYPE_NAS), +qmi_name_item(QMUX_TYPE_QOS), +qmi_name_item(QMUX_TYPE_WMS), +qmi_name_item(QMUX_TYPE_PDS), +qmi_name_item(QMUX_TYPE_WDS_ADMIN), +qmi_name_item(QMUX_TYPE_COEX), +}; + +static const QMI_NAME_T qmi_ctl_CtlFlags[] = { +qmi_name_item(QMICTL_FLAG_REQUEST), +qmi_name_item(QMICTL_FLAG_RESPONSE), +qmi_name_item(QMICTL_FLAG_INDICATION), +}; +#endif + +static const QMI_NAME_T qmux_ctl_QMICTLType[] = { +// QMICTL Type +qmi_name_item(QMICTL_SET_INSTANCE_ID_REQ), // 0x0020 +qmi_name_item(QMICTL_SET_INSTANCE_ID_RESP), // 0x0020 +qmi_name_item(QMICTL_GET_VERSION_REQ), // 0x0021 +qmi_name_item(QMICTL_GET_VERSION_RESP), // 0x0021 +qmi_name_item(QMICTL_GET_CLIENT_ID_REQ), // 0x0022 +qmi_name_item(QMICTL_GET_CLIENT_ID_RESP), // 0x0022 +qmi_name_item(QMICTL_RELEASE_CLIENT_ID_REQ), // 0x0023 +qmi_name_item(QMICTL_RELEASE_CLIENT_ID_RESP), // 0x0023 +qmi_name_item(QMICTL_REVOKE_CLIENT_ID_IND), // 0x0024 +qmi_name_item(QMICTL_INVALID_CLIENT_ID_IND), // 0x0025 +qmi_name_item(QMICTL_SET_DATA_FORMAT_REQ), // 0x0026 +qmi_name_item(QMICTL_SET_DATA_FORMAT_RESP), // 0x0026 +qmi_name_item(QMICTL_SYNC_REQ), // 0x0027 +qmi_name_item(QMICTL_SYNC_RESP), // 0x0027 +qmi_name_item(QMICTL_SYNC_IND), // 0x0027 +}; + +static const QMI_NAME_T qmux_CtlFlags[] = { +qmi_name_item(QMUX_CTL_FLAG_TYPE_CMD), +qmi_name_item(QMUX_CTL_FLAG_TYPE_RSP), +qmi_name_item(QMUX_CTL_FLAG_TYPE_IND), +}; + + +static const QMI_NAME_T qmux_wds_Type[] = { +qmi_name_item(QMIWDS_SET_EVENT_REPORT_REQ), // 0x0001 +qmi_name_item(QMIWDS_SET_EVENT_REPORT_RESP), // 0x0001 +qmi_name_item(QMIWDS_EVENT_REPORT_IND), // 0x0001 +qmi_name_item(QMIWDS_START_NETWORK_INTERFACE_REQ), // 0x0020 +qmi_name_item(QMIWDS_START_NETWORK_INTERFACE_RESP), // 0x0020 +qmi_name_item(QMIWDS_STOP_NETWORK_INTERFACE_REQ), // 0x0021 +qmi_name_item(QMIWDS_STOP_NETWORK_INTERFACE_RESP), // 0x0021 +qmi_name_item(QMIWDS_GET_PKT_SRVC_STATUS_REQ), // 0x0022 +qmi_name_item(QMIWDS_GET_PKT_SRVC_STATUS_RESP), // 0x0022 +qmi_name_item(QMIWDS_GET_PKT_SRVC_STATUS_IND), // 0x0022 +qmi_name_item(QMIWDS_GET_CURRENT_CHANNEL_RATE_REQ), // 0x0023 +qmi_name_item(QMIWDS_GET_CURRENT_CHANNEL_RATE_RESP), // 0x0023 +qmi_name_item(QMIWDS_GET_PKT_STATISTICS_REQ), // 0x0024 +qmi_name_item(QMIWDS_GET_PKT_STATISTICS_RESP), // 0x0024 +qmi_name_item(QMIWDS_MODIFY_PROFILE_SETTINGS_REQ), // 0x0028 +qmi_name_item(QMIWDS_MODIFY_PROFILE_SETTINGS_RESP), // 0x0028 +qmi_name_item(QMIWDS_GET_PROFILE_SETTINGS_REQ), // 0x002B +qmi_name_item(QMIWDS_GET_PROFILE_SETTINGS_RESP), // 0x002BD +qmi_name_item(QMIWDS_GET_DEFAULT_SETTINGS_REQ), // 0x002C +qmi_name_item(QMIWDS_GET_DEFAULT_SETTINGS_RESP), // 0x002C +qmi_name_item(QMIWDS_GET_RUNTIME_SETTINGS_REQ), // 0x002D +qmi_name_item(QMIWDS_GET_RUNTIME_SETTINGS_RESP), // 0x002D +qmi_name_item(QMIWDS_GET_MIP_MODE_REQ), // 0x002F +qmi_name_item(QMIWDS_GET_MIP_MODE_RESP), // 0x002F 
+qmi_name_item(QMIWDS_GET_DATA_BEARER_REQ), // 0x0037 +qmi_name_item(QMIWDS_GET_DATA_BEARER_RESP), // 0x0037 +qmi_name_item(QMIWDS_DUN_CALL_INFO_REQ), // 0x0038 +qmi_name_item(QMIWDS_DUN_CALL_INFO_RESP), // 0x0038 +qmi_name_item(QMIWDS_DUN_CALL_INFO_IND), // 0x0038 +qmi_name_item(QMIWDS_SET_CLIENT_IP_FAMILY_PREF_REQ), // 0x004D +qmi_name_item(QMIWDS_SET_CLIENT_IP_FAMILY_PREF_RESP), // 0x004D +qmi_name_item(QMIWDS_SET_AUTO_CONNECT_REQ), // 0x0051 +qmi_name_item(QMIWDS_SET_AUTO_CONNECT_RESP), // 0x0051 +qmi_name_item(QMIWDS_BIND_MUX_DATA_PORT_REQ), // 0x00A2 +qmi_name_item(QMIWDS_BIND_MUX_DATA_PORT_RESP), // 0x00A2 +}; + +static const QMI_NAME_T qmux_dms_Type[] = { +// ======================= DMS ============================== +qmi_name_item(QMIDMS_SET_EVENT_REPORT_REQ), // 0x0001 +qmi_name_item(QMIDMS_SET_EVENT_REPORT_RESP), // 0x0001 +qmi_name_item(QMIDMS_EVENT_REPORT_IND), // 0x0001 +qmi_name_item(QMIDMS_GET_DEVICE_CAP_REQ), // 0x0020 +qmi_name_item(QMIDMS_GET_DEVICE_CAP_RESP), // 0x0020 +qmi_name_item(QMIDMS_GET_DEVICE_MFR_REQ), // 0x0021 +qmi_name_item(QMIDMS_GET_DEVICE_MFR_RESP), // 0x0021 +qmi_name_item(QMIDMS_GET_DEVICE_MODEL_ID_REQ), // 0x0022 +qmi_name_item(QMIDMS_GET_DEVICE_MODEL_ID_RESP), // 0x0022 +qmi_name_item(QMIDMS_GET_DEVICE_REV_ID_REQ), // 0x0023 +qmi_name_item(QMIDMS_GET_DEVICE_REV_ID_RESP), // 0x0023 +qmi_name_item(QMIDMS_GET_MSISDN_REQ), // 0x0024 +qmi_name_item(QMIDMS_GET_MSISDN_RESP), // 0x0024 +qmi_name_item(QMIDMS_GET_DEVICE_SERIAL_NUMBERS_REQ), // 0x0025 +qmi_name_item(QMIDMS_GET_DEVICE_SERIAL_NUMBERS_RESP), // 0x0025 +qmi_name_item(QMIDMS_UIM_SET_PIN_PROTECTION_REQ), // 0x0027 +qmi_name_item(QMIDMS_UIM_SET_PIN_PROTECTION_RESP), // 0x0027 +qmi_name_item(QMIDMS_UIM_VERIFY_PIN_REQ), // 0x0028 +qmi_name_item(QMIDMS_UIM_VERIFY_PIN_RESP), // 0x0028 +qmi_name_item(QMIDMS_UIM_UNBLOCK_PIN_REQ), // 0x0029 +qmi_name_item(QMIDMS_UIM_UNBLOCK_PIN_RESP), // 0x0029 +qmi_name_item(QMIDMS_UIM_CHANGE_PIN_REQ), // 0x002A +qmi_name_item(QMIDMS_UIM_CHANGE_PIN_RESP), // 0x002A +qmi_name_item(QMIDMS_UIM_GET_PIN_STATUS_REQ), // 0x002B +qmi_name_item(QMIDMS_UIM_GET_PIN_STATUS_RESP), // 0x002B +qmi_name_item(QMIDMS_GET_DEVICE_HARDWARE_REV_REQ), // 0x002C +qmi_name_item(QMIDMS_GET_DEVICE_HARDWARE_REV_RESP), // 0x002C +qmi_name_item(QMIDMS_GET_OPERATING_MODE_REQ), // 0x002D +qmi_name_item(QMIDMS_GET_OPERATING_MODE_RESP), // 0x002D +qmi_name_item(QMIDMS_SET_OPERATING_MODE_REQ), // 0x002E +qmi_name_item(QMIDMS_SET_OPERATING_MODE_RESP), // 0x002E +qmi_name_item(QMIDMS_GET_ACTIVATED_STATUS_REQ), // 0x0031 +qmi_name_item(QMIDMS_GET_ACTIVATED_STATUS_RESP), // 0x0031 +qmi_name_item(QMIDMS_ACTIVATE_AUTOMATIC_REQ), // 0x0032 +qmi_name_item(QMIDMS_ACTIVATE_AUTOMATIC_RESP), // 0x0032 +qmi_name_item(QMIDMS_ACTIVATE_MANUAL_REQ), // 0x0033 +qmi_name_item(QMIDMS_ACTIVATE_MANUAL_RESP), // 0x0033 +qmi_name_item(QMIDMS_UIM_GET_ICCID_REQ), // 0x003C +qmi_name_item(QMIDMS_UIM_GET_ICCID_RESP), // 0x003C +qmi_name_item(QMIDMS_UIM_GET_CK_STATUS_REQ), // 0x0040 +qmi_name_item(QMIDMS_UIM_GET_CK_STATUS_RESP), // 0x0040 +qmi_name_item(QMIDMS_UIM_SET_CK_PROTECTION_REQ), // 0x0041 +qmi_name_item(QMIDMS_UIM_SET_CK_PROTECTION_RESP), // 0x0041 +qmi_name_item(QMIDMS_UIM_UNBLOCK_CK_REQ), // 0x0042 +qmi_name_item(QMIDMS_UIM_UNBLOCK_CK_RESP), // 0x0042 +qmi_name_item(QMIDMS_UIM_GET_IMSI_REQ), // 0x0043 +qmi_name_item(QMIDMS_UIM_GET_IMSI_RESP), // 0x0043 +qmi_name_item(QMIDMS_UIM_GET_STATE_REQ), // 0x0044 +qmi_name_item(QMIDMS_UIM_GET_STATE_RESP), // 0x0044 +qmi_name_item(QMIDMS_GET_BAND_CAP_REQ), // 0x0045 
+qmi_name_item(QMIDMS_GET_BAND_CAP_RESP), // 0x0045 +}; + +static const QMI_NAME_T qmux_qos_Type[] = { +qmi_name_item( QMI_QOS_SET_EVENT_REPORT_REQ), // 0x0001 +qmi_name_item( QMI_QOS_SET_EVENT_REPORT_RESP), // 0x0001 +qmi_name_item( QMI_QOS_SET_EVENT_REPORT_IND), // 0x0001 +qmi_name_item( QMI_QOS_BIND_DATA_PORT_REQ), // 0x002B +qmi_name_item( QMI_QOS_BIND_DATA_PORT_RESP), // 0x002B +qmi_name_item( QMI_QOS_INDICATION_REGISTER_REQ), // 0x002F +qmi_name_item( QMI_QOS_INDICATION_REGISTER_RESP), // 0x002F +qmi_name_item( QMI_QOS_GLOBAL_QOS_FLOW_IND), // 0x0031 +qmi_name_item( QMI_QOS_GET_QOS_INFO_REQ), // 0x0033 +qmi_name_item( QMI_QOS_GET_QOS_INFO_RESP), // 0x0033 +}; + +static const QMI_NAME_T qmux_nas_Type[] = { +// ======================= NAS ============================== +qmi_name_item(QMINAS_SET_EVENT_REPORT_REQ), // 0x0002 +qmi_name_item(QMINAS_SET_EVENT_REPORT_RESP), // 0x0002 +qmi_name_item(QMINAS_EVENT_REPORT_IND), // 0x0002 +qmi_name_item(QMINAS_GET_SIGNAL_STRENGTH_REQ), // 0x0020 +qmi_name_item(QMINAS_GET_SIGNAL_STRENGTH_RESP), // 0x0020 +qmi_name_item(QMINAS_PERFORM_NETWORK_SCAN_REQ), // 0x0021 +qmi_name_item(QMINAS_PERFORM_NETWORK_SCAN_RESP), // 0x0021 +qmi_name_item(QMINAS_INITIATE_NW_REGISTER_REQ), // 0x0022 +qmi_name_item(QMINAS_INITIATE_NW_REGISTER_RESP), // 0x0022 +qmi_name_item(QMINAS_INITIATE_ATTACH_REQ), // 0x0023 +qmi_name_item(QMINAS_INITIATE_ATTACH_RESP), // 0x0023 +qmi_name_item(QMINAS_GET_SERVING_SYSTEM_REQ), // 0x0024 +qmi_name_item(QMINAS_GET_SERVING_SYSTEM_RESP), // 0x0024 +qmi_name_item(QMINAS_SERVING_SYSTEM_IND), // 0x0024 +qmi_name_item(QMINAS_GET_HOME_NETWORK_REQ), // 0x0025 +qmi_name_item(QMINAS_GET_HOME_NETWORK_RESP), // 0x0025 +qmi_name_item(QMINAS_GET_PREFERRED_NETWORK_REQ), // 0x0026 +qmi_name_item(QMINAS_GET_PREFERRED_NETWORK_RESP), // 0x0026 +qmi_name_item(QMINAS_SET_PREFERRED_NETWORK_REQ), // 0x0027 +qmi_name_item(QMINAS_SET_PREFERRED_NETWORK_RESP), // 0x0027 +qmi_name_item(QMINAS_GET_FORBIDDEN_NETWORK_REQ), // 0x0028 +qmi_name_item(QMINAS_GET_FORBIDDEN_NETWORK_RESP), // 0x0028 +qmi_name_item(QMINAS_SET_FORBIDDEN_NETWORK_REQ), // 0x0029 +qmi_name_item(QMINAS_SET_FORBIDDEN_NETWORK_RESP), // 0x0029 +qmi_name_item(QMINAS_SET_TECHNOLOGY_PREF_REQ), // 0x002A +qmi_name_item(QMINAS_SET_TECHNOLOGY_PREF_RESP), // 0x002A +qmi_name_item(QMINAS_GET_RF_BAND_INFO_REQ), // 0x0031 +qmi_name_item(QMINAS_GET_RF_BAND_INFO_RESP), // 0x0031 +qmi_name_item(QMINAS_GET_CELL_LOCATION_INFO_REQ), +qmi_name_item(QMINAS_GET_CELL_LOCATION_INFO_RESP), +qmi_name_item(QMINAS_GET_PLMN_NAME_REQ), // 0x0044 +qmi_name_item(QMINAS_GET_PLMN_NAME_RESP), // 0x0044 +qmi_name_item(QUECTEL_PACKET_TRANSFER_START_IND), // 0X100 +qmi_name_item(QUECTEL_PACKET_TRANSFER_END_IND), // 0X101 +qmi_name_item(QMINAS_GET_SYS_INFO_REQ), // 0x004D +qmi_name_item(QMINAS_GET_SYS_INFO_RESP), // 0x004D +qmi_name_item(QMINAS_SYS_INFO_IND), // 0x004D +qmi_name_item(QMINAS_GET_SIG_INFO_REQ), +qmi_name_item(QMINAS_GET_SIG_INFO_RESP), + +}; + +static const QMI_NAME_T qmux_wms_Type[] = { +// ======================= WMS ============================== +qmi_name_item(QMIWMS_SET_EVENT_REPORT_REQ), // 0x0001 +qmi_name_item(QMIWMS_SET_EVENT_REPORT_RESP), // 0x0001 +qmi_name_item(QMIWMS_EVENT_REPORT_IND), // 0x0001 +qmi_name_item(QMIWMS_RAW_SEND_REQ), // 0x0020 +qmi_name_item(QMIWMS_RAW_SEND_RESP), // 0x0020 +qmi_name_item(QMIWMS_RAW_WRITE_REQ), // 0x0021 +qmi_name_item(QMIWMS_RAW_WRITE_RESP), // 0x0021 +qmi_name_item(QMIWMS_RAW_READ_REQ), // 0x0022 +qmi_name_item(QMIWMS_RAW_READ_RESP), // 0x0022 
+qmi_name_item(QMIWMS_MODIFY_TAG_REQ), // 0x0023 +qmi_name_item(QMIWMS_MODIFY_TAG_RESP), // 0x0023 +qmi_name_item(QMIWMS_DELETE_REQ), // 0x0024 +qmi_name_item(QMIWMS_DELETE_RESP), // 0x0024 +qmi_name_item(QMIWMS_GET_MESSAGE_PROTOCOL_REQ), // 0x0030 +qmi_name_item(QMIWMS_GET_MESSAGE_PROTOCOL_RESP), // 0x0030 +qmi_name_item(QMIWMS_LIST_MESSAGES_REQ), // 0x0031 +qmi_name_item(QMIWMS_LIST_MESSAGES_RESP), // 0x0031 +qmi_name_item(QMIWMS_GET_SMSC_ADDRESS_REQ), // 0x0034 +qmi_name_item(QMIWMS_GET_SMSC_ADDRESS_RESP), // 0x0034 +qmi_name_item(QMIWMS_SET_SMSC_ADDRESS_REQ), // 0x0035 +qmi_name_item(QMIWMS_SET_SMSC_ADDRESS_RESP), // 0x0035 +qmi_name_item(QMIWMS_GET_STORE_MAX_SIZE_REQ), // 0x0036 +qmi_name_item(QMIWMS_GET_STORE_MAX_SIZE_RESP), // 0x0036 +}; + +static const QMI_NAME_T qmux_wds_admin_Type[] = { +qmi_name_item(QMIWDS_ADMIN_SET_DATA_FORMAT_REQ), // 0x0020 +qmi_name_item(QMIWDS_ADMIN_SET_DATA_FORMAT_RESP), // 0x0020 +qmi_name_item(QMIWDS_ADMIN_GET_DATA_FORMAT_REQ), // 0x0021 +qmi_name_item(QMIWDS_ADMIN_GET_DATA_FORMAT_RESP), // 0x0021 +qmi_name_item(QMIWDS_ADMIN_SET_QMAP_SETTINGS_REQ), // 0x002B +qmi_name_item(QMIWDS_ADMIN_SET_QMAP_SETTINGS_RESP), // 0x002B +qmi_name_item(QMIWDS_ADMIN_GET_QMAP_SETTINGS_REQ), // 0x002C +qmi_name_item(QMIWDS_ADMIN_GET_QMAP_SETTINGS_RESP), // 0x002C +qmi_name_item(QMI_WDA_SET_LOOPBACK_CONFIG_REQ), // 0x002F +qmi_name_item(QMI_WDA_SET_LOOPBACK_CONFIG_RESP), // 0x002F +qmi_name_item(QMI_WDA_SET_LOOPBACK_CONFIG_IND), // 0x002F +}; + +static const QMI_NAME_T qmux_uim_Type[] = { +qmi_name_item( QMIUIM_READ_TRANSPARENT_REQ), // 0x0020 +qmi_name_item( QMIUIM_READ_TRANSPARENT_RESP), // 0x0020 +qmi_name_item( QMIUIM_READ_TRANSPARENT_IND), // 0x0020 +qmi_name_item( QMIUIM_READ_RECORD_REQ), // 0x0021 +qmi_name_item( QMIUIM_READ_RECORD_RESP), // 0x0021 +qmi_name_item( QMIUIM_READ_RECORD_IND), // 0x0021 +qmi_name_item( QMIUIM_WRITE_TRANSPARENT_REQ), // 0x0022 +qmi_name_item( QMIUIM_WRITE_TRANSPARENT_RESP), // 0x0022 +qmi_name_item( QMIUIM_WRITE_TRANSPARENT_IND), // 0x0022 +qmi_name_item( QMIUIM_WRITE_RECORD_REQ), // 0x0023 +qmi_name_item( QMIUIM_WRITE_RECORD_RESP), // 0x0023 +qmi_name_item( QMIUIM_WRITE_RECORD_IND), // 0x0023 +qmi_name_item( QMIUIM_SET_PIN_PROTECTION_REQ), // 0x0025 +qmi_name_item( QMIUIM_SET_PIN_PROTECTION_RESP), // 0x0025 +qmi_name_item( QMIUIM_SET_PIN_PROTECTION_IND), // 0x0025 +qmi_name_item( QMIUIM_VERIFY_PIN_REQ), // 0x0026 +qmi_name_item( QMIUIM_VERIFY_PIN_RESP), // 0x0026 +qmi_name_item( QMIUIM_VERIFY_PIN_IND), // 0x0026 +qmi_name_item( QMIUIM_UNBLOCK_PIN_REQ), // 0x0027 +qmi_name_item( QMIUIM_UNBLOCK_PIN_RESP), // 0x0027 +qmi_name_item( QMIUIM_UNBLOCK_PIN_IND), // 0x0027 +qmi_name_item( QMIUIM_CHANGE_PIN_REQ), // 0x0028 +qmi_name_item( QMIUIM_CHANGE_PIN_RESP), // 0x0028 +qmi_name_item( QMIUIM_CHANGE_PIN_IND), // 0x0028 +qmi_name_item( QMIUIM_DEPERSONALIZATION_REQ), // 0x0029 +qmi_name_item( QMIUIM_DEPERSONALIZATION_RESP), // 0x0029 +qmi_name_item( QMIUIM_EVENT_REG_REQ), // 0x002E +qmi_name_item( QMIUIM_EVENT_REG_RESP), // 0x002E +qmi_name_item( QMIUIM_GET_CARD_STATUS_REQ), // 0x002F +qmi_name_item( QMIUIM_GET_CARD_STATUS_RESP), // 0x002F +qmi_name_item( QMIUIM_STATUS_CHANGE_IND), // 0x0032 +}; + +static const QMI_NAME_T qmux_coex_Type[] = { +qmi_name_item(QMI_COEX_GET_WWAN_STATE_REQ), // 0x0022 +qmi_name_item(QMI_COEX_GET_WWAN_STATE_RESP), // 0x0022 +}; + +static const char * qmi_name_get(const QMI_NAME_T *table, size_t size, int type, const char *tag) { + static char unknow[40]; + size_t i; + + if (qmux_CtlFlags == table) { + if (!strcmp(tag, 
"_REQ")) + tag = "_CMD"; + else if (!strcmp(tag, "_RESP")) + tag = "_RSP"; + } + + for (i = 0; i < size; i++) { + if (table[i].type == (UINT)type) { + if (!tag || (strstr(table[i].name, tag))) + return table[i].name; + } + } + sprintf(unknow, "unknow_%x", type); + return unknow; +} + +#define QMI_NAME(table, type) qmi_name_get(table, sizeof(table) / sizeof(table[0]), type, 0) +#define QMUX_NAME(table, type, tag) qmi_name_get(table, sizeof(table) / sizeof(table[0]), type, tag) + +void dump_tlv(PQCQMUX_MSG_HDR pQMUXMsgHdr) { + int TLVFind = 0; + int i; + //dbg("QCQMUX_TLV-----------------------------------\n"); + //dbg("{Type,\tLength,\tValue}\n"); + + while (1) { + PQMI_TLV_HDR TLVHdr = GetTLV(pQMUXMsgHdr, 0x1000 + (++TLVFind)); + if (TLVHdr == NULL) + break; + + //if ((TLVHdr->TLVType == 0x02) && ((USHORT *)(TLVHdr+1))[0]) + { + dbg("{%02x,\t%04x,\t", TLVHdr->TLVType, le16_to_cpu(TLVHdr->TLVLength)); + for (i = 0; i < le16_to_cpu(TLVHdr->TLVLength); i++) { + dbg("%02x ", ((UCHAR *)(TLVHdr+1))[i]); + } + dbg("}\n"); + } + } // while +} + +void dump_ctl(PQCQMICTL_MSG_HDR CTLHdr) { + const char *tag; + + //dbg("QCQMICTL_MSG--------------------------------------------\n"); + //dbg("CtlFlags: %02x\t\t%s\n", CTLHdr->CtlFlags, QMI_NAME(qmi_ctl_CtlFlags, CTLHdr->CtlFlags)); + dbg("TransactionId: %02x\n", CTLHdr->TransactionId); + switch (CTLHdr->CtlFlags) { + case QMICTL_FLAG_REQUEST: tag = "_REQ"; break; + case QMICTL_FLAG_RESPONSE: tag = "_RESP"; break; + case QMICTL_FLAG_INDICATION: tag = "_IND"; break; + default: tag = 0; break; + } + dbg("QMICTLType: %04x\t%s\n", le16_to_cpu(CTLHdr->QMICTLType), + QMUX_NAME(qmux_ctl_QMICTLType, le16_to_cpu(CTLHdr->QMICTLType), tag)); + dbg("Length: %04x\n", le16_to_cpu(CTLHdr->Length)); + + dump_tlv((PQCQMUX_MSG_HDR)(&CTLHdr->QMICTLType)); +} + +int dump_qmux(QMI_SERVICE_TYPE serviceType, PQCQMUX_HDR QMUXHdr) { + PQCQMUX_MSG_HDR QMUXMsgHdr = (PQCQMUX_MSG_HDR) (QMUXHdr + 1); + const char *tag; + + //dbg("QCQMUX--------------------------------------------\n"); + switch (QMUXHdr->CtlFlags&QMUX_CTL_FLAG_MASK_TYPE) { + case QMUX_CTL_FLAG_TYPE_CMD: tag = "_REQ"; break; + case QMUX_CTL_FLAG_TYPE_RSP: tag = "_RESP"; break; + case QMUX_CTL_FLAG_TYPE_IND: tag = "_IND"; break; + default: tag = 0; break; + } + //dbg("CtlFlags: %02x\t\t%s\n", QMUXHdr->CtlFlags, QMUX_NAME(qmux_CtlFlags, QMUXHdr->CtlFlags, tag)); + dbg("TransactionId: %04x\n", le16_to_cpu(QMUXHdr->TransactionId)); + + //dbg("QCQMUX_MSG_HDR-----------------------------------\n"); + switch (serviceType) { + case QMUX_TYPE_DMS: + dbg("Type: %04x\t%s\n", le16_to_cpu(QMUXMsgHdr->Type), + QMUX_NAME(qmux_dms_Type, le16_to_cpu(QMUXMsgHdr->Type), tag)); + break; + case QMUX_TYPE_NAS: + dbg("Type: %04x\t%s\n", le16_to_cpu(QMUXMsgHdr->Type), + QMUX_NAME(qmux_nas_Type, le16_to_cpu(QMUXMsgHdr->Type), tag)); + break; + case QMUX_TYPE_WDS: + case QMUX_TYPE_WDS_IPV6: + dbg("Type: %04x\t%s\n", le16_to_cpu(QMUXMsgHdr->Type), + QMUX_NAME(qmux_wds_Type, le16_to_cpu(QMUXMsgHdr->Type), tag)); + break; + case QMUX_TYPE_WMS: + dbg("Type: %04x\t%s\n", le16_to_cpu(QMUXMsgHdr->Type), + QMUX_NAME(qmux_wms_Type, le16_to_cpu(QMUXMsgHdr->Type), tag)); + break; + case QMUX_TYPE_WDS_ADMIN: + dbg("Type: %04x\t%s\n", le16_to_cpu(QMUXMsgHdr->Type), + QMUX_NAME(qmux_wds_admin_Type, le16_to_cpu(QMUXMsgHdr->Type), tag)); + break; + case QMUX_TYPE_UIM: + dbg("Type: %04x\t%s\n", le16_to_cpu(QMUXMsgHdr->Type), + QMUX_NAME(qmux_uim_Type, le16_to_cpu(QMUXMsgHdr->Type), tag)); + break; + case QMUX_TYPE_PDS: + case QMUX_TYPE_QOS: + dbg("Type: 
%04x\t%s\n", le16_to_cpu(QMUXMsgHdr->Type), + QMUX_NAME(qmux_qos_Type, le16_to_cpu(QMUXMsgHdr->Type), tag)); + break; + case QMUX_TYPE_COEX: + dbg("Type: %04x\t%s\n", le16_to_cpu(QMUXMsgHdr->Type), + QMUX_NAME(qmux_coex_Type, le16_to_cpu(QMUXMsgHdr->Type), tag)); + break; + case QMUX_TYPE_CTL: + default: + dbg("Type: %04x\t%s\n", le16_to_cpu(QMUXMsgHdr->Type), "PDS/QOS/CTL/unknown!"); + break; + } + dbg("Length: %04x\n", le16_to_cpu(QMUXMsgHdr->Length)); + + dump_tlv(QMUXMsgHdr); + + return 0; +} + +void dump_qmi(void *dataBuffer, int dataLen) +{ + PQCQMI_HDR QMIHdr = (PQCQMI_HDR)dataBuffer; + PQCQMUX_HDR QMUXHdr = (PQCQMUX_HDR) (QMIHdr + 1); + PQCQMICTL_MSG_HDR CTLHdr = (PQCQMICTL_MSG_HDR) (QMIHdr + 1); + + int i; + + if (!debug_qmi) + return; + + pthread_mutex_lock(&dumpQMIMutex); + line[0] = 0; + for (i = 0; i < dataLen; i++) { + dbg("%02x ", ((unsigned char *)dataBuffer)[i]); + } + dbg_time("%s", line); + line[0] = 0; + + //dbg("QCQMI_HDR-----------------------------------------"); + //dbg("IFType: %02x\t\t%s", QMIHdr->IFType, QMI_NAME(qmi_IFType, QMIHdr->IFType)); + //dbg("Length: %04x", le16_to_cpu(QMIHdr->Length)); + //dbg("CtlFlags: %02x\t\t%s", QMIHdr->CtlFlags, QMI_NAME(qmi_CtlFlags, QMIHdr->CtlFlags)); + //dbg("QMIType: %02x\t\t%s", QMIHdr->QMIType, QMI_NAME(qmi_QMIType, QMIHdr->QMIType)); + //dbg("ClientId: %02x", QMIHdr->ClientId); + + if (QMIHdr->QMIType == QMUX_TYPE_CTL) { + dump_ctl(CTLHdr); + } else { + dump_qmux(QMIHdr->QMIType, QMUXHdr); + } + dbg_time("%s", line); + pthread_mutex_unlock(&dumpQMIMutex); +} diff --git a/package/wwan/driver/quectel_cm_5G/src/MPQMUX.h b/package/wwan/driver/quectel_cm_5G/src/MPQMUX.h new file mode 100644 index 000000000..32dd644f5 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/MPQMUX.h @@ -0,0 +1,4273 @@ +/*=========================================================================== + + M P Q M U X. H +DESCRIPTION: + + This file provides support for QMUX. + +INITIALIZATION AND SEQUENCING REQUIREMENTS: + +Copyright (C) 2011 by Qualcomm Technologies, Incorporated. All Rights Reserved. 
+===========================================================================*/ + +#ifndef MPQMUX_H +#define MPQMUX_H + +#include "MPQMI.h" + +#pragma pack(push, 1) + +#define QMIWDS_SET_EVENT_REPORT_REQ 0x0001 +#define QMIWDS_SET_EVENT_REPORT_RESP 0x0001 +#define QMIWDS_EVENT_REPORT_IND 0x0001 +#define QMIWDS_START_NETWORK_INTERFACE_REQ 0x0020 +#define QMIWDS_START_NETWORK_INTERFACE_RESP 0x0020 +#define QMIWDS_STOP_NETWORK_INTERFACE_REQ 0x0021 +#define QMIWDS_STOP_NETWORK_INTERFACE_RESP 0x0021 +#define QMIWDS_GET_PKT_SRVC_STATUS_REQ 0x0022 +#define QMIWDS_GET_PKT_SRVC_STATUS_RESP 0x0022 +#define QMIWDS_GET_PKT_SRVC_STATUS_IND 0x0022 +#define QMIWDS_GET_CURRENT_CHANNEL_RATE_REQ 0x0023 +#define QMIWDS_GET_CURRENT_CHANNEL_RATE_RESP 0x0023 +#define QMIWDS_GET_PKT_STATISTICS_REQ 0x0024 +#define QMIWDS_GET_PKT_STATISTICS_RESP 0x0024 +#define QMIWDS_CREATE_PROFILE_REQ 0x0027 +#define QMIWDS_CREATE_PROFILE_RESP 0x0027 +#define QMIWDS_MODIFY_PROFILE_SETTINGS_REQ 0x0028 +#define QMIWDS_MODIFY_PROFILE_SETTINGS_RESP 0x0028 +#define QMIWDS_GET_PROFILE_SETTINGS_REQ 0x002B +#define QMIWDS_GET_PROFILE_SETTINGS_RESP 0x002B +#define QMIWDS_GET_DEFAULT_SETTINGS_REQ 0x002C +#define QMIWDS_GET_DEFAULT_SETTINGS_RESP 0x002C +#define QMIWDS_GET_RUNTIME_SETTINGS_REQ 0x002D +#define QMIWDS_GET_RUNTIME_SETTINGS_RESP 0x002D +#define QMIWDS_GET_MIP_MODE_REQ 0x002F +#define QMIWDS_GET_MIP_MODE_RESP 0x002F +#define QMIWDS_GET_DATA_BEARER_REQ 0x0037 +#define QMIWDS_GET_DATA_BEARER_RESP 0x0037 +#define QMIWDS_DUN_CALL_INFO_REQ 0x0038 +#define QMIWDS_DUN_CALL_INFO_RESP 0x0038 +#define QMIWDS_DUN_CALL_INFO_IND 0x0038 +#define QMIWDS_SET_CLIENT_IP_FAMILY_PREF_REQ 0x004D +#define QMIWDS_SET_CLIENT_IP_FAMILY_PREF_RESP 0x004D +#define QMIWDS_SET_AUTO_CONNECT_REQ 0x0051 +#define QMIWDS_SET_AUTO_CONNECT_RESP 0x0051 +#define QMIWDS_BIND_MUX_DATA_PORT_REQ 0x00A2 +#define QMIWDS_BIND_MUX_DATA_PORT_RESP 0x00A2 + + +// Stats masks +#define QWDS_STAT_MASK_TX_PKT_OK 0x00000001 +#define QWDS_STAT_MASK_RX_PKT_OK 0x00000002 +#define QWDS_STAT_MASK_TX_PKT_ER 0x00000004 +#define QWDS_STAT_MASK_RX_PKT_ER 0x00000008 +#define QWDS_STAT_MASK_TX_PKT_OF 0x00000010 +#define QWDS_STAT_MASK_RX_PKT_OF 0x00000020 + +// TLV Types for xfer statistics +#define TLV_WDS_TX_GOOD_PKTS 0x10 +#define TLV_WDS_RX_GOOD_PKTS 0x11 +#define TLV_WDS_TX_ERROR 0x12 +#define TLV_WDS_RX_ERROR 0x13 +#define TLV_WDS_TX_OVERFLOW 0x14 +#define TLV_WDS_RX_OVERFLOW 0x15 +#define TLV_WDS_CHANNEL_RATE 0x16 +#define TLV_WDS_DATA_BEARER 0x17 +#define TLV_WDS_DORMANCY_STATUS 0x18 + +#define QWDS_PKT_DATA_UNKNOW 0x00 +#define QWDS_PKT_DATA_DISCONNECTED 0x01 +#define QWDS_PKT_DATA_CONNECTED 0x02 +#define QWDS_PKT_DATA_SUSPENDED 0x03 +#define QWDS_PKT_DATA_AUTHENTICATING 0x04 + +#define QMIWDS_ADMIN_SET_DATA_FORMAT_REQ 0x0020 +#define QMIWDS_ADMIN_SET_DATA_FORMAT_RESP 0x0020 +#define QMIWDS_ADMIN_GET_DATA_FORMAT_REQ 0x0021 +#define QMIWDS_ADMIN_GET_DATA_FORMAT_RESP 0x0021 +#define QMIWDS_ADMIN_SET_QMAP_SETTINGS_REQ 0x002B +#define QMIWDS_ADMIN_SET_QMAP_SETTINGS_RESP 0x002B +#define QMIWDS_ADMIN_GET_QMAP_SETTINGS_REQ 0x002C +#define QMIWDS_ADMIN_GET_QMAP_SETTINGS_RESP 0x002C +#define QMI_WDA_SET_LOOPBACK_CONFIG_REQ 0x002F +#define QMI_WDA_SET_LOOPBACK_CONFIG_RESP 0x002F +#define QMI_WDA_SET_LOOPBACK_CONFIG_IND 0x002F + +#define NETWORK_DESC_ENCODING_OCTET 0x00 +#define NETWORK_DESC_ENCODING_EXTPROTOCOL 0x01 +#define NETWORK_DESC_ENCODING_7BITASCII 0x02 +#define NETWORK_DESC_ENCODING_IA5 0x03 +#define NETWORK_DESC_ENCODING_UNICODE 0x04 +#define NETWORK_DESC_ENCODING_SHIFTJIS 0x05 
+#define NETWORK_DESC_ENCODING_KOREAN 0x06 +#define NETWORK_DESC_ENCODING_LATINH 0x07 +#define NETWORK_DESC_ENCODING_LATIN 0x08 +#define NETWORK_DESC_ENCODING_GSM7BIT 0x09 +#define NETWORK_DESC_ENCODING_GSMDATA 0x0A +#define NETWORK_DESC_ENCODING_UNKNOWN 0xFF + +typedef struct _QMIWDS_ADMIN_SET_DATA_FORMAT +{ + USHORT Type; // QMUX type 0x0000 + USHORT Length; +} __attribute__ ((packed)) QMIWDS_ADMIN_SET_DATA_FORMAT, *PQMIWDS_ADMIN_SET_DATA_FORMAT; + +typedef struct _QMIWDS_ADMIN_SET_DATA_FORMAT_TLV_QOS +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR QOSSetting; +} __attribute__ ((packed)) QMIWDS_ADMIN_SET_DATA_FORMAT_TLV_QOS, *PQMIWDS_ADMIN_SET_DATA_FORMAT_TLV_QOS; + +typedef struct _QMIWDS_ADMIN_SET_DATA_FORMAT_TLV +{ + UCHAR TLVType; + USHORT TLVLength; + ULONG Value; +} __attribute__ ((packed)) QMIWDS_ADMIN_SET_DATA_FORMAT_TLV, *PQMIWDS_ADMIN_SET_DATA_FORMAT_TLV; + +typedef struct _QMIWDS_ENDPOINT_TLV +{ + UCHAR TLVType; + USHORT TLVLength; + ULONG ep_type; + ULONG iface_id; +} __attribute__ ((packed)) QMIWDS_ENDPOINT_TLV, *PQMIWDS_ENDPOINT_TLV; + +typedef struct _QMIWDS_ADMIN_SET_DATA_FORMAT_REQ_MSG +{ + USHORT Type; + USHORT Length; + QMIWDS_ADMIN_SET_DATA_FORMAT_TLV_QOS QosDataFormatTlv; + QMIWDS_ADMIN_SET_DATA_FORMAT_TLV UnderlyingLinkLayerProtocolTlv; + QMIWDS_ADMIN_SET_DATA_FORMAT_TLV UplinkDataAggregationProtocolTlv; + QMIWDS_ADMIN_SET_DATA_FORMAT_TLV DownlinkDataAggregationProtocolTlv; + QMIWDS_ADMIN_SET_DATA_FORMAT_TLV DownlinkDataAggregationMaxDatagramsTlv; + QMIWDS_ADMIN_SET_DATA_FORMAT_TLV DownlinkDataAggregationMaxSizeTlv; + QMIWDS_ENDPOINT_TLV epTlv; +#ifdef QUECTEL_UL_DATA_AGG + QMIWDS_ADMIN_SET_DATA_FORMAT_TLV DlMinimumPassingTlv; + QMIWDS_ADMIN_SET_DATA_FORMAT_TLV UplinkDataAggregationMaxDatagramsTlv; + QMIWDS_ADMIN_SET_DATA_FORMAT_TLV UplinkDataAggregationMaxSizeTlv; +#endif +} __attribute__ ((packed)) QMIWDS_ADMIN_SET_DATA_FORMAT_REQ_MSG, *PQMIWDS_ADMIN_SET_DATA_FORMAT_REQ_MSG; + +typedef struct _QMI_U8_TLV +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR TLVVaule; +} __attribute__ ((packed)) QMI_U8_TLV, *PQMI_U8_TLV; + +typedef struct _QMI_U32_TLV +{ + UCHAR TLVType; + USHORT TLVLength; + ULONG TLVVaule; +} __attribute__ ((packed)) QMI_U32_TLV, *PQMI_U32_TLV; + +typedef struct _QMI_WDA_SET_LOOPBACK_CONFIG_REQ_MSG { + USHORT Type; + USHORT Length; + QMI_U8_TLV loopback_state; //0x01 + QMI_U32_TLV replication_factor; //0x10 +} __attribute__ ((packed)) QMI_WDA_SET_LOOPBACK_CONFIG_REQ_MSG, *PQMI_WDA_SET_LOOPBACK_CONFIG_REQ_MSG; + +typedef struct _QMI_WDA_SET_LOOPBACK_CONFIG_IND_MSG +{ + USHORT Type; + USHORT Length; + QMI_U8_TLV loopback_state; //0x01 + QMI_U32_TLV replication_factor; //0x10 +} __attribute__ ((packed)) QMI_WDA_SET_LOOPBACK_CONFIG_IND_MSG, *PQMI_WDA_SET_LOOPBACK_CONFIG_IND_MSG; + +#if 0 +typedef enum _QMI_RETURN_CODES { + QMI_SUCCESS = 0, + QMI_SUCCESS_NOT_COMPLETE, + QMI_FAILURE +}QMI_RETURN_CODES; + +typedef struct _QMIWDS_GET_PKT_SRVC_STATUS_REQ_MSG +{ + USHORT Type; // 0x0022 + USHORT Length; // 0x0000 +} QMIWDS_GET_PKT_SRVC_STATUS_REQ_MSG, *PQMIWDS_GET_PKT_SRVC_STATUS_REQ_MSG; + +typedef struct _QMIWDS_GET_PKT_SRVC_STATUS_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; + UCHAR TLVType2; + USHORT TLVLength2; + UCHAR ConnectionStatus; // 0x01: QWDS_PKT_DATAC_DISCONNECTED + // 0x02: QWDS_PKT_DATA_CONNECTED + // 0x03: QWDS_PKT_DATA_SUSPENDED + // 0x04: QWDS_PKT_DATA_AUTHENTICATING +} QMIWDS_GET_PKT_SRVC_STATUS_RESP_MSG, *PQMIWDS_GET_PKT_SRVC_STATUS_RESP_MSG; + +typedef struct 
_QMIWDS_GET_PKT_SRVC_STATUS_IND_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR ConnectionStatus; // 0x01: QWDS_PKT_DATAC_DISCONNECTED + // 0x02: QWDS_PKT_DATA_CONNECTED + // 0x03: QWDS_PKT_DATA_SUSPENDED + UCHAR ReconfigRequired; // 0x00: No need to reconfigure + // 0x01: Reconfiguration required +} QMIWDS_GET_PKT_SRVC_STATUS_IND_MSG, *PQMIWDS_GET_PKT_SRVC_STATUS_IND_MSG; + +typedef struct _WDS_PKT_SRVC_IP_FAMILY_TLV +{ + UCHAR TLVType; // 0x12 + USHORT TLVLength; // 1 + UCHAR IpFamily; // IPV4-0x04, IPV6-0x06 +} WDS_PKT_SRVC_IP_FAMILY_TLV, *PWDS_PKT_SRVC_IP_FAMILY_TLV; + +typedef struct _QMIWDS_DUN_CALL_INFO_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + ULONG Mask; + UCHAR TLV2Type; + USHORT TLV2Length; + UCHAR ReportConnectionStatus; +} QMIWDS_DUN_CALL_INFO_REQ_MSG, *PQMIWDS_DUN_CALL_INFO_REQ_MSG; + +typedef struct _QMIWDS_DUN_CALL_INFO_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; +} QMIWDS_DUN_CALL_INFO_RESP_MSG, *PQMIWDS_DUN_CALL_INFO_RESP_MSG; + +typedef struct _QMIWDS_DUN_CALL_INFO_IND_MSG +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR ConnectionStatus; +} QMIWDS_DUN_CALL_INFO_IND_MSG, *PQMIWDS_DUN_CALL_INFO_IND_MSG; + +typedef struct _QMIWDS_GET_CURRENT_CHANNEL_RATE_REQ_MSG +{ + USHORT Type; // QMUX type 0x0040 + USHORT Length; +} QMIWDS_GET_CURRENT_CHANNEL_RATE_REQ_MSG, *PQMIWDS_GET_CURRENT_CHANNEL_RATE_REQ_MSG; + +typedef struct _QMIWDS_GET_CURRENT_CHANNEL_RATE_RESP_MSG +{ + USHORT Type; // QMUX type 0x0040 + USHORT Length; + UCHAR TLVType; // 0x02 + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT + + UCHAR TLV2Type; // 0x01 + USHORT TLV2Length; // 16 + //ULONG CallHandle; // Context corresponding to reported channel + ULONG CurrentTxRate; // bps + ULONG CurrentRxRate; // bps + ULONG ServingSystemTxRate; // bps + ULONG ServingSystemRxRate; // bps + +} QMIWDS_GET_CURRENT_CHANNEL_RATE_RESP_MSG, *PQMIWDS_GET_CURRENT_CHANNEL_RATE_RESP; + +#define QWDS_EVENT_REPORT_MASK_RATES 0x01 +#define QWDS_EVENT_REPORT_MASK_STATS 0x02 + +#ifdef QCUSB_MUX_PROTOCOL +#error code not present +#endif // QCUSB_MUX_PROTOCOL + +typedef struct _QMIWDS_SET_EVENT_REPORT_REQ_MSG +{ + USHORT Type; // QMUX type 0x0042 + USHORT Length; + + UCHAR TLVType; // 0x10 -- current channel rate indicator + USHORT TLVLength; // 1 + UCHAR Mode; // 0-do not report; 1-report when rate changes + + UCHAR TLV2Type; // 0x11 + USHORT TLV2Length; // 5 + UCHAR StatsPeriod; // seconds between reports; 0-do not report + ULONG StatsMask; // + + UCHAR TLV3Type; // 0x12 -- current data bearer indicator + USHORT TLV3Length; // 1 + UCHAR Mode3; // 0-do not report; 1-report when changes + + UCHAR TLV4Type; // 0x13 -- dormancy status indicator + USHORT TLV4Length; // 1 + UCHAR DormancyStatus; // 0-do not report; 1-report when changes +} QMIWDS_SET_EVENT_REPORT_REQ_MSG, *PQMIWDS_SET_EVENT_REPORT_REQ_MSG; + +typedef struct _QMIWDS_SET_EVENT_REPORT_RESP_MSG +{ + USHORT Type; // QMUX type 0x0042 + USHORT Length; + + UCHAR TLVType; // 0x02 result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_NO_BATTERY + // QMI_ERR_FAULT +} QMIWDS_SET_EVENT_REPORT_RESP_MSG, *PQMIWDS_SET_EVENT_REPORT_RESP_MSG; + 
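Every response and indication declared in this header follows the same QMUX wire layout: a 4-byte message header (Type, Length) followed by a sequence of {1-byte TLV type, 2-byte little-endian TLV length, value} items, which is why these packed structs can simply lay the expected TLVs out back to back. The standalone sketch below shows how a generic TLV lookup over that layout might work. It is illustrative only: the find_tlv helper, the local MSG_HDR/TLV_HDR names, and the example bytes are not part of the patch (the patch itself uses GetTLV() in MPQMUX.c with the packed QCQMUX_MSG_HDR and QMI_TLV_HDR types from MPQMI.h), and a little-endian host is assumed so le16_to_cpu() handling is omitted.

#include <stdio.h>
#include <stdint.h>

/* Standalone illustration; mirrors the packed layouts from MPQMI.h. */
#pragma pack(push, 1)
typedef struct { uint16_t Type; uint16_t Length; } MSG_HDR;       /* like QCQMUX_MSG_HDR */
typedef struct { uint8_t TLVType; uint16_t TLVLength; } TLV_HDR;  /* like QMI_TLV_HDR */
#pragma pack(pop)

/* Walk the TLVs that follow a QMUX message header and return the first
 * one with the requested type, or NULL. Bounds-checked so a malformed
 * TLV length cannot run past the end of the message. */
static const TLV_HDR *find_tlv(const MSG_HDR *msg, uint8_t type)
{
    const uint8_t *p = (const uint8_t *)(msg + 1);
    const uint8_t *end = p + msg->Length;

    while (p + sizeof(TLV_HDR) <= end) {
        const TLV_HDR *tlv = (const TLV_HDR *)p;
        if (p + sizeof(TLV_HDR) + tlv->TLVLength > end)
            break;  /* malformed: TLV overruns the message */
        if (tlv->TLVType == type)
            return tlv;
        p += sizeof(TLV_HDR) + tlv->TLVLength;
    }
    return NULL;
}

int main(void)
{
    /* Fabricated GET_PKT_SRVC_STATUS-style response: a result TLV 0x02
     * (QMUXResult/QMUXError both 0), then TLV 0x01 carrying one status
     * byte set to 0x02 (QWDS_PKT_DATA_CONNECTED). */
    const uint8_t buf[] = {
        0x22, 0x00, 0x0b, 0x00,                   /* Type 0x0022, Length 11 */
        0x02, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, /* TLV 0x02, length 4 */
        0x01, 0x01, 0x00, 0x02,                   /* TLV 0x01, length 1 */
    };
    const TLV_HDR *tlv = find_tlv((const MSG_HDR *)buf, 0x01);

    if (tlv)
        printf("ConnectionStatus = 0x%02x\n", *(const uint8_t *)(tlv + 1));
    return 0;
}

dump_tlv() in MPQMUX.c iterates the same sequence when logging, printing every TLV instead of matching a single type.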
+typedef struct _QMIWDS_EVENT_REPORT_IND_MSG
+{
+    USHORT Type;          // QMUX type 0x0001
+    USHORT Length;
+} QMIWDS_EVENT_REPORT_IND_MSG, *PQMIWDS_EVENT_REPORT_IND_MSG;
+
+// PQCTLV_PKT_STATISTICS
+
+typedef struct _QMIWDS_EVENT_REPORT_IND_CHAN_RATE_TLV
+{
+    UCHAR  Type;
+    USHORT Length;        // 8
+    ULONG  TxRate;
+    ULONG  RxRate;
+} QMIWDS_EVENT_REPORT_IND_CHAN_RATE_TLV, *PQMIWDS_EVENT_REPORT_IND_CHAN_RATE_TLV;
+
+#ifdef QCUSB_MUX_PROTOCOL
+#error code not present
+#endif // QCUSB_MUX_PROTOCOL
+
+typedef struct _QMIWDS_GET_PKT_STATISTICS_REQ_MSG
+{
+    USHORT Type;          // QMUX type 0x0041
+    USHORT Length;
+    UCHAR  TLVType;       // 0x01
+    USHORT TLVLength;     // 4
+    ULONG  StateMask;     // 0x00000001 tx success packets
+                          // 0x00000002 rx success packets
+                          // 0x00000004 rx packet errors (checksum)
+                          // 0x00000008 rx packets dropped (memory)
+
+} QMIWDS_GET_PKT_STATISTICS_REQ_MSG, *PQMIWDS_GET_PKT_STATISTICS_REQ_MSG;
+
+typedef struct _QMIWDS_GET_PKT_STATISTICS_RESP_MSG
+{
+    USHORT Type;          // QMUX type 0x0041
+    USHORT Length;
+    UCHAR  TLVType;       // 0x02
+    USHORT TLVLength;     // 4
+    USHORT QMUXResult;    // QMI_RESULT_SUCCESS
+                          // QMI_RESULT_FAILURE
+    USHORT QMUXError;     // QMI_ERR_INVALID_ARG
+                          // QMI_ERR_NO_MEMORY
+                          // QMI_ERR_INTERNAL
+                          // QMI_ERR_FAULT
+} QMIWDS_GET_PKT_STATISTICS_RESP_MSG, *PQMIWDS_GET_PKT_STATISTICS_RESP_MSG;
+
+// optional TLV for stats
+typedef struct _QCTLV_PKT_STATISTICS
+{
+    UCHAR  TLVType;       // see above definitions for TLV types
+    USHORT TLVLength;     // 4
+    ULONG  Count;
+} QCTLV_PKT_STATISTICS, *PQCTLV_PKT_STATISTICS;
+#endif
+
+//#ifdef QC_IP_MODE
+
+/*
+  - Bit 0  - Profile identifier
+  - Bit 1  - Profile name
+  - Bit 2  - PDP type
+  - Bit 3  - APN name
+  - Bit 4  - DNS address
+  - Bit 5  - UMTS/GPRS granted QoS
+  - Bit 6  - Username
+  - Bit 7  - Authentication Protocol
+  - Bit 8  - IP address
+  - Bit 9  - Gateway information (address and subnet mask)
+  - Bit 10 - PCSCF address using a PCO flag
+  - Bit 11 - PCSCF server address list
+  - Bit 12 - PCSCF domain name list
+  - Bit 13 - MTU
+  - Bit 14 - Domain name list
+  - Bit 15 - IP family
+  - Bit 16 - IM_CM flag
+  - Bit 17 - Technology name
+  - Bit 18 - Operator reserved PCO
+*/
+#define QMIWDS_GET_RUNTIME_SETTINGS_MASK_IPV4DNS_ADDR (1 << 4)
+#define QMIWDS_GET_RUNTIME_SETTINGS_MASK_IPV4_ADDR (1 << 8)
+#define QMIWDS_GET_RUNTIME_SETTINGS_MASK_IPV4GATEWAY_ADDR (1 << 9)
+#define QMIWDS_GET_RUNTIME_SETTINGS_MASK_MTU (1 << 13)
+#define QMIWDS_GET_RUNTIME_SETTINGS_MASK_PCSCF_SV_ADDR (1 << 11)
+#define QMIWDS_GET_RUNTIME_SETTINGS_MASK_PCSCF_DOM_NAME (1 << 14)
+
+typedef struct _QMIWDS_GET_RUNTIME_SETTINGS_REQ_MSG
+{
+    USHORT Type;          // QMIWDS_GET_RUNTIME_SETTINGS_REQ
+    USHORT Length;
+    UCHAR  TLVType;       // 0x10
+    USHORT TLVLength;     // 0x0004
+    ULONG  Mask;          // mask, bit 8: IP addr -- 0x0100
+} __attribute__ ((packed)) QMIWDS_GET_RUNTIME_SETTINGS_REQ_MSG, *PQMIWDS_GET_RUNTIME_SETTINGS_REQ_MSG;
+
+typedef struct _QMIWDS_BIND_MUX_DATA_PORT_REQ_MSG
+{
+    USHORT Type;
+    USHORT Length;
+    UCHAR  TLVType;
+    USHORT TLVLength;
+    ULONG  ep_type;
+    ULONG  iface_id;
+    UCHAR  TLV2Type;
+    USHORT TLV2Length;
+    UCHAR  MuxId;
+    UCHAR  TLV3Type;
+    USHORT TLV3Length;
+    ULONG  client_type;
+} __attribute__ ((packed)) QMIWDS_BIND_MUX_DATA_PORT_REQ_MSG, *PQMIWDS_BIND_MUX_DATA_PORT_REQ_MSG;
+
+#define QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV4PRIMARYDNS 0x15
+#define QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV4SECONDARYDNS 0x16
+#define QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV4 0x1E
+#define QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV4GATEWAY 0x20
+#define QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV4SUBNET 0x21
+
+#define QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV6 
0x25 +#define QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV6GATEWAY 0x26 +#define QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV6PRIMARYDNS 0x27 +#define QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV6SECONDARYDNS 0x28 +#define QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_MTU 0x29 + +typedef struct _QMIWDS_GET_RUNTIME_SETTINGS_TLV_MTU +{ + UCHAR TLVType; // QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_MTU + USHORT TLVLength; // 4 + ULONG Mtu; // MTU +} __attribute__ ((packed)) QMIWDS_GET_RUNTIME_SETTINGS_TLV_MTU, *PQMIWDS_GET_RUNTIME_SETTINGS_TLV_MTU; + +typedef struct _QMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV4_ADDR +{ + UCHAR TLVType; // QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV4 + USHORT TLVLength; // 4 + ULONG IPV4Address; // address +} __attribute__ ((packed)) QMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV4_ADDR, *PQMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV4_ADDR; + +typedef struct _QMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV6_ADDR +{ + UCHAR TLVType; // QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV6 + USHORT TLVLength; // 16 + UCHAR IPV6Address[16]; // address + UCHAR PrefixLength; // prefix length +} __attribute__ ((packed)) QMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV6_ADDR, *PQMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV6_ADDR; + +typedef struct _QMIWDS_GET_RUNNING_SETTINGS_PCSCF_IPV6_ADDR +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR PCSCFNumber; +} __attribute__ ((packed)) QMIWDS_GET_RUNNING_SETTINGS_PCSCF_IPV6_ADDR, *PQMIWDS_GET_RUNNING_SETTINGS_PCSCF_IPV6_ADDR; + +typedef struct _QMIWDS_GET_RUNNING_SETTINGS_PCSCF_IPV4_ADDR +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR PCSCFNumber; +} __attribute__ ((packed)) QMIWDS_GET_RUNNING_SETTINGS_PCSCF_IPV4_ADDR, *PQMIWDS_GET_RUNNING_SETTINGS_PCSCF_IPV4_ADDR; + +typedef struct _QMIWDS_GET_RUNTIME_SETTINGS_RESP_MSG +{ + USHORT Type; // QMIWDS_GET_RUNTIME_SETTINGS_RESP + USHORT Length; + UCHAR TLVType; // QCTLV_TYPE_RESULT_CODE + USHORT TLVLength; // 0x0004 + USHORT QMUXResult; // result code + USHORT QMUXError; // error code +} __attribute__ ((packed)) QMIWDS_GET_RUNTIME_SETTINGS_RESP_MSG, *PQMIWDS_GET_RUNTIME_SETTINGS_RESP_MSG; + +//#endif // QC_IP_MODE + +typedef struct _QMIWDS_IP_FAMILY_TLV +{ + UCHAR TLVType; // 0x12 + USHORT TLVLength; // 1 + UCHAR IpFamily; // IPV4-0x04, IPV6-0x06 +} __attribute__ ((packed)) QMIWDS_IP_FAMILY_TLV, *PQMIWDS_IP_FAMILY_TLV; + +typedef struct _QMIWDS_PKT_SRVC_TLV +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR ConnectionStatus; + UCHAR ReconfigReqd; +} __attribute__ ((packed)) QMIWDS_PKT_SRVC_TLV, *PQMIWDS_PKT_SRVC_TLV; + +typedef struct _QMIWDS_CALL_END_REASON_TLV +{ + UCHAR TLVType; + USHORT TLVLength; + USHORT CallEndReason; +} __attribute__ ((packed)) QMIWDS_CALL_END_REASON_TLV, *PQMIWDS_CALL_END_REASON_TLV; + +typedef struct _QMIWDS_CALL_END_REASON_V_TLV +{ + UCHAR TLVType; + USHORT TLVLength; + USHORT CallEndReasonType; + USHORT CallEndReason; +} __attribute__ ((packed)) QMIWDS_CALL_END_REASON_V_TLV, *PQMIWDS_CALL_END_REASON_V_TLV; + +typedef struct _QMIWDS_SET_CLIENT_IP_FAMILY_PREF_REQ_MSG +{ + USHORT Type; // QMUX type 0x004D + USHORT Length; + UCHAR TLVType; // 0x01 + USHORT TLVLength; // 1 + UCHAR IpPreference; // IPV4-0x04, IPV6-0x06 +} __attribute__ ((packed)) QMIWDS_SET_CLIENT_IP_FAMILY_PREF_REQ_MSG, *PQMIWDS_SET_CLIENT_IP_FAMILY_PREF_REQ_MSG; + +typedef struct _QMIWDS_SET_CLIENT_IP_FAMILY_PREF_RESP_MSG +{ + USHORT Type; // QMUX type 0x0037 + USHORT Length; + UCHAR TLVType; // 0x02 + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS, QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INTERNAL, QMI_ERR_MALFORMED_MSG, QMI_ERR_INVALID_ARG +} __attribute__ 
((packed)) QMIWDS_SET_CLIENT_IP_FAMILY_PREF_RESP_MSG, *PQMIWDS_SET_CLIENT_IP_FAMILY_PREF_RESP_MSG;
+
+typedef struct _QMIWDS_SET_AUTO_CONNECT_REQ_MSG
+{
+    USHORT Type;            // QMUX type 0x0051
+    USHORT Length;
+    UCHAR  TLVType;         // 0x01
+    USHORT TLVLength;       // 1
+    UCHAR  autoconnect_setting; // 0x00 - Disabled, 0x01 - Enabled, 0x02 - Paused (resume on power cycle)
+} __attribute__ ((packed)) QMIWDS_SET_AUTO_CONNECT_REQ_MSG, *PQMIWDS_SET_AUTO_CONNECT_REQ_MSG;
+
+#if 0
+typedef struct _QMIWDS_GET_MIP_MODE_REQ_MSG
+{
+    USHORT Type;            // QMUX type 0x0040
+    USHORT Length;
+} QMIWDS_GET_MIP_MODE_REQ_MSG, *PQMIWDS_GET_MIP_MODE_REQ_MSG;
+
+typedef struct _QMIWDS_GET_MIP_MODE_RESP_MSG
+{
+    USHORT Type;            // QMUX type 0x0040
+    USHORT Length;
+    UCHAR  TLVType;         // 0x02
+    USHORT TLVLength;       // 4
+    USHORT QMUXResult;      // QMI_RESULT_SUCCESS
+                            // QMI_RESULT_FAILURE
+    USHORT QMUXError;       // QMI_ERR_INVALID_ARG
+                            // QMI_ERR_NO_MEMORY
+                            // QMI_ERR_INTERNAL
+                            // QMI_ERR_FAULT
+
+    UCHAR  TLV2Type;        // 0x01
+    USHORT TLV2Length;      // 20
+    UCHAR  MipMode;         //
+} QMIWDS_GET_MIP_MODE_RESP_MSG, *PQMIWDS_GET_MIP_MODE_RESP_MSG;
+#endif
+
+typedef struct _QMIWDS_TECHNOLOGY_PREFERECE
+{
+    UCHAR  TLVType;
+    USHORT TLVLength;
+    UCHAR  TechPreference;
+} __attribute__ ((packed)) QMIWDS_TECHNOLOGY_PREFERECE, *PQMIWDS_TECHNOLOGY_PREFERECE;
+
+typedef struct _QMIWDS_PROFILE_IDENTIFIER
+{
+    UCHAR  TLVType;
+    USHORT TLVLength;
+    UCHAR  ProfileIndex;
+} __attribute__ ((packed)) QMIWDS_PROFILE_IDENTIFIER, *PQMIWDS_PROFILE_IDENTIFIER;
+
+#if 0
+typedef struct _QMIWDS_IPADDRESS
+{
+    UCHAR  TLVType;
+    USHORT TLVLength;
+    ULONG  IPv4Address;
+}QMIWDS_IPADDRESS, *PQMIWDS_IPADDRESS;
+
+/*
+typedef struct _QMIWDS_UMTS_QOS
+{
+    UCHAR  TLVType;
+    USHORT TLVLength;
+    UCHAR  TrafficClass;
+    ULONG  MaxUplinkBitRate;
+    ULONG  MaxDownlinkBitRate;
+    ULONG  GuarUplinkBitRate;
+    ULONG  GuarDownlinkBitRate;
+    UCHAR  QOSDevOrder;
+    ULONG  MAXSDUSize;
+    UCHAR  SDUErrorRatio;
+    UCHAR  ResidualBerRatio;
+    UCHAR  DeliveryErrorSDUs;
+    ULONG  TransferDelay;
+    ULONG  TrafficHndPri;
+}QMIWDS_UMTS_QOS, *PQMIWDS_UMTS_QOS;
+
+typedef struct _QMIWDS_GPRS_QOS
+{
+    UCHAR  TLVType;
+    USHORT TLVLength;
+    ULONG  PrecedenceClass;
+    ULONG  DelayClass;
+    ULONG  ReliabilityClass;
+    ULONG  PeekThroClass;
+    ULONG  MeanThroClass;
+}QMIWDS_GPRS_QOS, *PQMIWDS_GPRS_QOS;
+*/
+#endif
+
+typedef struct _QMIWDS_PROFILENAME
+{
+    UCHAR  TLVType;
+    USHORT TLVLength;
+    UCHAR  ProfileName;
+} __attribute__ ((packed)) QMIWDS_PROFILENAME, *PQMIWDS_PROFILENAME;
+
+typedef struct _QMIWDS_PDPTYPE
+{
+    UCHAR  TLVType;
+    USHORT TLVLength;
+// 0 - PDP-IP (IPv4)
+// 1 - PDP-PPP
+// 2 - PDP-IPv6
+// 3 - PDP-IPv4v6
+    UCHAR  PdpType;
+} __attribute__ ((packed)) QMIWDS_PDPTYPE, *PQMIWDS_PDPTYPE;
+
+typedef struct _QMIWDS_USERNAME
+{
+    UCHAR  TLVType;
+    USHORT TLVLength;
+    UCHAR  UserName;
+} __attribute__ ((packed)) QMIWDS_USERNAME, *PQMIWDS_USERNAME;
+
+typedef struct _QMIWDS_PASSWD
+{
+    UCHAR  TLVType;
+    USHORT TLVLength;
+    UCHAR  Passwd;
+} __attribute__ ((packed)) QMIWDS_PASSWD, *PQMIWDS_PASSWD;
+
+typedef struct _QMIWDS_AUTH_PREFERENCE
+{
+    UCHAR  TLVType;
+    USHORT TLVLength;
+    UCHAR  AuthPreference;
+} __attribute__ ((packed)) QMIWDS_AUTH_PREFERENCE, *PQMIWDS_AUTH_PREFERENCE;
+
+typedef struct _QMIWDS_IPTYPE
+{
+    UCHAR  TLVType;
+    USHORT TLVLength;
+    UCHAR  IPType;
+} __attribute__ ((packed)) QMIWDS_IPTYPE, *PQMIWDS_IPTYPE;
+
+typedef struct _QMIWDS_APNNAME
+{
+    UCHAR  TLVType;
+    USHORT TLVLength;
+    UCHAR  ApnName;
+} __attribute__ ((packed)) QMIWDS_APNNAME, *PQMIWDS_APNNAME;
+
+typedef struct _QMIWDS_AUTOCONNECT
+{
+    UCHAR  TLVType;
+    USHORT
TLVLength; + UCHAR AutoConnect; +} __attribute__ ((packed)) QMIWDS_AUTOCONNECT, *PQMIWDS_AUTOCONNECT; + +typedef struct _QMIWDS_START_NETWORK_INTERFACE_REQ_MSG +{ + USHORT Type; + USHORT Length; +} __attribute__ ((packed)) QMIWDS_START_NETWORK_INTERFACE_REQ_MSG, *PQMIWDS_START_NETWORK_INTERFACE_REQ_MSG; + +typedef struct _QMIWDS_CALLENDREASON +{ + UCHAR TLVType; + USHORT TLVLength; + USHORT Reason; +}__attribute__ ((packed)) QMIWDS_CALLENDREASON, *PQMIWDS_CALLENDREASON; + +typedef struct _QMIWDS_START_NETWORK_INTERFACE_RESP_MSG +{ + USHORT Type; // QMUX type 0x0040 + USHORT Length; + UCHAR TLVType; // 0x02 + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT + + UCHAR TLV2Type; // 0x01 + USHORT TLV2Length; // 20 + ULONG Handle; // +} __attribute__ ((packed)) QMIWDS_START_NETWORK_INTERFACE_RESP_MSG, *PQMIWDS_START_NETWORK_INTERFACE_RESP_MSG; + +typedef struct _QMIWDS_STOP_NETWORK_INTERFACE_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + ULONG Handle; +} __attribute__ ((packed)) QMIWDS_STOP_NETWORK_INTERFACE_REQ_MSG, *PQMIWDS_STOP_NETWORK_INTERFACE_REQ_MSG; + +typedef struct _QMIWDS_STOP_NETWORK_INTERFACE_RESP_MSG +{ + USHORT Type; // QMUX type 0x0040 + USHORT Length; + UCHAR TLVType; // 0x02 + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT + +} __attribute__ ((packed)) QMIWDS_STOP_NETWORK_INTERFACE_RESP_MSG, *PQMIWDS_STOP_NETWORK_INTERFACE_RESP_MSG; + +typedef struct _QMIWDS_GET_DEFAULT_SETTINGS_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR ProfileType; +} __attribute__ ((packed)) QMIWDS_GET_DEFAULT_SETTINGS_REQ_MSG, *PQMIWDS_GET_DEFAULT_SETTINGS_REQ_MSG; + +typedef struct _QMIWDS_GET_DEFAULT_SETTINGS_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; +} __attribute__ ((packed)) QMIWDS_GET_DEFAULT_SETTINGS_RESP_MSG, *PQMIWDS_GET_DEFAULT_SETTINGS_RESP_MSG; + +typedef struct _QMIWDS_MODIFY_PROFILE_SETTINGS_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR ProfileType; + UCHAR ProfileIndex; +} __attribute__ ((packed)) QMIWDS_MODIFY_PROFILE_SETTINGS_REQ_MSG, *PQMIWDS_MODIFY_PROFILE_SETTINGS_REQ_MSG; + +typedef struct _QMIWDS_MODIFY_PROFILE_SETTINGS_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; +} __attribute__ ((packed)) QMIWDS_MODIFY_PROFILE_SETTINGS_RESP_MSG, *PQMIWDS_MODIFY_PROFILE_SETTINGS_RESP_MSG; + +typedef struct _QMIWDS_GET_PROFILE_SETTINGS_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR ProfileType; + UCHAR ProfileIndex; +} __attribute__ ((packed)) QMIWDS_GET_PROFILE_SETTINGS_REQ_MSG, *PQMIWDS_GET_PROFILE_SETTINGS_REQ_MSG; + +typedef struct _QMIWDS_CREATE_PROFILE_SETTINGS_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR ProfileType; + UCHAR TLV2Type; //0x25 + USHORT TLV2Length; + UCHAR pdp_context; +} __attribute__ ((packed)) QMIWDS_CREATE_PROFILE_SETTINGS_REQ_MSG, *PQMIWDS_CREATE_PROFILE_SETTINGS_REQ_MSG; + +#if 0 +typedef struct _QMIWDS_EVENT_REPORT_IND_DATA_BEARER_TLV +{ + UCHAR Type; + USHORT Length; + UCHAR DataBearer; +} 
QMIWDS_EVENT_REPORT_IND_DATA_BEARER_TLV, *PQMIWDS_EVENT_REPORT_IND_DATA_BEARER_TLV; + +typedef struct _QMIWDS_EVENT_REPORT_IND_DORMANCY_STATUS_TLV +{ + UCHAR Type; + USHORT Length; + UCHAR DormancyStatus; +} QMIWDS_EVENT_REPORT_IND_DORMANCY_STATUS_TLV, *PQMIWDS_EVENT_REPORT_IND_DORMANCY_STATUS_TLV; + + +typedef struct _QMIWDS_GET_DATA_BEARER_REQ_MSG +{ + USHORT Type; // QMUX type 0x0037 + USHORT Length; +} QMIWDS_GET_DATA_BEARER_REQ_MSG, *PQMIWDS_GET_DATA_BEARER_REQ_MSG; + +typedef struct _QMIWDS_GET_DATA_BEARER_RESP_MSG +{ + USHORT Type; // QMUX type 0x0037 + USHORT Length; + UCHAR TLVType; // 0x02 + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INTERNAL + // QMI_ERR_MALFORMED_MSG + // QMI_ERR_NO_MEMORY + // QMI_ERR_OUT_OF_CALL + // QMI_ERR_INFO_UNAVAILABLE + UCHAR TLV2Type; // 0x01 + USHORT TLV2Length; // + UCHAR Technology; // +} QMIWDS_GET_DATA_BEARER_RESP_MSG, *PQMIWDS_GET_DATA_BEARER_RESP_MSG; +#endif + +// ======================= DMS ============================== +#define QMIDMS_SET_EVENT_REPORT_REQ 0x0001 +#define QMIDMS_SET_EVENT_REPORT_RESP 0x0001 +#define QMIDMS_EVENT_REPORT_IND 0x0001 +#define QMIDMS_GET_DEVICE_CAP_REQ 0x0020 +#define QMIDMS_GET_DEVICE_CAP_RESP 0x0020 +#define QMIDMS_GET_DEVICE_MFR_REQ 0x0021 +#define QMIDMS_GET_DEVICE_MFR_RESP 0x0021 +#define QMIDMS_GET_DEVICE_MODEL_ID_REQ 0x0022 +#define QMIDMS_GET_DEVICE_MODEL_ID_RESP 0x0022 +#define QMIDMS_GET_DEVICE_REV_ID_REQ 0x0023 +#define QMIDMS_GET_DEVICE_REV_ID_RESP 0x0023 +#define QMIDMS_GET_MSISDN_REQ 0x0024 +#define QMIDMS_GET_MSISDN_RESP 0x0024 +#define QMIDMS_GET_DEVICE_SERIAL_NUMBERS_REQ 0x0025 +#define QMIDMS_GET_DEVICE_SERIAL_NUMBERS_RESP 0x0025 +#define QMIDMS_UIM_SET_PIN_PROTECTION_REQ 0x0027 +#define QMIDMS_UIM_SET_PIN_PROTECTION_RESP 0x0027 +#define QMIDMS_UIM_VERIFY_PIN_REQ 0x0028 +#define QMIDMS_UIM_VERIFY_PIN_RESP 0x0028 +#define QMIDMS_UIM_UNBLOCK_PIN_REQ 0x0029 +#define QMIDMS_UIM_UNBLOCK_PIN_RESP 0x0029 +#define QMIDMS_UIM_CHANGE_PIN_REQ 0x002A +#define QMIDMS_UIM_CHANGE_PIN_RESP 0x002A +#define QMIDMS_UIM_GET_PIN_STATUS_REQ 0x002B +#define QMIDMS_UIM_GET_PIN_STATUS_RESP 0x002B +#define QMIDMS_GET_DEVICE_HARDWARE_REV_REQ 0x002C +#define QMIDMS_GET_DEVICE_HARDWARE_REV_RESP 0x002C +#define QMIDMS_GET_OPERATING_MODE_REQ 0x002D +#define QMIDMS_GET_OPERATING_MODE_RESP 0x002D +#define QMIDMS_SET_OPERATING_MODE_REQ 0x002E +#define QMIDMS_SET_OPERATING_MODE_RESP 0x002E +#define QMIDMS_GET_ACTIVATED_STATUS_REQ 0x0031 +#define QMIDMS_GET_ACTIVATED_STATUS_RESP 0x0031 +#define QMIDMS_ACTIVATE_AUTOMATIC_REQ 0x0032 +#define QMIDMS_ACTIVATE_AUTOMATIC_RESP 0x0032 +#define QMIDMS_ACTIVATE_MANUAL_REQ 0x0033 +#define QMIDMS_ACTIVATE_MANUAL_RESP 0x0033 +#define QMIDMS_UIM_GET_ICCID_REQ 0x003C +#define QMIDMS_UIM_GET_ICCID_RESP 0x003C +#define QMIDMS_UIM_GET_CK_STATUS_REQ 0x0040 +#define QMIDMS_UIM_GET_CK_STATUS_RESP 0x0040 +#define QMIDMS_UIM_SET_CK_PROTECTION_REQ 0x0041 +#define QMIDMS_UIM_SET_CK_PROTECTION_RESP 0x0041 +#define QMIDMS_UIM_UNBLOCK_CK_REQ 0x0042 +#define QMIDMS_UIM_UNBLOCK_CK_RESP 0x0042 +#define QMIDMS_UIM_GET_IMSI_REQ 0x0043 +#define QMIDMS_UIM_GET_IMSI_RESP 0x0043 +#define QMIDMS_UIM_GET_STATE_REQ 0x0044 +#define QMIDMS_UIM_GET_STATE_RESP 0x0044 +#define QMIDMS_GET_BAND_CAP_REQ 0x0045 +#define QMIDMS_GET_BAND_CAP_RESP 0x0045 + +#if 0 +typedef struct _QMIDMS_GET_DEVICE_MFR_REQ_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; +} QMIDMS_GET_DEVICE_MFR_REQ_MSG, *PQMIDMS_GET_DEVICE_MFR_REQ_MSG; + +typedef struct 
_QMIDMS_GET_DEVICE_MFR_RESP_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT + UCHAR TLV2Type; // 0x01 - required parameter + USHORT TLV2Length; // length of the mfr string + UCHAR DeviceManufacturer; // first byte of string +} QMIDMS_GET_DEVICE_MFR_RESP_MSG, *PQMIDMS_GET_DEVICE_MFR_RESP_MSG; + +typedef struct _QMIDMS_GET_DEVICE_MODEL_ID_REQ_MSG +{ + USHORT Type; // QMUX type 0x0004 + USHORT Length; +} QMIDMS_GET_DEVICE_MODEL_ID_REQ_MSG, *PQMIDMS_GET_DEVICE_MODEL_ID_REQ_MSG; + +typedef struct _QMIDMS_GET_DEVICE_MODEL_ID_RESP_MSG +{ + USHORT Type; // QMUX type 0x0004 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT + UCHAR TLV2Type; // 0x01 - required parameter + USHORT TLV2Length; // length of the modem id string + UCHAR DeviceModelID; // device model id +} QMIDMS_GET_DEVICE_MODEL_ID_RESP_MSG, *PQMIDMS_GET_DEVICE_MODEL_ID_RESP_MSG; +#endif + +typedef struct _QMIDMS_GET_DEVICE_REV_ID_REQ_MSG +{ + USHORT Type; // QMUX type 0x0005 + USHORT Length; +} __attribute__ ((packed)) QMIDMS_GET_DEVICE_REV_ID_REQ_MSG, *PQMIDMS_GET_DEVICE_REV_ID_REQ_MSG; + +typedef struct _DEVICE_REV_ID +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR RevisionID; +} __attribute__ ((packed)) DEVICE_REV_ID, *PDEVICE_REV_ID; + +#if 0 +typedef struct _QMIDMS_GET_DEVICE_REV_ID_RESP_MSG +{ + USHORT Type; // QMUX type 0x0023 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT +} QMIDMS_GET_DEVICE_REV_ID_RESP_MSG, *PQMIDMS_GET_DEVICE_REV_ID_RESP_MSG; + +typedef struct _QMIDMS_GET_MSISDN_REQ_MSG +{ + USHORT Type; // QMUX type 0x0024 + USHORT Length; +} QMIDMS_GET_MSISDN_REQ_MSG, *PQMIDMS_GET_MSISDN_REQ_MSG; + +typedef struct _QCTLV_DEVICE_VOICE_NUMBERS +{ + UCHAR TLVType; // as defined above + USHORT TLVLength; // 4/7/7 + UCHAR VoideNumberString; // ESN, IMEI, or MEID + +} QCTLV_DEVICE_VOICE_NUMBERS, *PQCTLV_DEVICE_VOICE_NUMBERS; + + +typedef struct _QMIDMS_GET_MSISDN_RESP_MSG +{ + USHORT Type; // QMUX type 0x0024 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG +} QMIDMS_GET_MSISDN_RESP_MSG, *PQMIDMS_GET_MSISDN_RESP_MSG; +#endif + +typedef struct _QMIDMS_UIM_GET_IMSI_REQ_MSG +{ + USHORT Type; + USHORT Length; +} __attribute__ ((packed)) QMIDMS_UIM_GET_IMSI_REQ_MSG, *PQMIDMS_UIM_GET_IMSI_REQ_MSG; + +typedef struct _QMIDMS_UIM_GET_IMSI_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; + UCHAR TLV2Type; + USHORT TLV2Length; + UCHAR IMSI; +} __attribute__ ((packed)) QMIDMS_UIM_GET_IMSI_RESP_MSG, *PQMIDMS_UIM_GET_IMSI_RESP_MSG; + +#if 0 +typedef struct _QMIDMS_GET_DEVICE_SERIAL_NUMBERS_REQ_MSG +{ + USHORT Type; // QMUX type 0x0007 + USHORT Length; +} QMIDMS_GET_DEVICE_SERIAL_NUMBERS_REQ_MSG, *PQMIDMS_GET_DEVICE_SERIAL_NUMBERS_REQ_MSG; + +#define QCTLV_TYPE_SER_NUM_ESN 0x10 
+#define QCTLV_TYPE_SER_NUM_IMEI 0x11 +#define QCTLV_TYPE_SER_NUM_MEID 0x12 + +typedef struct _QCTLV_DEVICE_SERIAL_NUMBER +{ + UCHAR TLVType; // as defined above + USHORT TLVLength; // 4/7/7 + UCHAR SerialNumberString; // ESN, IMEI, or MEID + +} QCTLV_DEVICE_SERIAL_NUMBER, *PQCTLV_DEVICE_SERIAL_NUMBER; + +typedef struct _QMIDMS_GET_DEVICE_SERIAL_NUMBERS_RESP_MSG +{ + USHORT Type; // QMUX type 0x0007 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT + // followed by optional TLV +} QMIDMS_GET_DEVICE_SERIAL_NUMBERS_RESP_MSG, *PQMIDMS_GET_DEVICE_SERIAL_NUMBERS_RESP; + +typedef struct _QMIDMS_GET_DMS_BAND_CAP +{ + USHORT Type; + USHORT Length; +} QMIDMS_GET_BAND_CAP_REQ_MSG, *PQMIDMS_GET_BAND_CAP_REQ_MSG; + +typedef struct _QMIDMS_GET_BAND_CAP_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_NONE + // QMI_ERR_INTERNAL + // QMI_ERR_MALFORMED_MSG + // QMI_ERR_NO_MEMORY + + UCHAR TLV2Type; // 0x01 + USHORT TLV2Length; // 2 + ULONG64 BandCap; +} QMIDMS_GET_BAND_CAP_RESP_MSG, *PQMIDMS_GET_BAND_CAP_RESP; + +typedef struct _QMIDMS_GET_DEVICE_CAP_REQ_MSG +{ + USHORT Type; // QMUX type 0x0002 + USHORT Length; +} QMIDMS_GET_DEVICE_CAP_REQ_MSG, *PQMIDMS_GET_DEVICE_CAP_REQ_MSG; + +typedef struct _QMIDMS_GET_DEVICE_CAP_RESP_MSG +{ + USHORT Type; // QMUX type 0x0002 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMUX_RESULT_SUCCESS + // QMUX_RESULT_FAILURE + USHORT QMUXError; // QMUX_ERR_INVALID_ARG + // QMUX_ERR_NO_MEMORY + // QMUX_ERR_INTERNAL + // QMUX_ERR_FAULT + UCHAR TLV2Type; // 0x01 + USHORT TLV2Length; // 2 + + ULONG MaxTxChannelRate; + ULONG MaxRxChannelRate; + UCHAR VoiceCap; + UCHAR SimCap; + + UCHAR RadioIfListCnt; // #elements in radio interface list + UCHAR RadioIfList; // N 1-byte elements +} QMIDMS_GET_DEVICE_CAP_RESP_MSG, *PQMIDMS_GET_DEVICE_CAP_RESP_MSG; + +typedef struct _QMIDMS_GET_ACTIVATED_STATUS_REQ_MSG +{ + USHORT Type; // QMUX type 0x0002 + USHORT Length; +} QMIDMS_GET_ACTIVATED_STATUS_REQ_MSG, *PQMIDMS_GET_ACTIVATES_STATUD_REQ_MSG; + +typedef struct _QMIDMS_GET_ACTIVATED_STATUS_RESP_MSG +{ + USHORT Type; // QMUX type 0x0002 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMUX_RESULT_SUCCESS + // QMUX_RESULT_FAILURE + USHORT QMUXError; // QMUX_ERR_INVALID_ARG + // QMUX_ERR_NO_MEMORY + // QMUX_ERR_INTERNAL + // QMUX_ERR_FAULT + UCHAR TLV2Type; // 0x01 + USHORT TLV2Length; // 2 + + USHORT ActivatedStatus; +} QMIDMS_GET_ACTIVATED_STATUS_RESP_MSG, *PQMIDMS_GET_ACTIVATED_STATUS_RESP_MSG; + +typedef struct _QMIDMS_GET_OPERATING_MODE_REQ_MSG +{ + USHORT Type; // QMUX type 0x0002 + USHORT Length; +} QMIDMS_GET_OPERATING_MODE_REQ_MSG, *PQMIDMS_GET_OPERATING_MODE_REQ_MSG; + +typedef struct _OFFLINE_REASON +{ + UCHAR TLVType; + USHORT TLVLength; + USHORT OfflineReason; +} OFFLINE_REASON, *POFFLINE_REASON; + +typedef struct _HARDWARE_RESTRICTED_MODE +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR HardwareControlledMode; +} HARDWARE_RESTRICTED_MODE, *PHARDWARE_RESTRICTED_MODE; + +typedef struct _QMIDMS_GET_OPERATING_MODE_RESP_MSG +{ + USHORT Type; // QMUX type 0x0002 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + 
USHORT TLVLength; // 4 + USHORT QMUXResult; // QMUX_RESULT_SUCCESS + // QMUX_RESULT_FAILURE + USHORT QMUXError; // QMUX_ERR_INVALID_ARG + // QMUX_ERR_NO_MEMORY + // QMUX_ERR_INTERNAL + // QMUX_ERR_FAULT + UCHAR TLV2Type; // 0x01 + USHORT TLV2Length; // 2 + + UCHAR OperatingMode; +} QMIDMS_GET_OPERATING_MODE_RESP_MSG, *PQMIDMS_GET_OPERATING_MODE_RESP_MSG; + +typedef struct _QMIDMS_UIM_GET_ICCID_REQ_MSG +{ + USHORT Type; // QMUX type 0x0024 + USHORT Length; +} QMIDMS_UIM_GET_ICCID_REQ_MSG, *PQMIDMS_UIM_GET_ICCID_REQ_MSG; + +typedef struct _QMIDMS_UIM_GET_ICCID_RESP_MSG +{ + USHORT Type; // QMUX type 0x0024 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT + UCHAR TLV2Type; // 0x01 - required parameter + USHORT TLV2Length; // var + UCHAR ICCID; // String of voice number +} QMIDMS_UIM_GET_ICCID_RESP_MSG, *PQMIDMS_UIM_GET_ICCID_RESP_MSG; +#endif + +typedef struct _QMIDMS_SET_OPERATING_MODE_REQ_MSG +{ + USHORT Type; // QMUX type 0x0002 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + UCHAR OperatingMode; +} __attribute__ ((packed)) QMIDMS_SET_OPERATING_MODE_REQ_MSG, *PQMIDMS_SET_OPERATING_MODE_REQ_MSG; + +typedef struct _QMIDMS_SET_OPERATING_MODE_RESP_MSG +{ + USHORT Type; // QMUX type 0x0002 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMUX_RESULT_SUCCESS + // QMUX_RESULT_FAILURE + USHORT QMUXError; // QMUX_ERR_INVALID_ARG + // QMUX_ERR_NO_MEMORY + // QMUX_ERR_INTERNAL + // QMUX_ERR_FAULT +} __attribute__ ((packed)) QMIDMS_SET_OPERATING_MODE_RESP_MSG, *PQMIDMS_SET_OPERATING_MODE_RESP_MSG; + +#if 0 +typedef struct _QMIDMS_ACTIVATE_AUTOMATIC_REQ_MSG +{ + USHORT Type; // QMUX type 0x0024 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // + UCHAR ActivateCodelen; + UCHAR ActivateCode; +} QMIDMS_ACTIVATE_AUTOMATIC_REQ_MSG, *PQMIDMS_ACTIVATE_AUTOMATIC_REQ_MSG; + +typedef struct _QMIDMS_ACTIVATE_AUTOMATIC_RESP_MSG +{ + USHORT Type; // QMUX type 0x0024 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT +} QMIDMS_ACTIVATE_AUTOMATIC_RESP_MSG, *PQMIDMS_ACTIVATE_AUTOMATIC_RESP_MSG; + + +typedef struct _SPC_MSG +{ + UCHAR SPC[6]; + USHORT SID; +} SPC_MSG, *PSPC_MSG; + +typedef struct _MDN_MSG +{ + UCHAR MDNLEN; + UCHAR MDN; +} MDN_MSG, *PMDN_MSG; + +typedef struct _MIN_MSG +{ + UCHAR MINLEN; + UCHAR MIN; +} MIN_MSG, *PMIN_MSG; + +typedef struct _PRL_MSG +{ + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // + USHORT PRLLEN; + UCHAR PRL; +} PRL_MSG, *PPRL_MSG; + +typedef struct _MN_HA_KEY_MSG +{ + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // + UCHAR MN_HA_KEY_LEN; + UCHAR MN_HA_KEY; +} MN_HA_KEY_MSG, *PMN_HA_KEY_MSG; + +typedef struct _MN_AAA_KEY_MSG +{ + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // + UCHAR MN_AAA_KEY_LEN; + UCHAR MN_AAA_KEY; +} MN_AAA_KEY_MSG, *PMN_AAA_KEY_MSG; + +typedef struct _QMIDMS_ACTIVATE_MANUAL_REQ_MSG +{ + USHORT Type; // QMUX type 0x0024 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // + UCHAR Value; +} QMIDMS_ACTIVATE_MANUAL_REQ_MSG, *PQMIDMS_ACTIVATE_MANUAL_REQ_MSG; + +typedef struct 
_QMIDMS_ACTIVATE_MANUAL_RESP_MSG +{ + USHORT Type; // QMUX type 0x0024 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT +} QMIDMS_ACTIVATE_MANUAL_RESP_MSG, *PQMIDMS_ACTIVATE_MANUAL_RESP_MSG; +#endif + +typedef struct _QMIDMS_UIM_GET_STATE_REQ_MSG +{ + USHORT Type; + USHORT Length; +} __attribute__ ((packed)) QMIDMS_UIM_GET_STATE_REQ_MSG, *PQMIDMS_UIM_GET_STATE_REQ_MSG; + +typedef struct _QMIDMS_UIM_GET_STATE_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; + UCHAR TLV2Type; + USHORT TLV2Length; + UCHAR UIMState; +} __attribute__ ((packed)) QMIDMS_UIM_GET_STATE_RESP_MSG, *PQMIDMS_UIM_GET_STATE_RESP_MSG; + +typedef struct _QMIDMS_UIM_GET_PIN_STATUS_REQ_MSG +{ + USHORT Type; // QMUX type 0x0024 + USHORT Length; +} __attribute__ ((packed)) QMIDMS_UIM_GET_PIN_STATUS_REQ_MSG, *PQMIDMS_UIM_GET_PIN_STATUS_REQ_MSG; + +typedef struct _QMIDMS_UIM_PIN_STATUS +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR PINStatus; + UCHAR PINVerifyRetriesLeft; + UCHAR PINUnblockRetriesLeft; +} __attribute__ ((packed)) QMIDMS_UIM_PIN_STATUS, *PQMIDMS_UIM_PIN_STATUS; + +#define QMI_PIN_STATUS_NOT_INIT 0 +#define QMI_PIN_STATUS_NOT_VERIF 1 +#define QMI_PIN_STATUS_VERIFIED 2 +#define QMI_PIN_STATUS_DISABLED 3 +#define QMI_PIN_STATUS_BLOCKED 4 +#define QMI_PIN_STATUS_PERM_BLOCKED 5 +#define QMI_PIN_STATUS_UNBLOCKED 6 +#define QMI_PIN_STATUS_CHANGED 7 + + +typedef struct _QMIDMS_UIM_GET_PIN_STATUS_RESP_MSG +{ + USHORT Type; // QMUX type 0x0024 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT + UCHAR PinStatus; +} __attribute__ ((packed)) QMIDMS_UIM_GET_PIN_STATUS_RESP_MSG, *PQMIDMS_UIM_GET_PIN_STATUS_RESP_MSG; + +#if 0 +typedef struct _QMIDMS_UIM_GET_CK_STATUS_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR Facility; +} QMIDMS_UIM_GET_CK_STATUS_REQ_MSG, *PQMIDMS_UIM_GET_CK_STATUS_REQ_MSG; + + +typedef struct _QMIDMS_UIM_CK_STATUS +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR FacilityStatus; + UCHAR FacilityVerifyRetriesLeft; + UCHAR FacilityUnblockRetriesLeft; +} QMIDMS_UIM_CK_STATUS, *PQMIDMS_UIM_CK_STATUS; + +typedef struct _QMIDMS_UIM_CK_OPERATION_STATUS +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR OperationBlocking; +} QMIDMS_UIM_CK_OPERATION_STATUS, *PQMIDMS_UIM_CK_OPERATION_STATUS; + +typedef struct _QMIDMS_UIM_GET_CK_STATUS_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; + UCHAR CkStatus; +} QMIDMS_UIM_GET_CK_STATUS_RESP_MSG, *PQMIDMS_UIM_GET_CK_STATUS_RESP_MSG; +#endif + +typedef struct _QMIDMS_UIM_VERIFY_PIN_REQ_MSG +{ + USHORT Type; // QMUX type 0x0024 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + UCHAR PINID; + UCHAR PINLen; + UCHAR PINValue; +} __attribute__ ((packed)) QMIDMS_UIM_VERIFY_PIN_REQ_MSG, *PQMIDMS_UIM_VERIFY_PIN_REQ_MSG; + +typedef struct _QMIDMS_UIM_VERIFY_PIN_RESP_MSG +{ + USHORT Type; // QMUX type 0x0024 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT 
QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT + UCHAR TLV2Type; + USHORT TLV2Length; + UCHAR PINVerifyRetriesLeft; + UCHAR PINUnblockRetriesLeft; +} __attribute__ ((packed)) QMIDMS_UIM_VERIFY_PIN_RESP_MSG, *PQMIDMS_UIM_VERIFY_PIN_RESP_MSG; + +#if 0 +typedef struct _QMIDMS_UIM_SET_PIN_PROTECTION_REQ_MSG +{ + USHORT Type; // QMUX type 0x0024 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + UCHAR PINID; + UCHAR ProtectionSetting; + UCHAR PINLen; + UCHAR PINValue; +} QMIDMS_UIM_SET_PIN_PROTECTION_REQ_MSG, *PQMIDMS_UIM_SET_PIN_PROTECTION_REQ_MSG; + +typedef struct _QMIDMS_UIM_SET_PIN_PROTECTION_RESP_MSG +{ + USHORT Type; // QMUX type 0x0024 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT + UCHAR TLV2Type; + USHORT TLV2Length; + UCHAR PINVerifyRetriesLeft; + UCHAR PINUnblockRetriesLeft; +} QMIDMS_UIM_SET_PIN_PROTECTION_RESP_MSG, *PQMIDMS_UIM_SET_PIN_PROTECTION_RESP_MSG; + +typedef struct _QMIDMS_UIM_SET_CK_PROTECTION_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR Facility; + UCHAR FacilityState; + UCHAR FacliltyLen; + UCHAR FacliltyValue; +} QMIDMS_UIM_SET_CK_PROTECTION_REQ_MSG, *PQMIDMS_UIM_SET_CK_PROTECTION_REQ_MSG; + +typedef struct _QMIDMS_UIM_SET_CK_PROTECTION_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; + UCHAR TLV2Type; + USHORT TLV2Length; + UCHAR FacilityRetriesLeft; +} QMIDMS_UIM_SET_CK_PROTECTION_RESP_MSG, *PQMIDMS_UIM_SET_CK_PROTECTION_RESP_MSG; + + +typedef struct _UIM_PIN +{ + UCHAR PinLength; + UCHAR PinValue; +} UIM_PIN, *PUIM_PIN; + +typedef struct _QMIDMS_UIM_CHANGE_PIN_REQ_MSG +{ + USHORT Type; // QMUX type 0x0024 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + UCHAR PINID; + UCHAR PinDetails; +} QMIDMS_UIM_CHANGE_PIN_REQ_MSG, *PQMIDMS_UIM_CHANGE_PIN_REQ_MSG; + +typedef struct QMIDMS_UIM_CHANGE_PIN_RESP_MSG +{ + USHORT Type; // QMUX type 0x0024 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT + UCHAR TLV2Type; + USHORT TLV2Length; + UCHAR PINVerifyRetriesLeft; + UCHAR PINUnblockRetriesLeft; +} QMIDMS_UIM_CHANGE_PIN_RESP_MSG, *PQMIDMS_UIM_CHANGE_PIN_RESP_MSG; + +typedef struct _UIM_PUK +{ + UCHAR PukLength; + UCHAR PukValue; +} UIM_PUK, *PUIM_PUK; + +typedef struct _QMIDMS_UIM_UNBLOCK_PIN_REQ_MSG +{ + USHORT Type; // QMUX type 0x0024 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + UCHAR PINID; + UCHAR PinDetails; +} QMIDMS_UIM_UNBLOCK_PIN_REQ_MSG, *PQMIDMS_UIM_BLOCK_PIN_REQ_MSG; + +typedef struct QMIDMS_UIM_UNBLOCK_PIN_RESP_MSG +{ + USHORT Type; // QMUX type 0x0024 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT + UCHAR TLV2Type; + USHORT TLV2Length; + UCHAR PINVerifyRetriesLeft; + UCHAR PINUnblockRetriesLeft; +} QMIDMS_UIM_UNBLOCK_PIN_RESP_MSG, *PQMIDMS_UIM_UNBLOCK_PIN_RESP_MSG; + +typedef struct 
_QMIDMS_UIM_UNBLOCK_CK_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR Facility; + UCHAR FacliltyUnblockLen; + UCHAR FacliltyUnblockValue; +} QMIDMS_UIM_UNBLOCK_CK_REQ_MSG, *PQMIDMS_UIM_BLOCK_CK_REQ_MSG; + +typedef struct QMIDMS_UIM_UNBLOCK_CK_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; + UCHAR TLV2Type; + USHORT TLV2Length; + UCHAR FacilityUnblockRetriesLeft; +} QMIDMS_UIM_UNBLOCK_CK_RESP_MSG, *PQMIDMS_UIM_UNBLOCK_CK_RESP_MSG; + +typedef struct _QMIDMS_SET_EVENT_REPORT_REQ_MSG +{ + USHORT Type; + USHORT Length; +} QMIDMS_SET_EVENT_REPORT_REQ_MSG, *PQMIDMS_SET_EVENT_REPORT_REQ_MSG; + +typedef struct _QMIDMS_SET_EVENT_REPORT_RESP_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG +} QMIDMS_SET_EVENT_REPORT_RESP_MSG, *PQMIDMS_SET_EVENT_REPORT_RESP_MSG; + +typedef struct _PIN_STATUS +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR ReportPinState; +} PIN_STATUS, *PPIN_STATUS; + +typedef struct _POWER_STATUS +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR PowerStatus; + UCHAR BatteryLvl; +} POWER_STATUS, *PPOWER_STATUS; + +typedef struct _ACTIVATION_STATE +{ + UCHAR TLVType; + USHORT TLVLength; + USHORT ActivationState; +} ACTIVATION_STATE, *PACTIVATION_STATE; + +typedef struct _ACTIVATION_STATE_REQ +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR ActivationState; +} ACTIVATION_STATE_REQ, *PACTIVATION_STATE_REQ; + +typedef struct _OPERATING_MODE +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR OperatingMode; +} OPERATING_MODE, *POPERATING_MODE; + +typedef struct _UIM_STATE +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR UIMState; +} UIM_STATE, *PUIM_STATE; + +typedef struct _WIRELESS_DISABLE_STATE +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR WirelessDisableState; +} WIRELESS_DISABLE_STATE, *PWIRELESS_DISABLE_STATE; + +typedef struct _QMIDMS_EVENT_REPORT_IND_MSG +{ + USHORT Type; + USHORT Length; +} QMIDMS_EVENT_REPORT_IND_MSG, *PQMIDMS_EVENT_REPORT_IND_MSG; +#endif + +// ============================ END OF DMS =============================== + +// ======================= QOS ============================== +typedef struct _MPIOC_DEV_INFO MPIOC_DEV_INFO, *PMPIOC_DEV_INFO; + +#define QMI_QOS_SET_EVENT_REPORT_REQ 0x0001 +#define QMI_QOS_SET_EVENT_REPORT_RESP 0x0001 +#define QMI_QOS_SET_EVENT_REPORT_IND 0x0001 +#define QMI_QOS_BIND_DATA_PORT_REQ 0x002B +#define QMI_QOS_BIND_DATA_PORT_RESP 0x002B +#define QMI_QOS_INDICATION_REGISTER_REQ 0x002F +#define QMI_QOS_INDICATION_REGISTER_RESP 0x002F +#define QMI_QOS_GLOBAL_QOS_FLOW_IND 0x0031 +#define QMI_QOS_GET_QOS_INFO_REQ 0x0033 +#define QMI_QOS_GET_QOS_INFO_RESP 0x0033 + + +#if 1 +typedef struct _QMI_QOS_SET_EVENT_REPORT_REQ_MSG +{ + USHORT Type; // QMUX type 0x0001 + USHORT Length; + // UCHAR TLVType; // 0x01 - physical link state + // USHORT TLVLength; // 1 + // UCHAR PhyLinkStatusRpt; // 0-enable; 1-disable + UCHAR TLVType2; // 0x02 = global flow reporting + USHORT TLVLength2; // 1 + UCHAR GlobalFlowRpt; // 1-enable; 0-disable +} QMI_QOS_SET_EVENT_REPORT_REQ_MSG, *PQMI_QOS_SET_EVENT_REPORT_REQ_MSG; + +typedef struct _QMI_QOS_SET_EVENT_REPORT_RESP_MSG +{ + USHORT Type; // QMUX type 0x0010 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMUX_RESULT_SUCCESS + // QMUX_RESULT_FAILURE + USHORT QMUXError; 
// QMUX_ERR_INVALID_ARG + // QMUX_ERR_NO_MEMORY + // QMUX_ERR_INTERNAL + // QMUX_ERR_FAULT +} QMI_QOS_SET_EVENT_REPORT_RESP_MSG, *PQMI_QOS_SET_EVENT_REPORT_RESP_MSG; + +typedef struct _QMI_QOS_SET_EVENT_REPORT_IND_MSG +{ + USHORT Type; // QMUX type 0x0001 + USHORT Length; + UCHAR TLVs; +} QMI_QOS_SET_EVENT_REPORT_IND_MSG, *PQMI_QOS_SET_EVENT_REPORT_IND_MSG; + + +typedef struct _QMI_QOS_BIND_DATA_PORT_TLV_EP_ID +{ + UCHAR TLVType; //0x10 + USHORT TLVLength; + ULONG ep_type; + ULONG iface_id; +} __attribute__ ((packed)) QMI_QOS_BIND_DATA_PORT_TLV_EP_ID, *PQMI_QOS_BIND_DATA_PORT_TLV_EP_ID; + +typedef struct _QMI_QOS_BIND_DATA_PORT_TLV_MUX_ID +{ + UCHAR TLVType; //0x11 + USHORT TLVLength; + UCHAR mux_id; +} __attribute__ ((packed)) QMI_QOS_BIND_DATA_PORT_TLV_MUX_ID, *PQMI_QOS_BIND_DATA_PORT_TLV_MUX_ID; + +typedef struct _QMI_QOS_BIND_DATA_PORT_TLV_DATA_PORT +{ + UCHAR TLVType; //0x12 + USHORT TLVLength; + USHORT data_port; +} __attribute__ ((packed)) QMI_QOS_BIND_DATA_PORT_TLV_DATA_PORT, *PQMI_QOS_BIND_DATA_PORT_TLV_DATA_PORT; + +typedef struct _QMI_QOS_BIND_DATA_PORT_REQ_MSG +{ + USHORT Type; + USHORT Length; + QMI_QOS_BIND_DATA_PORT_TLV_EP_ID EpIdTlv; + QMI_QOS_BIND_DATA_PORT_TLV_MUX_ID MuxIdTlv; + //QMI_QOS_BIND_DATA_PORT_TLV_DATA_PORT DataPortTlv; +} __attribute__ ((packed)) QMI_QOS_BIND_DATA_PORT_REQ_MSG, *PQMI_QOS_BIND_DATA_PORT_REQ_MSG; + +typedef struct _QMI_QOS_BIND_DATA_PORT_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; //0x02 + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; +} __attribute__ ((packed)) QMI_QOS_BIND_DATA_PORT_RESP_MSG, *PQMI_QOS_BIND_DATA_PORT_RESP_MSG; + +typedef struct _QMI_QOS_INDICATION_REGISTER_TLV_REPORT_GLOBAL_QOS_FLOW +{ + UCHAR TLVType; //0x10 + USHORT TLVLength; + UCHAR report_global_qos_flows; +} __attribute__ ((packed)) QMI_QOS_INDICATION_REGISTER_TLV_REPORT_GLOBAL_QOS_FLOW, *PQMI_QOS_INDICATION_REGISTER_TLV_REPORT_GLOBAL_QOS_FLOW; + +typedef struct _QMI_QOS_INDICATION_REGISTER_TLV_SUPPRESS_REPORT_FLOW_CTL +{ + UCHAR TLVType; //0x11 + USHORT TLVLength; + UCHAR suppress_report_flow_control; +} __attribute__ ((packed)) QMI_QOS_INDICATION_REGISTER_TLV_SUPPRESS_REPORT_FLOW_CTL, *PQMI_QOS_INDICATION_REGISTER_TLV_SUPPRESS_REPORT_FLOW_CTL; + +typedef struct _QMI_QOS_INDICATION_REGISTER_TLV_SUPPRESS_NW_STATUS_IND +{ + UCHAR TLVType; //0x12 + USHORT TLVLength; + UCHAR suppress_network_status_ind; +} __attribute__ ((packed)) QMI_QOS_INDICATION_REGISTER_TLV_SUPPRESS_NW_STATUS_IND, *PQMI_QOS_INDICATION_REGISTER_TLV_SUPPRESS_NW_STATUS_IND; + +typedef struct _QMI_QOS_INDICATION_REGISTER_REQ_MSG +{ + USHORT Type; + USHORT Length; + QMI_QOS_INDICATION_REGISTER_TLV_REPORT_GLOBAL_QOS_FLOW ReportGlobalQosFlowTlv; + //QMI_QOS_INDICATION_REGISTER_TLV_SUPPRESS_REPORT_FLOW_CTL SuppressReportFlowCtlTlv; + //QMI_QOS_INDICATION_REGISTER_TLV_SUPPRESS_NW_STATUS_IND SuppressNWStatusIndTlv; +} __attribute__ ((packed)) QMI_QOS_INDICATION_REGISTER_REQ_MSG, *PQMI_QOS_INDICATION_REGISTER_REQ_MSG; + +typedef struct _QMI_QOS_INDICATION_REGISTER_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; //0x02 + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; +} __attribute__ ((packed)) QMI_QOS_INDICATION_REGISTER_RESP_MSG, *PQMI_QOS_INDICATION_REGISTER_RESP_MSG; + +typedef struct _QMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_STATE +{ + UCHAR TLVType; //0x01 + USHORT TLVLength; + ULONG qos_id; + UCHAR new_flow; + ULONG state_change; +} __attribute__ ((packed)) QMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_STATE, *PQMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_STATE; + +typedef struct 
_QMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_GRANTED +{ + UCHAR TLVType; //0x10 0x11 + USHORT TLVLength; + ULONG64 flow_valid_params; + ULONG ip_flow_trf_cls; + ULONG64 data_rate_max; + ULONG64 guaranteed_rate; + ULONG peak_rate; + ULONG token_rate; + ULONG bucket_size; + ULONG ip_flow_latency; + ULONG ip_flow_jitter; + USHORT ip_flow_pkt_error_rate_multiplier; + USHORT ip_flow_pkt_error_rate_exponent; + ULONG ip_flow_min_policed_packet_size; + ULONG ip_flow_max_allowed_packet_size; + ULONG ip_flow_3gpp_residual_bit_error_rate; + ULONG ip_flow_3gpp_traffic_handling_priority; + USHORT ip_flow_3gpp2_profile_id; + UCHAR ip_flow_3gpp2_flow_priority; + UCHAR ip_flow_3gpp_im_cn_flag; + UCHAR ip_flow_3gpp_sig_ind; + ULONG ip_flow_lte_qci; +} __attribute__ ((packed)) QMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_GRANTED, *PQMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_GRANTED; + +typedef struct _QMI_QOS_GLOBAL_QOS_FLOW_TLV_FILTER +{ + UCHAR TLVType; //0x12 0x13 + USHORT TLVLength; + UCHAR tx_rx_qos_filter_len; + UCHAR ip_version; + ULONG64 valid_params0; + ULONG ipv4_addr0; + ULONG subnet_mask0; + ULONG ipv4_addr1; + ULONG subnet_mask1; + UCHAR val4; + UCHAR mask4; + ULONG64 valid_params01; + UCHAR ipv6_address00; + UCHAR ipv6_address01; + UCHAR ipv6_address02; + UCHAR ipv6_address03; + UCHAR ipv6_address04; + UCHAR ipv6_address05; + UCHAR ipv6_address06; + UCHAR ipv6_address07; + UCHAR ipv6_address08; + UCHAR ipv6_address09; + UCHAR ipv6_address010; + UCHAR ipv6_address011; + UCHAR ipv6_address012; + UCHAR ipv6_address013; + UCHAR ipv6_address014; + ULONG ipv6_address015; + UCHAR prefix_len0; + UCHAR ipv6_address10; + UCHAR ipv6_address11; + UCHAR ipv6_address12; + UCHAR ipv6_address13; + UCHAR ipv6_address14; + UCHAR ipv6_address15; + UCHAR ipv6_address16; + UCHAR ipv6_address17; + UCHAR ipv6_address18; + UCHAR ipv6_address19; + UCHAR ipv6_address110; + UCHAR ipv6_address111; + UCHAR ipv6_address112; + UCHAR ipv6_address113; + UCHAR ipv6_address114; + ULONG ipv6_address115; + UCHAR prefix_len1; + UCHAR val6; + UCHAR mask6; + ULONG flow_label; + ULONG xport_protocol; + ULONG64 valid_params2; + USHORT port0; + USHORT range0; + USHORT port1; + USHORT range1; + ULONG64 valid_params3; + USHORT port2; + USHORT range2; + USHORT port3; + USHORT range3; + ULONG64 valid_params4; + UCHAR type; + UCHAR code; + ULONG64 valid_params5; + ULONG spi0; + ULONG64 valid_params6; + ULONG spi1; + USHORT filter_id; + USHORT filter_precedence; +} __attribute__ ((packed)) QMI_QOS_GLOBAL_QOS_FLOW_TLV_FILTER, *PQMI_QOS_GLOBAL_QOS_FLOW_TLV_FILTER; + +typedef struct _QMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_TYPE +{ + UCHAR TLVType; //0x14 + USHORT TLVLength; + ULONG flow_type; +} __attribute__ ((packed)) QMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_TYPE, *PQMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_TYPE; + +typedef struct _QMI_QOS_GLOBAL_QOS_FLOW_TLV_BEARER_ID +{ + UCHAR TLVType; //0x15 + USHORT TLVLength; + UCHAR bearer_id; +} __attribute__ ((packed)) QMI_QOS_GLOBAL_QOS_FLOW_TLV_BEARER_ID, *PQMI_QOS_GLOBAL_QOS_FLOW_TLV_BEARER_ID; + +typedef struct _QMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_CTL_SEQ_NUM +{ + UCHAR TLVType; //0x16 + USHORT TLVLength; + USHORT fc_seq_num; +} __attribute__ ((packed)) QMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_CTL_SEQ_NUM, *PQMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_CTL_SEQ_NUM; + +typedef struct _QMI_QOS_GLOBAL_QOS_FLOW_TLV_5G_QCI +{ + UCHAR TLVType; //0x17 0x18 + USHORT TLVLength; + ULONG tx_rx_5g_qci; +} __attribute__ ((packed)) QMI_QOS_GLOBAL_QOS_FLOW_TLV_5G_QCI, *PQMI_QOS_GLOBAL_QOS_FLOW_TLV_5G_QCI; + +typedef struct _QMI_QOS_GLOBAL_QOS_FLOW_TLV_AVG_WINDOW +{ + UCHAR TLVType; //0x19 
0x1A + USHORT TLVLength; + USHORT tx_rx_avg_window; +} __attribute__ ((packed)) QMI_QOS_GLOBAL_QOS_FLOW_TLV_AVG_WINDOW, *PQMI_QOS_GLOBAL_QOS_FLOW_TLV_AVG_WINDOW; + +typedef struct _QMI_QOS_GLOBAL_QOS_FLOW_TLV_TX_FILTER_MATCH_ALL +{ + UCHAR TLVType; //0x1B + USHORT TLVLength; + UCHAR tx_filter_match_all_len; + USHORT filter_id; +} __attribute__ ((packed)) QMI_QOS_GLOBAL_QOS_FLOW_TLV_TX_FILTER_MATCH_ALL, *PQMI_QOS_GLOBAL_QOS_FLOW_TLV_TX_FILTER_MATCH_ALL; + +typedef struct _QMI_QOS_GLOBAL_QOS_FLOW_IND_MSG +{ + USHORT Type; + USHORT Length; + QMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_STATE FlowStateTlv; + //QMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_GRANTED TxFlowGrantedTlv; + //QMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_GRANTED RxFlowGrantedTlv; + //QMI_QOS_GLOBAL_QOS_FLOW_TLV_FILTER TxFilterTlv; + //QMI_QOS_GLOBAL_QOS_FLOW_TLV_FILTER RxFilterTlv; + //QMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_TYPE FlowTypeTlv; + //QMI_QOS_GLOBAL_QOS_FLOW_TLV_BEARER_ID BearerIdTlv; + //QMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_CTL_SEQ_NUM FlowCtlSeqNumTlv; + //QMI_QOS_GLOBAL_QOS_FLOW_TLV_5G_QCI Tx5GQciTlv; + //QMI_QOS_GLOBAL_QOS_FLOW_TLV_5G_QCI Rx5GQciTlv; + //QMI_QOS_GLOBAL_QOS_FLOW_TLV_AVG_WINDOW AvgWindowTlv; + //QMI_QOS_GLOBAL_QOS_FLOW_TLV_TX_FILTER_MATCH_ALL TxFilterMatchAllTlv; +} __attribute__ ((packed)) QMI_QOS_GLOBAL_QOS_FLOW_IND_MSG, *PQMI_QOS_GLOBAL_QOS_FLOW_IND_MSG; + +typedef struct _QMI_QOS_GET_QOS_INFO_TLV_QOS_ID +{ + UCHAR TLVType; //0x01 + USHORT TLVLength; + ULONG qos_id; +} __attribute__ ((packed)) QMI_QOS_GET_QOS_INFO_TLV_QOS_ID, *PQMI_QOS_GET_QOS_INFO_TLV_QOS_ID; + +typedef struct _QMI_QOS_GET_QOS_INFO_TLV_FLOW_STATUS +{ + UCHAR TLVType; //0x10 + USHORT TLVLength; + UCHAR flow_status; +} __attribute__ ((packed)) QMI_QOS_GET_QOS_INFO_TLV_FLOW_STATUS, *PQMI_QOS_GET_QOS_INFO_TLV_FLOW_STATUS; + +typedef struct _QMI_QOS_GET_QOS_INFO_TLV_GRANTED_FLOW +{ + UCHAR TLVType; //0x11 0x12 + USHORT TLVLength; + ULONG64 flow_valid_params; + ULONG ip_flow_trf_cls; + ULONG64 data_rate_max; + ULONG64 guaranteed_rate; + ULONG peak_rate; + ULONG token_rate; + ULONG bucket_size; + ULONG ip_flow_latency; + ULONG ip_flow_jitter; + USHORT ip_flow_pkt_error_rate_multiplier; + USHORT ip_flow_pkt_error_rate_exponent; + ULONG ip_flow_min_policed_packet_size; + ULONG ip_flow_max_allowed_packet_size; + ULONG ip_flow_3gpp_residual_bit_error_rate; + ULONG ip_flow_3gpp_traffic_handling_priority; + USHORT ip_flow_3gpp2_profile_id; + UCHAR ip_flow_3gpp2_flow_priority; + UCHAR ip_flow_3gpp_im_cn_flag; + UCHAR ip_flow_3gpp_sig_ind; + ULONG ip_flow_lte_qci; +} __attribute__ ((packed)) QMI_QOS_GET_QOS_INFO_TLV_GRANTED_FLOW, *PQMI_QOS_GET_QOS_INFO_TLV_GRANTED_FLOW; + +typedef struct _QMI_QOS_GET_QOS_INFO_TLV_FILTER_SPECS +{ + UCHAR TLVType; //0x13 0x14 + USHORT TLVLength; + UCHAR tx_rx_qos_filter_len; + UCHAR ip_version; + ULONG64 valid_params0; + ULONG ipv4_addr0; + ULONG subnet_mask0; + ULONG ipv4_addr1; + ULONG subnet_mask1; + UCHAR val4; + UCHAR mask4; + ULONG64 valid_params01; + UCHAR ipv6_address00; + UCHAR ipv6_address01; + UCHAR ipv6_address02; + UCHAR ipv6_address03; + UCHAR ipv6_address04; + UCHAR ipv6_address05; + UCHAR ipv6_address06; + UCHAR ipv6_address07; + UCHAR ipv6_address08; + UCHAR ipv6_address09; + UCHAR ipv6_address010; + UCHAR ipv6_address011; + UCHAR ipv6_address012; + UCHAR ipv6_address013; + UCHAR ipv6_address014; + ULONG ipv6_address015; + UCHAR prefix_len0; + UCHAR ipv6_address10; + UCHAR ipv6_address11; + UCHAR ipv6_address12; + UCHAR ipv6_address13; + UCHAR ipv6_address14; + UCHAR ipv6_address15; + UCHAR ipv6_address16; + UCHAR ipv6_address17; + 
UCHAR ipv6_address18; + UCHAR ipv6_address19; + UCHAR ipv6_address110; + UCHAR ipv6_address111; + UCHAR ipv6_address112; + UCHAR ipv6_address113; + UCHAR ipv6_address114; + ULONG ipv6_address115; + UCHAR prefix_len1; + UCHAR val6; + UCHAR mask6; + ULONG flow_label; + ULONG xport_protocol; + ULONG64 valid_params2; + USHORT port0; + USHORT range0; + USHORT port1; + USHORT range1; + ULONG64 valid_params3; + USHORT port2; + USHORT range2; + USHORT port3; + USHORT range3; + ULONG64 valid_params4; + UCHAR type; + UCHAR code; + ULONG64 valid_params5; + ULONG spi0; + ULONG64 valid_params6; + ULONG spi1; + USHORT filter_id; + USHORT filter_precedence; +} __attribute__ ((packed)) QMI_QOS_GET_QOS_INFO_TLV_FILTER_SPECS, *PQMI_QOS_GET_QOS_INFO_TLV_FILTER_SPECS; + +typedef struct _QMI_QOS_GET_QOS_INFO_TLV_EXT_ERROR_INFO +{ + UCHAR TLVType; //0x15 + USHORT TLVLength; + USHORT ext_error_info; +} __attribute__ ((packed)) QMI_QOS_GET_QOS_INFO_TLV_EXT_ERROR_INFO, *PQMI_QOS_GET_QOS_INFO_TLV_EXT_ERROR_INFO; + +typedef struct _QMI_QOS_GET_QOS_INFO_TLV_5G_QCI +{ + UCHAR TLVType; //0x16 0x17 + USHORT TLVLength; + ULONG tx_rx_5g_qci; +} __attribute__ ((packed)) QMI_QOS_GET_QOS_INFO_TLV_5G_QCI, *PQMI_QOS_GET_QOS_INFO_TLV_5G_QCI; + +typedef struct _QMI_QOS_GET_QOS_INFO_TLV_AVG_WINDOW +{ + UCHAR TLVType; //0x18 0x19 + USHORT TLVLength; + USHORT tx_rx_averaging_window; +} __attribute__ ((packed)) QMI_QOS_GET_QOS_INFO_TLV_AVG_WINDOW, *PQMI_QOS_GET_QOS_INFO_TLV_AVG_WINDOW; + +typedef struct _QMI_QOS_GET_QOS_INFO_TLV_TX_FILTER_MATCH_ALL +{ + UCHAR TLVType; //0x1A + USHORT TLVLength; + UCHAR tx_filter_match_all_len; + USHORT filter_id; +} __attribute__ ((packed)) QMI_QOS_GET_QOS_INFO_TLV_TX_FILTER_MATCH_ALL, *PQMI_QOS_GET_QOS_INFO_TLV_TX_FILTER_MATCH_ALL; + +typedef struct _QMI_QOS_GET_QOS_INFO_REQ_MSG +{ + USHORT Type; + USHORT Length; + QMI_QOS_GET_QOS_INFO_TLV_QOS_ID QosIdTlv; +} __attribute__ ((packed)) QMI_QOS_GET_QOS_INFO_REQ_MSG, *PQMI_QOS_GET_QOS_INFO_REQ_MSG; + +typedef struct _QMI_QOS_GET_QOS_INFO_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; //0x02 + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; + //QMI_QOS_GET_QOS_INFO_TLV_FLOW_STATUS FlowStatusTlv; + //QMI_QOS_GET_QOS_INFO_TLV_GRANTED_FLOW TxGrantedFlowTlv; + //QMI_QOS_GET_QOS_INFO_TLV_GRANTED_FLOW RxGrantedFlowTlv; + //QMI_QOS_GET_QOS_INFO_TLV_FILTER_SPECS TxFilterSpecsTlv; + //QMI_QOS_GET_QOS_INFO_TLV_FILTER_SPECS RxFilterSpecsTlv; + //QMI_QOS_GET_QOS_INFO_TLV_EXT_ERROR_INFO ExtErrorInfoTlv; + //QMI_QOS_GET_QOS_INFO_TLV_5G_QCI Tx5GQciTlv; + //QMI_QOS_GET_QOS_INFO_TLV_5G_QCI Rx5GQciTlv; + //QMI_QOS_GET_QOS_INFO_TLV_AVG_WINDOW TxAvgWindowTlv; + //QMI_QOS_GET_QOS_INFO_TLV_AVG_WINDOW RxAvgWindowTlv; + //QMI_QOS_GET_QOS_INFO_TLV_TX_FILTER_MATCH_ALL TxFilterMatchAllTlv; +} __attribute__ ((packed)) QMI_QOS_GET_QOS_INFO_RESP_MSG, *PQMI_QOS_GET_QOS_INFO_RESP_MSG; + +#define QOS_IND_FLOW_STATE_ACTIVATED 0x00 +#define QOS_IND_FLOW_STATE_MODIFIED 0x01 +#define QOS_IND_FLOW_STATE_DELETED 0x02 +#define QOS_IND_FLOW_STATE_SUSPENDED 0x03 +#define QOS_IND_FLOW_STATE_ENABLED 0x04 +#define QOS_IND_FLOW_STATE_DISABLED 0x05 +#define QOS_IND_FLOW_STATE_INVALID 0x06 + +#define QOS_EVENT_RPT_IND_FLOW_ACTIVATED 0x01 +#define QOS_EVENT_RPT_IND_FLOW_MODIFIED 0x02 +#define QOS_EVENT_RPT_IND_FLOW_DELETED 0x03 +#define QOS_EVENT_RPT_IND_FLOW_SUSPENDED 0x04 +#define QOS_EVENT_RPT_IND_FLOW_ENABLED 0x05 +#define QOS_EVENT_RPT_IND_FLOW_DISABLED 0x06 + +#define QOS_EVENT_RPT_IND_TLV_PHY_LINK_STATE_TYPE 0x01 +#define QOS_EVENT_RPT_IND_TLV_GLOBAL_FL_RPT_STATE 0x10 
+#define QOS_EVENT_RPT_IND_TLV_GLOBAL_FL_RPT_TYPE 0x10 +#define QOS_EVENT_RPT_IND_TLV_TX_FLOW_TYPE 0x11 +#define QOS_EVENT_RPT_IND_TLV_RX_FLOW_TYPE 0x12 +#define QOS_EVENT_RPT_IND_TLV_TX_FILTER_TYPE 0x13 +#define QOS_EVENT_RPT_IND_TLV_RX_FILTER_TYPE 0x14 +#define QOS_EVENT_RPT_IND_TLV_FLOW_SPEC 0x10 +#define QOS_EVENT_RPT_IND_TLV_FILTER_SPEC 0x10 + +typedef struct _QOS_EVENT_RPT_IND_TLV_PHY_LINK_STATE +{ + UCHAR TLVType; // 0x01 + USHORT TLVLength; // 1 + UCHAR PhyLinkState; // 0-dormant, 1-active +} QOS_EVENT_RPT_IND_TLV_PHY_LINK_STATE, *PQOS_EVENT_RPT_IND_TLV_PHY_LINK_STATE; + +typedef struct _QOS_EVENT_RPT_IND_TLV_GLOBAL_FL_RPT +{ + UCHAR TLVType; // 0x10 + USHORT TLVLength; // 6 + ULONG QosId; + UCHAR NewFlow; // 1: newly added flow; 0: existing flow + UCHAR StateChange; // 1: activated; 2: modified; 3: deleted; + // 4: suspended(delete); 5: enabled; 6: disabled +} QOS_EVENT_RPT_IND_TLV_GLOBAL_FL_RPT, *PQOS_EVENT_RPT_IND_TLV_GLOBAL_FL_RPT; + +// QOS Flow + +typedef struct _QOS_EVENT_RPT_IND_TLV_FLOW +{ + UCHAR TLVType; // 0x10-TX flow; 0x11-RX flow + USHORT TLVLength; // var + // embedded TLV's +} QOS_EVENT_RPT_IND_TLV_TX_FLOW, *PQOS_EVENT_RPT_IND_TLV_TX_FLOW; + +#define QOS_FLOW_TLV_IP_FLOW_IDX_TYPE 0x10 +#define QOS_FLOW_TLV_IP_FLOW_TRAFFIC_CLASS_TYPE 0x11 +#define QOS_FLOW_TLV_IP_FLOW_DATA_RATE_MIN_MAX_TYPE 0x12 +#define QOS_FLOW_TLV_IP_FLOW_DATA_RATE_TOKEN_BUCKET_TYPE 0x13 +#define QOS_FLOW_TLV_IP_FLOW_LATENCY_TYPE 0x14 +#define QOS_FLOW_TLV_IP_FLOW_JITTER_TYPE 0x15 +#define QOS_FLOW_TLV_IP_FLOW_PKT_ERR_RATE_TYPE 0x16 +#define QOS_FLOW_TLV_IP_FLOW_MIN_PKT_SIZE_TYPE 0x17 +#define QOS_FLOW_TLV_IP_FLOW_MAX_PKT_SIZE_TYPE 0x18 +#define QOS_FLOW_TLV_IP_FLOW_3GPP_BIT_ERR_RATE_TYPE 0x19 +#define QOS_FLOW_TLV_IP_FLOW_3GPP_TRAF_PRIORITY_TYPE 0x1A +#define QOS_FLOW_TLV_IP_FLOW_3GPP2_PROFILE_ID_TYPE 0x1B + +typedef struct _QOS_FLOW_TLV_IP_FLOW_IDX +{ + UCHAR TLVType; // 0x10 + USHORT TLVLength; // 1 + UCHAR IpFlowIndex; +} QOS_FLOW_TLV_IP_FLOW_IDX, *PQOS_FLOW_TLV_IP_FLOW_IDX; + +typedef struct _QOS_FLOW_TLV_IP_FLOW_TRAFFIC_CLASS +{ + UCHAR TLVType; // 0x11 + USHORT TLVLength; // 1 + UCHAR TrafficClass; +} QOS_FLOW_TLV_IP_FLOW_TRAFFIC_CLASS, *PQOS_FLOW_TLV_IP_FLOW_TRAFFIC_CLASS; + +typedef struct _QOS_FLOW_TLV_IP_FLOW_DATA_RATE_MIN_MAX +{ + UCHAR TLVType; // 0x12 + USHORT TLVLength; // 8 + ULONG DataRateMax; + ULONG GuaranteedRate; +} QOS_FLOW_TLV_IP_FLOW_DATA_RATE_MIN_MAX, *PQOS_FLOW_TLV_IP_FLOW_DATA_RATE_MIN_MAX; + +typedef struct _QOS_FLOW_TLV_IP_FLOW_DATA_RATE_TOKEN_BUCKET +{ + UCHAR TLVType; // 0x13 + USHORT TLVLength; // 12 + ULONG PeakRate; + ULONG TokenRate; + ULONG BucketSize; +} QOS_FLOW_TLV_IP_FLOW_DATA_RATE_TOKEN_BUCKET, *PQOS_FLOW_TLV_IP_FLOW_DATA_RATE_TOKEN_BUCKET; + +typedef struct _QOS_FLOW_TLV_IP_FLOW_LATENCY +{ + UCHAR TLVType; // 0x14 + USHORT TLVLength; // 4 + ULONG IpFlowLatency; +} QOS_FLOW_TLV_IP_FLOW_LATENCY, *PQOS_FLOW_TLV_IP_FLOW_LATENCY; + +typedef struct _QOS_FLOW_TLV_IP_FLOW_JITTER +{ + UCHAR TLVType; // 0x15 + USHORT TLVLength; // 4 + ULONG IpFlowJitter; +} QOS_FLOW_TLV_IP_FLOW_JITTER, *PQOS_FLOW_TLV_IP_FLOW_JITTER; + +typedef struct _QOS_FLOW_TLV_IP_FLOW_PKT_ERR_RATE +{ + UCHAR TLVType; // 0x16 + USHORT TLVLength; // 4 + USHORT ErrRateMultiplier; + USHORT ErrRateExponent; +} QOS_FLOW_TLV_IP_FLOW_PKT_ERR_RATE, *PQOS_FLOW_TLV_IP_FLOW_PKT_ERR_RATE; + +typedef struct _QOS_FLOW_TLV_IP_FLOW_MIN_PKT_SIZE +{ + UCHAR TLVType; // 0x17 + USHORT TLVLength; // 4 + ULONG MinPolicedPktSize; +} QOS_FLOW_TLV_IP_FLOW_MIN_PKT_SIZE, *PQOS_FLOW_TLV_IP_FLOW_MIN_PKT_SIZE; + +typedef 
struct _QOS_FLOW_TLV_IP_FLOW_MAX_PKT_SIZE +{ + UCHAR TLVType; // 0x18 + USHORT TLVLength; // 4 + ULONG MaxAllowedPktSize; +} QOS_FLOW_TLV_IP_FLOW_MAX_PKT_SIZE, *PQOS_FLOW_TLV_IP_FLOW_MAX_PKT_SIZE; + +typedef struct _QOS_FLOW_TLV_IP_FLOW_3GPP_BIT_ERR_RATE +{ + UCHAR TLVType; // 0x19 + USHORT TLVLength; // 1 + UCHAR ResidualBitErrorRate; +} QOS_FLOW_TLV_IP_FLOW_3GPP_BIT_ERR_RATE, *PQOS_FLOW_TLV_IP_FLOW_3GPP_BIT_ERR_RATE; + +typedef struct _QOS_FLOW_TLV_IP_FLOW_3GPP_TRAF_PRIORITY +{ + UCHAR TLVType; // 0x1A + USHORT TLVLength; // 1 + UCHAR TrafficHandlingPriority; +} QOS_FLOW_TLV_IP_FLOW_3GPP_TRAF_PRIORITY, *PQOS_FLOW_TLV_IP_FLOW_3GPP_TRAF_PRIORITY; + +typedef struct _QOS_FLOW_TLV_IP_FLOW_3GPP2_PROFILE_ID +{ + UCHAR TLVType; // 0x1B + USHORT TLVLength; // 2 + USHORT ProfileId; +} QOS_FLOW_TLV_IP_FLOW_3GPP2_PROFILE_ID, *PQOS_FLOW_TLV_IP_FLOW_3GPP2_PROFILE_ID; + +// QOS Filter + +#define QOS_FILTER_TLV_IP_FILTER_IDX_TYPE 0x10 +#define QOS_FILTER_TLV_IP_VERSION_TYPE 0x11 +#define QOS_FILTER_TLV_IPV4_SRC_ADDR_TYPE 0x12 +#define QOS_FILTER_TLV_IPV4_DEST_ADDR_TYPE 0x13 +#define QOS_FILTER_TLV_NEXT_HDR_PROTOCOL_TYPE 0x14 +#define QOS_FILTER_TLV_IPV4_TYPE_OF_SERVICE_TYPE 0x15 +#define QOS_FILTER_TLV_TCP_UDP_PORT_SRC_TCP_TYPE 0x1B +#define QOS_FILTER_TLV_TCP_UDP_PORT_DEST_TCP_TYPE 0x1C +#define QOS_FILTER_TLV_TCP_UDP_PORT_SRC_UDP_TYPE 0x1D +#define QOS_FILTER_TLV_TCP_UDP_PORT_DEST_UDP_TYPE 0x1E +#define QOS_FILTER_TLV_ICMP_FILTER_MSG_TYPE_TYPE 0x1F +#define QOS_FILTER_TLV_ICMP_FILTER_MSG_CODE_TYPE 0x20 +#define QOS_FILTER_TLV_TCP_UDP_PORT_SRC_TYPE 0x24 +#define QOS_FILTER_TLV_TCP_UDP_PORT_DEST_TYPE 0x25 + +typedef struct _QOS_EVENT_RPT_IND_TLV_FILTER +{ + UCHAR TLVType; // 0x12-TX filter; 0x13-RX filter + USHORT TLVLength; // var + // embedded TLV's +} QOS_EVENT_RPT_IND_TLV_RX_FILTER, *PQOS_EVENT_RPT_IND_TLV_RX_FILTER; + +typedef struct _QOS_FILTER_TLV_IP_FILTER_IDX +{ + UCHAR TLVType; // 0x10 + USHORT TLVLength; // 1 + UCHAR IpFilterIndex; +} QOS_FILTER_TLV_IP_FILTER_IDX, *PQOS_FILTER_TLV_IP_FILTER_IDX; + +typedef struct _QOS_FILTER_TLV_IP_VERSION +{ + UCHAR TLVType; // 0x11 + USHORT TLVLength; // 1 + UCHAR IpVersion; +} QOS_FILTER_TLV_IP_VERSION, *PQOS_FILTER_TLV_IP_VERSION; + +typedef struct _QOS_FILTER_TLV_IPV4_SRC_ADDR +{ + UCHAR TLVType; // 0x12 + USHORT TLVLength; // 8 + ULONG IpSrcAddr; + ULONG IpSrcSubnetMask; +} QOS_FILTER_TLV_IPV4_SRC_ADDR, *PQOS_FILTER_TLV_IPV4_SRC_ADDR; + +typedef struct _QOS_FILTER_TLV_IPV4_DEST_ADDR +{ + UCHAR TLVType; // 0x13 + USHORT TLVLength; // 8 + ULONG IpDestAddr; + ULONG IpDestSubnetMask; +} QOS_FILTER_TLV_IPV4_DEST_ADDR, *PQOS_FILTER_TLV_IPV4_DEST_ADDR; + +typedef struct _QOS_FILTER_TLV_NEXT_HDR_PROTOCOL +{ + UCHAR TLVType; // 0x14 + USHORT TLVLength; // 1 + UCHAR NextHdrProtocol; +} QOS_FILTER_TLV_NEXT_HDR_PROTOCOL, *PQOS_FILTER_TLV_NEXT_HDR_PROTOCOL; + +typedef struct _QOS_FILTER_TLV_IPV4_TYPE_OF_SERVICE +{ + UCHAR TLVType; // 0x15 + USHORT TLVLength; // 2 + UCHAR Ipv4TypeOfService; + UCHAR Ipv4TypeOfServiceMask; +} QOS_FILTER_TLV_IPV4_TYPE_OF_SERVICE, *PQOS_FILTER_TLV_IPV4_TYPE_OF_SERVICE; + +typedef struct _QOS_FILTER_TLV_TCP_UDP_PORT +{ + UCHAR TLVType; // source port: 0x1B-TCP; 0x1D-UDP + // dest port: 0x1C-TCP; 0x1E-UDP + USHORT TLVLength; // 4 + USHORT FilterPort; + USHORT FilterPortRange; +} QOS_FILTER_TLV_TCP_UDP_PORT, *PQOS_FILTER_TLV_TCP_UDP_PORT; + +typedef struct _QOS_FILTER_TLV_ICMP_FILTER_MSG_TYPE +{ + UCHAR TLVType; // 0x1F + USHORT TLVLength; // 1 + UCHAR IcmpFilterMsgType; +} QOS_FILTER_TLV_ICMP_FILTER_MSG_TYPE, 
*PQOS_FILTER_TLV_ICMP_FILTER_MSG_TYPE; + +typedef struct _QOS_FILTER_TLV_ICMP_FILTER_MSG_CODE +{ + UCHAR TLVType; // 0x20 + USHORT TLVLength; // 1 + UCHAR IcmpFilterMsgCode; +} QOS_FILTER_TLV_ICMP_FILTER_MSG_CODE, *PQOS_FILTER_TLV_ICMP_FILTER_MSG_CODE; + +#define QOS_FILTER_PRECEDENCE_INVALID 256 +#define QOS_FILTER_TLV_PRECEDENCE_TYPE 0x22 +#define QOS_FILTER_TLV_ID_TYPE 0x23 + +typedef struct _QOS_FILTER_TLV_PRECEDENCE +{ + UCHAR TLVType; // 0x22 + USHORT TLVLength; // 2 + USHORT Precedence; // precedence of the filter +} QOS_FILTER_TLV_PRECEDENCE, *PQOS_FILTER_TLV_PRECEDENCE; + +typedef struct _QOS_FILTER_TLV_ID +{ + UCHAR TLVType; // 0x23 + USHORT TLVLength; // 2 + USHORT FilterId; // filter ID +} QOS_FILTER_TLV_ID, *PQOS_FILTER_TLV_ID; + +#ifdef QCQOS_IPV6 + +#define QOS_FILTER_TLV_IPV6_SRC_ADDR_TYPE 0x16 +#define QOS_FILTER_TLV_IPV6_DEST_ADDR_TYPE 0x17 +#define QOS_FILTER_TLV_IPV6_NEXT_HDR_PROTOCOL_TYPE 0x14 // same as IPV4 +#define QOS_FILTER_TLV_IPV6_TRAFFIC_CLASS_TYPE 0x19 +#define QOS_FILTER_TLV_IPV6_FLOW_LABEL_TYPE 0x1A + +typedef struct _QOS_FILTER_TLV_IPV6_SRC_ADDR +{ + UCHAR TLVType; // 0x16 + USHORT TLVLength; // 17 + UCHAR IpSrcAddr[16]; + UCHAR IpSrcAddrPrefixLen; // [0..128] +} QOS_FILTER_TLV_IPV6_SRC_ADDR, *PQOS_FILTER_TLV_IPV6_SRC_ADDR; + +typedef struct _QOS_FILTER_TLV_IPV6_DEST_ADDR +{ + UCHAR TLVType; // 0x17 + USHORT TLVLength; // 17 + UCHAR IpDestAddr[16]; + UCHAR IpDestAddrPrefixLen; // [0..128] +} QOS_FILTER_TLV_IPV6_DEST_ADDR, *PQOS_FILTER_TLV_IPV6_DEST_ADDR; + +#define QOS_FILTER_IPV6_NEXT_HDR_PROTOCOL_TCP 0x06 +#define QOS_FILTER_IPV6_NEXT_HDR_PROTOCOL_UDP 0x11 + +typedef struct _QOS_FILTER_TLV_IPV6_TRAFFIC_CLASS +{ + UCHAR TLVType; // 0x19 + USHORT TLVLength; // 2 + UCHAR TrafficClass; + UCHAR TrafficClassMask; // compare the first 6 bits only +} QOS_FILTER_TLV_IPV6_TRAFFIC_CLASS, *PQOS_FILTER_TLV_IPV6_TRAFFIC_CLASS; + +typedef struct _QOS_FILTER_TLV_IPV6_FLOW_LABEL +{ + UCHAR TLVType; // 0x1A + USHORT TLVLength; // 4 + ULONG FlowLabel; +} QOS_FILTER_TLV_IPV6_FLOW_LABEL, *PQOS_FILTER_TLV_IPV6_FLOW_LABEL; + +#endif // QCQOS_IPV6 +#endif + +// ======================= WMS ============================== +#define QMIWMS_SET_EVENT_REPORT_REQ 0x0001 +#define QMIWMS_SET_EVENT_REPORT_RESP 0x0001 +#define QMIWMS_EVENT_REPORT_IND 0x0001 +#define QMIWMS_RAW_SEND_REQ 0x0020 +#define QMIWMS_RAW_SEND_RESP 0x0020 +#define QMIWMS_RAW_WRITE_REQ 0x0021 +#define QMIWMS_RAW_WRITE_RESP 0x0021 +#define QMIWMS_RAW_READ_REQ 0x0022 +#define QMIWMS_RAW_READ_RESP 0x0022 +#define QMIWMS_MODIFY_TAG_REQ 0x0023 +#define QMIWMS_MODIFY_TAG_RESP 0x0023 +#define QMIWMS_DELETE_REQ 0x0024 +#define QMIWMS_DELETE_RESP 0x0024 +#define QMIWMS_GET_MESSAGE_PROTOCOL_REQ 0x0030 +#define QMIWMS_GET_MESSAGE_PROTOCOL_RESP 0x0030 +#define QMIWMS_LIST_MESSAGES_REQ 0x0031 +#define QMIWMS_LIST_MESSAGES_RESP 0x0031 +#define QMIWMS_GET_SMSC_ADDRESS_REQ 0x0034 +#define QMIWMS_GET_SMSC_ADDRESS_RESP 0x0034 +#define QMIWMS_SET_SMSC_ADDRESS_REQ 0x0035 +#define QMIWMS_SET_SMSC_ADDRESS_RESP 0x0035 +#define QMIWMS_GET_STORE_MAX_SIZE_REQ 0x0036 +#define QMIWMS_GET_STORE_MAX_SIZE_RESP 0x0036 + + +#define WMS_MESSAGE_PROTOCOL_CDMA 0x00 +#define WMS_MESSAGE_PROTOCOL_WCDMA 0x01 + +#if 0 +typedef struct _QMIWMS_GET_MESSAGE_PROTOCOL_REQ_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; +} QMIWMS_GET_MESSAGE_PROTOCOL_REQ_MSG, *PQMIWMS_GET_MESSAGE_PROTOCOL_REQ_MSG; + +typedef struct _QMIWMS_GET_MESSAGE_PROTOCOL_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT 
QMUXError; + UCHAR TLV2Type; + USHORT TLV2Length; + UCHAR MessageProtocol; +} QMIWMS_GET_MESSAGE_PROTOCOL_RESP_MSG, *PQMIWMS_GET_MESSAGE_PROTOCOL_RESP_MSG; + +typedef struct _QMIWMS_GET_STORE_MAX_SIZE_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR StorageType; +} QMIWMS_GET_STORE_MAX_SIZE_REQ_MSG, *PQMIWMS_GET_STORE_MAX_SIZE_REQ_MSG; + +typedef struct _QMIWMS_GET_STORE_MAX_SIZE_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; + UCHAR TLV2Type; + USHORT TLV2Length; + ULONG MemStoreMaxSize; +} QMIWMS_GET_STORE_MAX_SIZE_RESP_MSG, *PQMIWMS_GET_STORE_MAX_SIZE_RESP_MSG; + +typedef struct _REQUEST_TAG +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR TagType; +} REQUEST_TAG, *PREQUEST_TAG; + +typedef struct _QMIWMS_LIST_MESSAGES_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR StorageType; +} QMIWMS_LIST_MESSAGES_REQ_MSG, *PQMIWMS_LIST_MESSAGES_REQ_MSG; + +typedef struct _QMIWMS_MESSAGE +{ + ULONG MessageIndex; + UCHAR TagType; +} QMIWMS_MESSAGE, *PQMIWMS_MESSAGE; + +typedef struct _QMIWMS_LIST_MESSAGES_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; + UCHAR TLV2Type; + USHORT TLV2Length; + ULONG NumMessages; +} QMIWMS_LIST_MESSAGES_RESP_MSG, *PQMIWMS_LIST_MESSAGES_RESP_MSG; + +typedef struct _QMIWMS_RAW_READ_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR StorageType; + ULONG MemoryIndex; +} QMIWMS_RAW_READ_REQ_MSG, *PQMIWMS_RAW_READ_REQ_MSG; + +typedef struct _QMIWMS_RAW_READ_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; + UCHAR TLV2Type; + USHORT TLV2Length; + UCHAR TagType; + UCHAR Format; + USHORT MessageLength; + UCHAR Message; +} QMIWMS_RAW_READ_RESP_MSG, *PQMIWMS_RAW_READ_RESP_MSG; + +typedef struct _QMIWMS_MODIFY_TAG_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR StorageType; + ULONG MemoryIndex; + UCHAR TagType; +} QMIWMS_MODIFY_TAG_REQ_MSG, *PQMIWMS_MODIFY_TAG_REQ_MSG; + +typedef struct _QMIWMS_MODIFY_TAG_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; +} QMIWMS_MODIFY_TAG_RESP_MSG, *PQMIWMS_MODIFY_TAG_RESP_MSG; + +typedef struct _QMIWMS_RAW_SEND_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR SmsFormat; + USHORT SmsLength; + UCHAR SmsMessage; +} QMIWMS_RAW_SEND_REQ_MSG, *PQMIWMS_RAW_SEND_REQ_MSG; + +typedef struct _RAW_SEND_CAUSE_CODE +{ + UCHAR TLVType; + USHORT TLVLength; + USHORT CauseCode; +} RAW_SEND_CAUSE_CODE, *PRAW_SEND_CAUSE_CODE; + + +typedef struct _QMIWMS_RAW_SEND_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; +} QMIWMS_RAW_SEND_RESP_MSG, *PQMIWMS_RAW_SEND_RESP_MSG; + + +typedef struct _WMS_DELETE_MESSAGE_INDEX +{ + UCHAR TLVType; + USHORT TLVLength; + ULONG MemoryIndex; +} WMS_DELETE_MESSAGE_INDEX, *PWMS_DELETE_MESSAGE_INDEX; + +typedef struct _WMS_DELETE_MESSAGE_TAG +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR MessageTag; +} WMS_DELETE_MESSAGE_TAG, *PWMS_DELETE_MESSAGE_TAG; + +typedef struct _QMIWMS_DELETE_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR StorageType; +} QMIWMS_DELETE_REQ_MSG, *PQMIWMS_DELETE_REQ_MSG; + +typedef struct _QMIWMS_DELETE_RESP_MSG +{ + USHORT 
Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; +} QMIWMS_DELETE_RESP_MSG, *PQMIWMS_DELETE_RESP_MSG; + + +typedef struct _QMIWMS_GET_SMSC_ADDRESS_REQ_MSG +{ + USHORT Type; + USHORT Length; +} QMIWMS_GET_SMSC_ADDRESS_REQ_MSG, *PQMIWMS_GET_SMSC_ADDRESS_REQ_MSG; + +typedef struct _QMIWMS_SMSC_ADDRESS +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR SMSCAddressType[3]; + UCHAR SMSCAddressLength; + UCHAR SMSCAddressDigits; +} QMIWMS_SMSC_ADDRESS, *PQMIWMS_SMSC_ADDRESS; + + +typedef struct _QMIWMS_GET_SMSC_ADDRESS_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; + UCHAR SMSCAddress; +} QMIWMS_GET_SMSC_ADDRESS_RESP_MSG, *PQMIWMS_GET_SMSC_ADDRESS_RESP_MSG; + +typedef struct _QMIWMS_SET_SMSC_ADDRESS_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR SMSCAddress; +} QMIWMS_SET_SMSC_ADDRESS_REQ_MSG, *PQMIWMS_SET_SMSC_ADDRESS_REQ_MSG; + +typedef struct _QMIWMS_SET_SMSC_ADDRESS_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; +} QMIWMS_SET_SMSC_ADDRESS_RESP_MSG, *PQMIWMS_SET_SMSC_ADDRESS_RESP_MSG; + +typedef struct _QMIWMS_SET_EVENT_REPORT_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR ReportNewMessage; +} QMIWMS_SET_EVENT_REPORT_REQ_MSG, *PQMIWMS_SET_EVENT_REPORT_REQ_MSG; + +typedef struct _QMIWMS_SET_EVENT_REPORT_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; +} QMIWMS_SET_EVENT_REPORT_RESP_MSG, *PQMIWMS_SET_EVENT_REPORT_RESP_MSG; + +typedef struct _QMIWMS_EVENT_REPORT_IND_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR StorageType; + ULONG StorageIndex; +} QMIWMS_EVENT_REPORT_IND_MSG, *PQMIWMS_EVENT_REPORT_IND_MSG; +#endif + +// ======================= End of WMS ============================== + + +// ======================= NAS ============================== +#define QMINAS_SET_EVENT_REPORT_REQ 0x0002 +#define QMINAS_SET_EVENT_REPORT_RESP 0x0002 +#define QMINAS_EVENT_REPORT_IND 0x0002 +#define QMINAS_GET_SIGNAL_STRENGTH_REQ 0x0020 +#define QMINAS_GET_SIGNAL_STRENGTH_RESP 0x0020 +#define QMINAS_PERFORM_NETWORK_SCAN_REQ 0x0021 +#define QMINAS_PERFORM_NETWORK_SCAN_RESP 0x0021 +#define QMINAS_INITIATE_NW_REGISTER_REQ 0x0022 +#define QMINAS_INITIATE_NW_REGISTER_RESP 0x0022 +#define QMINAS_INITIATE_ATTACH_REQ 0x0023 +#define QMINAS_INITIATE_ATTACH_RESP 0x0023 +#define QMINAS_GET_SERVING_SYSTEM_REQ 0x0024 +#define QMINAS_GET_SERVING_SYSTEM_RESP 0x0024 +#define QMINAS_SERVING_SYSTEM_IND 0x0024 +#define QMINAS_GET_HOME_NETWORK_REQ 0x0025 +#define QMINAS_GET_HOME_NETWORK_RESP 0x0025 +#define QMINAS_GET_PREFERRED_NETWORK_REQ 0x0026 +#define QMINAS_GET_PREFERRED_NETWORK_RESP 0x0026 +#define QMINAS_SET_PREFERRED_NETWORK_REQ 0x0027 +#define QMINAS_SET_PREFERRED_NETWORK_RESP 0x0027 +#define QMINAS_GET_FORBIDDEN_NETWORK_REQ 0x0028 +#define QMINAS_GET_FORBIDDEN_NETWORK_RESP 0x0028 +#define QMINAS_SET_FORBIDDEN_NETWORK_REQ 0x0029 +#define QMINAS_SET_FORBIDDEN_NETWORK_RESP 0x0029 +#define QMINAS_SET_TECHNOLOGY_PREF_REQ 0x002A +#define QMINAS_SET_TECHNOLOGY_PREF_RESP 0x002A +#define QMINAS_GET_RF_BAND_INFO_REQ 0x0031 +#define QMINAS_GET_RF_BAND_INFO_RESP 0x0031 +#define QMINAS_GET_CELL_LOCATION_INFO_REQ 0x0043 +#define QMINAS_GET_CELL_LOCATION_INFO_RESP 0x0043 +#define QMINAS_GET_PLMN_NAME_REQ 0x0044 +#define QMINAS_GET_PLMN_NAME_RESP 
0x0044 +#define QUECTEL_PACKET_TRANSFER_START_IND 0X100 +#define QUECTEL_PACKET_TRANSFER_END_IND 0X101 +#define QMINAS_GET_SYS_INFO_REQ 0x004D +#define QMINAS_GET_SYS_INFO_RESP 0x004D +#define QMINAS_SYS_INFO_IND 0x004E +#define QMINAS_GET_SIG_INFO_REQ 0x004F +#define QMINAS_GET_SIG_INFO_RESP 0x004F + +typedef struct _QMINAS_GET_HOME_NETWORK_REQ_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; +} __attribute__ ((packed)) QMINAS_GET_HOME_NETWORK_REQ_MSG, *PQMINAS_GET_HOME_NETWORK_REQ_MSG; + +typedef struct _HOME_NETWORK_SYSTEMID +{ + UCHAR TLVType; + USHORT TLVLength; + USHORT SystemID; + USHORT NetworkID; +} __attribute__ ((packed)) HOME_NETWORK_SYSTEMID, *PHOME_NETWORK_SYSTEMID; + +typedef struct _HOME_NETWORK +{ + UCHAR TLVType; + USHORT TLVLength; + USHORT MobileCountryCode; + USHORT MobileNetworkCode; + UCHAR NetworkDesclen; + UCHAR NetworkDesc; +} __attribute__ ((packed)) HOME_NETWORK, *PHOME_NETWORK; + +#if 0 +typedef struct _HOME_NETWORK_EXT +{ + UCHAR TLVType; + USHORT TLVLength; + USHORT MobileCountryCode; + USHORT MobileNetworkCode; + UCHAR NetworkDescDisp; + UCHAR NetworkDescEncoding; + UCHAR NetworkDesclen; + UCHAR NetworkDesc; +} HOME_NETWORK_EXT, *PHOME_NETWORK_EXT; + +typedef struct _QMINAS_GET_HOME_NETWORK_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; +} QMINAS_GET_HOME_NETWORK_RESP_MSG, *PQMINAS_GET_HOME_NETWORK_RESP_MSG; + +typedef struct _QMINAS_GET_PREFERRED_NETWORK_REQ_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; +} QMINAS_GET_PREFERRED_NETWORK_REQ_MSG, *PQMINAS_GET_PREFERRED_NETWORK_REQ_MSG; + + +typedef struct _PREFERRED_NETWORK +{ + USHORT MobileCountryCode; + USHORT MobileNetworkCode; + USHORT RadioAccess; +} PREFERRED_NETWORK, *PPREFERRED_NETWORK; + +typedef struct _QMINAS_GET_PREFERRED_NETWORK_RESP_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT + UCHAR TLV2Type; // 0x01 - required parameter + USHORT TLV2Length; // length of the mfr string + USHORT NumPreferredNetwork; +} QMINAS_GET_PREFERRED_NETWORK_RESP_MSG, *PQMINAS_GET_PREFERRED_NETWORK_RESP_MSG; + +typedef struct _QMINAS_GET_FORBIDDEN_NETWORK_REQ_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; +} QMINAS_GET_FORBIDDEN_NETWORK_REQ_MSG, *PQMINAS_GET_FORBIDDEN_NETWORK_REQ_MSG; + +typedef struct _FORBIDDEN_NETWORK +{ + USHORT MobileCountryCode; + USHORT MobileNetworkCode; +} FORBIDDEN_NETWORK, *PFORBIDDEN_NETWORK; + +typedef struct _QMINAS_GET_FORBIDDEN_NETWORK_RESP_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT + UCHAR TLV2Type; // 0x01 - required parameter + USHORT TLV2Length; // length of the mfr string + USHORT NumForbiddenNetwork; +} QMINAS_GET_FORBIDDEN_NETWORK_RESP_MSG, *PQMINAS_GET_FORBIDDEN_NETWORK_RESP_MSG; + +typedef struct _QMINAS_GET_SERVING_SYSTEM_REQ_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; +} QMINAS_GET_SERVING_SYSTEM_REQ_MSG, *PQMINAS_GET_SERVING_SYSTEM_REQ_MSG; + +typedef struct _QMINAS_ROAMING_INDICATOR_MSG +{ + UCHAR TLVType; // 0x01 - required parameter + USHORT TLVLength; // 
length of the mfr string + UCHAR RoamingIndicator; +} QMINAS_ROAMING_INDICATOR_MSG, *PQMINAS_ROAMING_INDICATOR_MSG; +#endif + +typedef struct _QMINAS_DATA_CAP +{ + UCHAR TLVType; // 0x01 - required parameter + USHORT TLVLength; // length of the mfr string + UCHAR DataCapListLen; + UCHAR DataCap; +} __attribute__ ((packed)) QMINAS_DATA_CAP, *PQMINAS_DATA_CAP; + +typedef struct _QMINAS_CURRENT_PLMN_MSG +{ + UCHAR TLVType; // 0x01 - required parameter + USHORT TLVLength; // length of the mfr string + USHORT MobileCountryCode; + USHORT MobileNetworkCode; + UCHAR NetworkDesclen; + UCHAR NetworkDesc; +} __attribute__ ((packed)) QMINAS_CURRENT_PLMN_MSG, *PQMINAS_CURRENT_PLMN_MSG; + +typedef struct _QMINAS_GET_SERVING_SYSTEM_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; +} __attribute__ ((packed)) QMINAS_GET_SERVING_SYSTEM_RESP_MSG, *PQMINAS_GET_SERVING_SYSTEM_RESP_MSG; + +typedef struct _SERVING_SYSTEM +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR RegistrationState; + UCHAR CSAttachedState; + UCHAR PSAttachedState; + UCHAR RegistredNetwork; + UCHAR InUseRadioIF; + UCHAR RadioIF; +} __attribute__ ((packed)) SERVING_SYSTEM, *PSERVING_SYSTEM; + +typedef struct _QMINAS_GET_SYS_INFO_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; +} __attribute__ ((packed)) QMINAS_GET_SYS_INFO_RESP_MSG, *PQMINAS_GET_SYS_INFO_RESP_MSG; + +typedef struct _QMINAS_SYS_INFO_IND_MSG +{ + USHORT Type; + USHORT Length; +} __attribute__ ((packed)) QMINAS_SYS_INFO_IND_MSG, *PQMINAS_SYS_INFO_IND_MSG; + +typedef struct _SERVICE_STATUS_INFO +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR SrvStatus; + UCHAR true_srv_status; + UCHAR IsPrefDataPath; +} __attribute__ ((packed)) SERVICE_STATUS_INFO, *PSERVICE_STATUS_INFO; + +typedef struct _CDMA_SYSTEM_INFO +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR SrvDomainValid; + UCHAR SrvDomain; + UCHAR SrvCapabilityValid; + UCHAR SrvCapability; + UCHAR RoamStatusValid; + UCHAR RoamStatus; + UCHAR IsSysForbiddenValid; + UCHAR IsSysForbidden; + UCHAR IsSysPrlMatchValid; + UCHAR IsSysPrlMatch; + UCHAR PRevInUseValid; + UCHAR PRevInUse; + UCHAR BSPRevValid; + UCHAR BSPRev; + UCHAR CCSSupportedValid; + UCHAR CCSSupported; + UCHAR CDMASysIdValid; + USHORT SID; + USHORT NID; + UCHAR BSInfoValid; + USHORT BaseID; + ULONG BaseLAT; + ULONG BaseLONG; + UCHAR PacketZoneValid; + USHORT PacketZone; + UCHAR NetworkIdValid; + UCHAR MCC[3]; + UCHAR MNC[3]; +} __attribute__ ((packed)) CDMA_SYSTEM_INFO, *PCDMA_SYSTEM_INFO; + +typedef struct _HDR_SYSTEM_INFO +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR SrvDomainValid; + UCHAR SrvDomain; + UCHAR SrvCapabilityValid; + UCHAR SrvCapability; + UCHAR RoamStatusValid; + UCHAR RoamStatus; + UCHAR IsSysForbiddenValid; + UCHAR IsSysForbidden; + UCHAR IsSysPrlMatchValid; + UCHAR IsSysPrlMatch; + UCHAR HdrPersonalityValid; + UCHAR HdrPersonality; + UCHAR HdrActiveProtValid; + UCHAR HdrActiveProt; + UCHAR is856SysIdValid; + UCHAR is856SysId[16]; +} __attribute__ ((packed)) HDR_SYSTEM_INFO, *PHDR_SYSTEM_INFO; + +typedef struct _GSM_SYSTEM_INFO +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR SrvDomainValid; + UCHAR SrvDomain; + UCHAR SrvCapabilityValid; + UCHAR SrvCapability; + UCHAR RoamStatusValid; + UCHAR RoamStatus; + UCHAR IsSysForbiddenValid; + UCHAR IsSysForbidden; + UCHAR LacValid; + USHORT Lac; + UCHAR CellIdValid; + ULONG CellId; + UCHAR RegRejectInfoValid; + UCHAR RejectSrvDomain; + UCHAR RejCause; + UCHAR 
NetworkIdValid; + UCHAR MCC[3]; + UCHAR MNC[3]; + UCHAR EgprsSuppValid; + UCHAR EgprsSupp; + UCHAR DtmSuppValid; + UCHAR DtmSupp; +} __attribute__ ((packed)) GSM_SYSTEM_INFO, *PGSM_SYSTEM_INFO; + +typedef struct _WCDMA_SYSTEM_INFO +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR SrvDomainValid; + UCHAR SrvDomain; + UCHAR SrvCapabilityValid; + UCHAR SrvCapability; + UCHAR RoamStatusValid; + UCHAR RoamStatus; + UCHAR IsSysForbiddenValid; + UCHAR IsSysForbidden; + UCHAR LacValid; + USHORT Lac; + UCHAR CellIdValid; + ULONG CellId; + UCHAR RegRejectInfoValid; + UCHAR RejectSrvDomain; + UCHAR RejCause; + UCHAR NetworkIdValid; + UCHAR MCC[3]; + UCHAR MNC[3]; + UCHAR HsCallStatusValid; + UCHAR HsCallStatus; + UCHAR HsIndValid; + UCHAR HsInd; + UCHAR PscValid; + UCHAR Psc; +} __attribute__ ((packed)) WCDMA_SYSTEM_INFO, *PWCDMA_SYSTEM_INFO; + +typedef struct _LTE_SYSTEM_INFO +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR SrvDomainValid; + UCHAR SrvDomain; + UCHAR SrvCapabilityValid; + UCHAR SrvCapability; + UCHAR RoamStatusValid; + UCHAR RoamStatus; + UCHAR IsSysForbiddenValid; + UCHAR IsSysForbidden; + UCHAR LacValid; + USHORT Lac; + UCHAR CellIdValid; + ULONG CellId; + UCHAR RegRejectInfoValid; + UCHAR RejectSrvDomain; + UCHAR RejCause; + UCHAR NetworkIdValid; + UCHAR MCC[3]; + UCHAR MNC[3]; + UCHAR TacValid; + USHORT Tac; +} __attribute__ ((packed)) LTE_SYSTEM_INFO, *PLTE_SYSTEM_INFO; + +typedef struct _TDSCDMA_SYSTEM_INFO +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR SrvDomainValid; + UCHAR SrvDomain; + UCHAR SrvCapabilityValid; + UCHAR SrvCapability; + UCHAR RoamStatusValid; + UCHAR RoamStatus; + UCHAR IsSysForbiddenValid; + UCHAR IsSysForbidden; + UCHAR LacValid; + USHORT Lac; + UCHAR CellIdValid; + ULONG CellId; + UCHAR RegRejectInfoValid; + UCHAR RejectSrvDomain; + UCHAR RejCause; + UCHAR NetworkIdValid; + UCHAR MCC[3]; + UCHAR MNC[3]; + UCHAR HsCallStatusValid; + UCHAR HsCallStatus; + UCHAR HsIndValid; + UCHAR HsInd; + UCHAR CellParameterIdValid; + USHORT CellParameterId; + UCHAR CellBroadcastCapValid; + ULONG CellBroadcastCap; + UCHAR CsBarStatusValid; + ULONG CsBarStatus; + UCHAR PsBarStatusValid; + ULONG PsBarStatus; + UCHAR CipherDomainValid; + UCHAR CipherDomain; +} __attribute__ ((packed)) TDSCDMA_SYSTEM_INFO, *PTDSCDMA_SYSTEM_INFO; + +typedef enum { + NAS_SYS_SRV_STATUS_NO_SRV_V01 = 0, + NAS_SYS_SRV_STATUS_LIMITED_V01 = 1, + NAS_SYS_SRV_STATUS_SRV_V01 = 2, + NAS_SYS_SRV_STATUS_LIMITED_REGIONAL_V01 = 3, + NAS_SYS_SRV_STATUS_PWR_SAVE_V01 = 4, +}nas_service_status_enum_type_v01; + +typedef enum { + SYS_SRV_DOMAIN_NO_SRV_V01 = 0, + SYS_SRV_DOMAIN_CS_ONLY_V01 = 1, + SYS_SRV_DOMAIN_PS_ONLY_V01 = 2, + SYS_SRV_DOMAIN_CS_PS_V01 = 3, + SYS_SRV_DOMAIN_CAMPED_V01 = 4, +}nas_service_domain_enum_type_v01; + +typedef enum { + QMI_NAS_RADIO_INTERFACE_UNKNOWN = -1, + QMI_NAS_RADIO_INTERFACE_NONE = 0x00, + QMI_NAS_RADIO_INTERFACE_CDMA_1X = 0x01, + QMI_NAS_RADIO_INTERFACE_CDMA_1XEVDO = 0x02, + QMI_NAS_RADIO_INTERFACE_AMPS = 0x03, + QMI_NAS_RADIO_INTERFACE_GSM = 0x04, + QMI_NAS_RADIO_INTERFACE_UMTS = 0x05, + QMI_NAS_RADIO_INTERFACE_LTE = 0x08, + QMI_NAS_RADIO_INTERFACE_TD_SCDMA = 0x09, + QMI_NAS_RADIO_INTERFACE_5GNR = 0x0C, +} QMI_NAS_RADIO_INTERFACE_E; + +typedef enum { + QMI_NAS_ACTIVE_BAND_BC_0 = 0, + QMI_NAS_ACTIVE_BAND_BC_1 = 1, + QMI_NAS_ACTIVE_BAND_BC_2 = 2, + QMI_NAS_ACTIVE_BAND_BC_3 = 3, + QMI_NAS_ACTIVE_BAND_BC_4 = 4, + QMI_NAS_ACTIVE_BAND_BC_5 = 5, + QMI_NAS_ACTIVE_BAND_BC_6 = 6, + QMI_NAS_ACTIVE_BAND_BC_7 = 7, + QMI_NAS_ACTIVE_BAND_BC_8 = 8, + QMI_NAS_ACTIVE_BAND_BC_9 = 9, + 
QMI_NAS_ACTIVE_BAND_BC_10 = 10, + QMI_NAS_ACTIVE_BAND_BC_11 = 11, + QMI_NAS_ACTIVE_BAND_BC_12 = 12, + QMI_NAS_ACTIVE_BAND_BC_13 = 13, + QMI_NAS_ACTIVE_BAND_BC_14 = 14, + QMI_NAS_ACTIVE_BAND_BC_15 = 15, + QMI_NAS_ACTIVE_BAND_BC_16 = 16, + QMI_NAS_ACTIVE_BAND_BC_17 = 17, + QMI_NAS_ACTIVE_BAND_BC_18 = 18, + QMI_NAS_ACTIVE_BAND_BC_19 = 19, + QMI_NAS_ACTIVE_BAND_GSM_450 = 40, + QMI_NAS_ACTIVE_BAND_GSM_480 = 41, + QMI_NAS_ACTIVE_BAND_GSM_750 = 42, + QMI_NAS_ACTIVE_BAND_GSM_850 = 43, + QMI_NAS_ACTIVE_BAND_GSM_900_EXTENDED = 44, + QMI_NAS_ACTIVE_BAND_GSM_900_PRIMARY = 45, + QMI_NAS_ACTIVE_BAND_GSM_900_RAILWAYS = 46, + QMI_NAS_ACTIVE_BAND_GSM_DCS_1800 = 47, + QMI_NAS_ACTIVE_BAND_GSM_PCS_1900 = 48, + QMI_NAS_ACTIVE_BAND_WCDMA_2100 = 80, + QMI_NAS_ACTIVE_BAND_WCDMA_PCS_1900 = 81, + QMI_NAS_ACTIVE_BAND_WCDMA_DCS_1800 = 82, + QMI_NAS_ACTIVE_BAND_WCDMA_1700_US = 83, + QMI_NAS_ACTIVE_BAND_WCDMA_850 = 84, + QMI_NAS_ACTIVE_BAND_WCDMA_800 = 85, + QMI_NAS_ACTIVE_BAND_WCDMA_2600 = 86, + QMI_NAS_ACTIVE_BAND_WCDMA_900 = 87, + QMI_NAS_ACTIVE_BAND_WCDMA_1700_JAPAN = 88, + QMI_NAS_ACTIVE_BAND_WCDMA_1500_JAPAN = 90, + QMI_NAS_ACTIVE_BAND_WCDMA_850_JAPAN = 91, + QMI_NAS_ACTIVE_BAND_EUTRAN_1 = 120, + QMI_NAS_ACTIVE_BAND_EUTRAN_2 = 121, + QMI_NAS_ACTIVE_BAND_EUTRAN_3 = 122, + QMI_NAS_ACTIVE_BAND_EUTRAN_4 = 123, + QMI_NAS_ACTIVE_BAND_EUTRAN_5 = 124, + QMI_NAS_ACTIVE_BAND_EUTRAN_6 = 125, + QMI_NAS_ACTIVE_BAND_EUTRAN_7 = 126, + QMI_NAS_ACTIVE_BAND_EUTRAN_8 = 127, + QMI_NAS_ACTIVE_BAND_EUTRAN_9 = 128, + QMI_NAS_ACTIVE_BAND_EUTRAN_10 = 129, + QMI_NAS_ACTIVE_BAND_EUTRAN_11 = 130, + QMI_NAS_ACTIVE_BAND_EUTRAN_12 = 131, + QMI_NAS_ACTIVE_BAND_EUTRAN_13 = 132, + QMI_NAS_ACTIVE_BAND_EUTRAN_14 = 133, + QMI_NAS_ACTIVE_BAND_EUTRAN_17 = 134, + QMI_NAS_ACTIVE_BAND_EUTRAN_18 = 143, + QMI_NAS_ACTIVE_BAND_EUTRAN_19 = 144, + QMI_NAS_ACTIVE_BAND_EUTRAN_20 = 145, + QMI_NAS_ACTIVE_BAND_EUTRAN_21 = 146, + QMI_NAS_ACTIVE_BAND_EUTRAN_23 = 152, + QMI_NAS_ACTIVE_BAND_EUTRAN_24 = 147, + QMI_NAS_ACTIVE_BAND_EUTRAN_25 = 148, + QMI_NAS_ACTIVE_BAND_EUTRAN_26 = 153, + QMI_NAS_ACTIVE_BAND_EUTRAN_27 = 164, + QMI_NAS_ACTIVE_BAND_EUTRAN_28 = 158, + QMI_NAS_ACTIVE_BAND_EUTRAN_29 = 159, + QMI_NAS_ACTIVE_BAND_EUTRAN_30 = 160, + QMI_NAS_ACTIVE_BAND_EUTRAN_31 = 165, + QMI_NAS_ACTIVE_BAND_EUTRAN_32 = 154, + QMI_NAS_ACTIVE_BAND_EUTRAN_33 = 135, + QMI_NAS_ACTIVE_BAND_EUTRAN_34 = 136, + QMI_NAS_ACTIVE_BAND_EUTRAN_35 = 137, + QMI_NAS_ACTIVE_BAND_EUTRAN_36 = 138, + QMI_NAS_ACTIVE_BAND_EUTRAN_37 = 139, + QMI_NAS_ACTIVE_BAND_EUTRAN_38 = 140, + QMI_NAS_ACTIVE_BAND_EUTRAN_39 = 141, + QMI_NAS_ACTIVE_BAND_EUTRAN_40 = 142, + QMI_NAS_ACTIVE_BAND_EUTRAN_41 = 149, + QMI_NAS_ACTIVE_BAND_EUTRAN_42 = 150, + QMI_NAS_ACTIVE_BAND_EUTRAN_43 = 151, + QMI_NAS_ACTIVE_BAND_EUTRAN_46 = 163, + QMI_NAS_ACTIVE_BAND_EUTRAN_47 = 166, + QMI_NAS_ACTIVE_BAND_EUTRAN_48 = 167, + QMI_NAS_ACTIVE_BAND_EUTRAN_66 = 161, + QMI_NAS_ACTIVE_BAND_EUTRAN_71 = 168, + QMI_NAS_ACTIVE_BAND_EUTRAN_125 = 155, + QMI_NAS_ACTIVE_BAND_EUTRAN_126 = 156, + QMI_NAS_ACTIVE_BAND_EUTRAN_127 = 157, + QMI_NAS_ACTIVE_BAND_EUTRAN_250 = 162, + QMI_NAS_ACTIVE_BAND_TDSCDMA_A = 200, + QMI_NAS_ACTIVE_BAND_TDSCDMA_B = 201, + QMI_NAS_ACTIVE_BAND_TDSCDMA_C = 202, + QMI_NAS_ACTIVE_BAND_TDSCDMA_D = 203, + QMI_NAS_ACTIVE_BAND_TDSCDMA_E = 204, + QMI_NAS_ACTIVE_BAND_TDSCDMA_F = 205, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_1 = 250, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_2 = 251, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_3 = 252, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_5 = 253, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_7 = 254, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_8 = 255, + 
QMI_NAS_ACTIVE_BAND_NR5G_BAND_20 = 256, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_28 = 257, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_38 = 258, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_41 = 259, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_50 = 260, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_51 = 261, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_66 = 262, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_70 = 263, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_71 = 264, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_74 = 265, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_75 = 266, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_76 = 267, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_77 = 268, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_78 = 269, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_79 = 270, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_80 = 271, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_81 = 272, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_82 = 273, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_83 = 274, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_84 = 275, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_85 = 276, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_257= 277, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_258= 278, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_259= 279, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_260= 280, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_261= 281, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_12 = 282, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_25 = 283, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_34 = 284, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_39 = 285, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_40 = 286, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_65 = 287, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_86 = 288, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_48 = 289, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_14 = 290 +} QMI_NAS_ACTIVE_BAND_E; + +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + + uint8_t srv_domain_valid; + uint8_t srv_domain; + uint8_t srv_capability_valid; + uint8_t srv_capability; + uint8_t roam_status_valid; + uint8_t roam_status; + uint8_t is_sys_forbidden_valid; + uint8_t is_sys_forbidden; + + uint8_t lac_valid; + uint16_t lac; + uint8_t cell_id_valid; + uint32_t cell_id; + uint8_t reg_reject_info_valid; + uint8_t reject_srv_domain; + uint8_t rej_cause; + uint8_t network_id_valid; + UCHAR MCC[3]; + UCHAR MNC[3]; + + uint8_t tac_valid; + uint16_t tac; +} __attribute__ ((packed)) NR5G_SYSTEM_INFO, *PNR5G_SYSTEM_INFO; + +#if 0 +typedef struct _QMINAS_SERVING_SYSTEM_IND_MSG +{ + USHORT Type; + USHORT Length; +} QMINAS_SERVING_SYSTEM_IND_MSG, *PQMINAS_SERVING_SYSTEM_IND_MSG; + +typedef struct _QMINAS_SET_PREFERRED_NETWORK_REQ_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT NumPreferredNetwork; + USHORT MobileCountryCode; + USHORT MobileNetworkCode; + USHORT RadioAccess; +} QMINAS_SET_PREFERRED_NETWORK_REQ_MSG, *PQMINAS_SET_PREFERRED_NETWORK_REQ_MSG; + +typedef struct _QMINAS_SET_PREFERRED_NETWORK_RESP_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT +} QMINAS_SET_PREFERRED_NETWORK_RESP_MSG, *PQMINAS_SET_PREFERRED_NETWORK_RESP_MSG; + +typedef struct _QMINAS_SET_FORBIDDEN_NETWORK_REQ_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT NumForbiddenNetwork; + USHORT MobileCountryCode; + USHORT MobileNetworkCode; +} QMINAS_SET_FORBIDDEN_NETWORK_REQ_MSG, *PQMINAS_SET_FORBIDDEN_NETWORK_REQ_MSG; + +typedef struct _QMINAS_SET_FORBIDDEN_NETWORK_RESP_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code 
+ USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT +} QMINAS_SET_FORBIDDEN_NETWORK_RESP_MSG, *PQMINAS_SET_FORBIDDEN_NETWORK_RESP_MSG; + +typedef struct _QMINAS_PERFORM_NETWORK_SCAN_REQ_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; +} QMINAS_PERFORM_NETWORK_SCAN_REQ_MSG, *PQMINAS_PERFORM_NETWORK_SCAN_REQ_MSG; + +typedef struct _VISIBLE_NETWORK +{ + USHORT MobileCountryCode; + USHORT MobileNetworkCode; + UCHAR NetworkStatus; + UCHAR NetworkDesclen; +} VISIBLE_NETWORK, *PVISIBLE_NETWORK; + +typedef struct _QMINAS_PERFORM_NETWORK_SCAN_RESP_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT +} QMINAS_PERFORM_NETWORK_SCAN_RESP_MSG, *PQMINAS_PERFORM_NETWORK_SCAN_RESP_MSG; + +typedef struct _QMINAS_PERFORM_NETWORK_SCAN_NETWORK_INFO +{ + UCHAR TLVType; // 0x010 - required parameter + USHORT TLVLength; // length + USHORT NumNetworkInstances; +} QMINAS_PERFORM_NETWORK_SCAN_NETWORK_INFO, *PQMINAS_PERFORM_NETWORK_SCAN_NETWORK_INFO; + +typedef struct _QMINAS_PERFORM_NETWORK_SCAN_RAT_INFO +{ + UCHAR TLVType; // 0x011 - required parameter + USHORT TLVLength; // length + USHORT NumInst; +} QMINAS_PERFORM_NETWORK_SCAN_RAT_INFO, *PQMINAS_PERFORM_NETWORK_SCAN_RAT_INFO; + +typedef struct _QMINAS_PERFORM_NETWORK_SCAN_RAT +{ + USHORT MCC; + USHORT MNC; + UCHAR RAT; +} QMINAS_PERFORM_NETWORK_SCAN_RAT, *PQMINAS_PERFORM_NETWORK_SCAN_RAT; + + +typedef struct _QMINAS_MANUAL_NW_REGISTER +{ + UCHAR TLV2Type; // 0x02 - result code + USHORT TLV2Length; // 4 + USHORT MobileCountryCode; + USHORT MobileNetworkCode; + UCHAR RadioAccess; +} QMINAS_MANUAL_NW_REGISTER, *PQMINAS_MANUAL_NW_REGISTER; + +typedef struct _QMINAS_INITIATE_NW_REGISTER_REQ_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + UCHAR RegisterAction; +} QMINAS_INITIATE_NW_REGISTER_REQ_MSG, *PQMINAS_INITIATE_NW_REGISTER_REQ_MSG; + +typedef struct _QMINAS_INITIATE_NW_REGISTER_RESP_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT +} QMINAS_INITIATE_NW_REGISTER_RESP_MSG, *PQMINAS_INITIATE_NW_REGISTER_RESP_MSG; + +typedef struct _QMINAS_SET_TECHNOLOGY_PREF_REQ_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT TechPref; + UCHAR Duration; +} QMINAS_SET_TECHNOLOGY_PREF_REQ_MSG, *PQMINAS_SET_TECHNOLOGY_PREF_REQ_MSG; + +typedef struct _QMINAS_SET_TECHNOLOGY_PREF_RESP_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT +} QMINAS_SET_TECHNOLOGY_PREF_RESP_MSG, *PQMINAS_SET_TECHNOLOGY_PREF_RESP_MSG; + +typedef struct _QMINAS_GET_SIGNAL_STRENGTH_REQ_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; +} 
QMINAS_GET_SIGNAL_STRENGTH_REQ_MSG, *PQMINAS_GET_SIGNAL_STRENGTH_REQ_MSG; + +typedef struct _QMINAS_SIGNAL_STRENGTH +{ + CHAR SigStrength; + UCHAR RadioIf; +} QMINAS_SIGNAL_STRENGTH, *PQMINAS_SIGNAL_STRENGTH; + +typedef struct _QMINAS_SIGNAL_STRENGTH_LIST +{ + UCHAR TLV3Type; + USHORT TLV3Length; + USHORT NumInstance; +} QMINAS_SIGNAL_STRENGTH_LIST, *PQMINAS_SIGNAL_STRENGTH_LIST; + + +typedef struct _QMINAS_GET_SIGNAL_STRENGTH_RESP_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT + UCHAR TLV2Type; + USHORT TLV2Length; + CHAR SignalStrength; + UCHAR RadioIf; +} QMINAS_GET_SIGNAL_STRENGTH_RESP_MSG, *PQMINAS_GET_SIGNAL_STRENGTH_RESP_MSG; + + +typedef struct _QMINAS_SET_EVENT_REPORT_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR ReportSigStrength; + UCHAR NumTresholds; + CHAR TresholdList[2]; +} QMINAS_SET_EVENT_REPORT_REQ_MSG, *PQMINAS_SET_EVENT_REPORT_REQ_MSG; + +typedef struct _QMINAS_SET_EVENT_REPORT_RESP_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT +} QMINAS_SET_EVENT_REPORT_RESP_MSG, *PQMINAS_SET_EVENT_REPORT_RESP_MSG; + +typedef struct _QMINAS_SIGNAL_STRENGTH_TLV +{ + UCHAR TLVType; + USHORT TLVLength; + CHAR SigStrength; + UCHAR RadioIf; +} QMINAS_SIGNAL_STRENGTH_TLV, *PQMINAS_SIGNAL_STRENGTH_TLV; + +typedef struct _QMINAS_REJECT_CAUSE_TLV +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR ServiceDomain; + USHORT RejectCause; +} QMINAS_REJECT_CAUSE_TLV, *PQMINAS_REJECT_CAUSE_TLV; + +typedef struct _QMINAS_EVENT_REPORT_IND_MSG +{ + USHORT Type; + USHORT Length; +} QMINAS_EVENT_REPORT_IND_MSG, *PQMINAS_EVENT_REPORT_IND_MSG; + +typedef struct _QMINAS_GET_RF_BAND_INFO_REQ_MSG +{ + USHORT Type; + USHORT Length; +} QMINAS_GET_RF_BAND_INFO_REQ_MSG, *PQMINAS_GET_RF_BAND_INFO_REQ_MSG; + +typedef struct _QMINASRF_BAND_INFO +{ + UCHAR RadioIf; + USHORT ActiveBand; + USHORT ActiveChannel; +} QMINASRF_BAND_INFO, *PQMINASRF_BAND_INFO; + +typedef struct _QMINAS_GET_RF_BAND_INFO_RESP_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT + UCHAR TLV2Type; + USHORT TLV2Length; + UCHAR NumInstances; +} QMINAS_GET_RF_BAND_INFO_RESP_MSG, *PQMINAS_GET_RF_BAND_INFO_RESP_MSG; + + +typedef struct _QMINAS_GET_PLMN_NAME_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT MCC; + USHORT MNC; +} QMINAS_GET_PLMN_NAME_REQ_MSG, *PQMINAS_GET_PLMN_NAME_REQ_MSG; + +typedef struct _QMINAS_GET_PLMN_NAME_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT +} QMINAS_GET_PLMN_NAME_RESP_MSG, *PQMINAS_GET_PLMN_NAME_RESP_MSG; + +typedef struct _QMINAS_GET_PLMN_NAME_SPN +{ + UCHAR TLVType; + USHORT 
TLVLength; + UCHAR SPN_Enc; + UCHAR SPN_Len; +} QMINAS_GET_PLMN_NAME_SPN, *PQMINAS_GET_PLMN_NAME_SPN; + +typedef struct _QMINAS_GET_PLMN_NAME_PLMN +{ + UCHAR PLMN_Enc; + UCHAR PLMN_Ci; + UCHAR PLMN_SpareBits; + UCHAR PLMN_Len; +} QMINAS_GET_PLMN_NAME_PLMN, *PQMINAS_GET_PLMN_NAME_PLMN; + +typedef struct _QMINAS_INITIATE_ATTACH_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR PsAttachAction; +} QMINAS_INITIATE_ATTACH_REQ_MSG, *PQMINAS_INITIATE_ATTACH_REQ_MSG; + +typedef struct _QMINAS_INITIATE_ATTACH_RESP_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT +} QMINAS_INITIATE_ATTACH_RESP_MSG, *PQMINAS_INITIATE_ATTACH_RESP_MSG; +#endif +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + CHAR rssi; + SHORT ecio; +} __attribute__ ((packed)) QMINAS_SIG_INFO_CDMA_TLV_MSG, *PQMINAS_SIG_INFO_CDMA_TLV_MSG; + +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + CHAR rssi; + SHORT ecio; + CHAR sinr; + INT io; +} __attribute__ ((packed)) QMINAS_SIG_INFO_HDR_TLV_MSG, *PQMINAS_SIG_INFO_HDR_TLV_MSG; + +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + CHAR rssi; +} __attribute__ ((packed)) QMINAS_SIG_INFO_GSM_TLV_MSG, *PQMINAS_SIG_INFO_GSM_TLV_MSG; + +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + CHAR rssi; + SHORT ecio; +} __attribute__ ((packed)) QMINAS_SIG_INFO_WCDMA_TLV_MSG, *PQMINAS_SIG_INFO_WCDMA_TLV_MSG; + +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + CHAR rssi; + CHAR rsrq; + SHORT rsrp; + SHORT snr; +} __attribute__ ((packed)) QMINAS_SIG_INFO_LTE_TLV_MSG, *PQMINAS_SIG_INFO_LTE_TLV_MSG; + +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + CHAR rscp; +} __attribute__ ((packed)) QMINAS_SIG_INFO_TDSCDMA_TLV_MSG, *PQMINAS_SIG_INFO_TDSCDMA_TLV_MSG; + +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + SHORT rsrp; + SHORT snr; +} __attribute__ ((packed)) QMINAS_SIG_INFO_5G_NSA_TLV_MSG, *PQMINAS_SIG_INFO_5G_NSA_TLV_MSG; + +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + SHORT nr5g_rsrq; +} __attribute__ ((packed)) QMINAS_SIG_INFO_5G_SA_TLV_MSG, *PQMINAS_SIG_INFO_5G_SA_TLV_MSG; + +typedef struct { + uint8 radio_if; + uint16 active_band; + uint16 active_channel; +} __attribute__ ((packed)) NasGetRfBandInfo; + +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + + uint8 num_instances; + NasGetRfBandInfo bands_array[0]; +} __attribute__ ((packed)) NasGetRfBandInfoList; + +typedef struct { + uint8 radio_if; + uint16 dedicated_band; +} __attribute__ ((packed)) NasGetRfBandInfoDedicated; + +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + + uint8 num_instances; + NasGetRfBandInfoDedicated bands_array[0]; +} __attribute__ ((packed)) NasGetRfBandInfoDedicatedList; + +typedef struct { + uint8 radio_if; + uint16 active_band; + uint32 active_channel; +} __attribute__ ((packed)) NasGetRfBandInfoExtended; + +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + + uint8 num_instances; + NasGetRfBandInfoExtended bands_array[0]; +} __attribute__ ((packed)) NasGetRfBandInfoExtendedList; + +typedef struct { + uint8 radio_if; + uint32 bandwidth; +} __attribute__ ((packed)) NasGetRfBandInfoBandWidth; + +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + + uint8 num_instances; + NasGetRfBandInfoBandWidth bands_array[0]; +} __attribute__ ((packed)) 
NasGetRfBandInfoBandWidthList; + +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + + uint8 plmn[3]; + uint8 tac[3]; + uint64 global_cell_id; + uint16 physical_cell_id; + int16 rsrq; + int16 rsrp; + int16 snr; +} __attribute__ ((packed)) NasGetCellLocationNr5gServingCell; + +typedef struct { + uint16 physical_cell_id; + int16 rsrq; + int16 rsrp; + int16 rssi; + int16 cell_selection_rx_level; +} __attribute__ ((packed)) NasGetCellLocationLteInfoCell; + +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + + uint8 ue_in_idle; + uint8 plmn[3]; + uint16 tracking_area_code; + uint32 global_cell_id; + uint16 absolute_rf_channel_number; + uint16 serving_cell_id; + uint8 cell_reselection_priority; + uint8 s_non_intra_search_threshold; + uint8 serving_cell_low_threshold; + uint8 s_intra_search_threshold; + uint8 cells_len; + NasGetCellLocationLteInfoCell cells_array[0]; +} __attribute__ ((packed)) NasGetCellLocationLteInfoIntrafrequency; + +typedef struct _QmiMessageNasGetCellLocationInfoOutputInterfrequencyLteInfoFrequencyElement { + uint16 eutra_absolute_rf_channel_number; + uint8 cell_selection_rx_level_low_threshold; + uint8 cell_selection_rx_level_high_threshold; + uint8 cell_reselection_priority; + uint8 cells_len; + NasGetCellLocationLteInfoCell cells_array[0]; +} __attribute__ ((packed)) NasGetCellLocationLteInfoInterfrequencyFrequencyElement; + +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + + uint8 ue_in_idle; + uint8 freqs_len; + NasGetCellLocationLteInfoInterfrequencyFrequencyElement freqs[0]; +} __attribute__ ((packed)) NasGetCellLocationLteInfoInterfrequency; + +// ======================= End of NAS ============================== + +// ======================= UIM ============================== +#define QMIUIM_READ_TRANSPARENT_REQ 0x0020 +#define QMIUIM_READ_TRANSPARENT_RESP 0x0020 +#define QMIUIM_READ_TRANSPARENT_IND 0x0020 +#define QMIUIM_READ_RECORD_REQ 0x0021 +#define QMIUIM_READ_RECORD_RESP 0x0021 +#define QMIUIM_READ_RECORD_IND 0x0021 +#define QMIUIM_WRITE_TRANSPARENT_REQ 0x0022 +#define QMIUIM_WRITE_TRANSPARENT_RESP 0x0022 +#define QMIUIM_WRITE_TRANSPARENT_IND 0x0022 +#define QMIUIM_WRITE_RECORD_REQ 0x0023 +#define QMIUIM_WRITE_RECORD_RESP 0x0023 +#define QMIUIM_WRITE_RECORD_IND 0x0023 +#define QMIUIM_SET_PIN_PROTECTION_REQ 0x0025 +#define QMIUIM_SET_PIN_PROTECTION_RESP 0x0025 +#define QMIUIM_SET_PIN_PROTECTION_IND 0x0025 +#define QMIUIM_VERIFY_PIN_REQ 0x0026 +#define QMIUIM_VERIFY_PIN_RESP 0x0026 +#define QMIUIM_VERIFY_PIN_IND 0x0026 +#define QMIUIM_UNBLOCK_PIN_REQ 0x0027 +#define QMIUIM_UNBLOCK_PIN_RESP 0x0027 +#define QMIUIM_UNBLOCK_PIN_IND 0x0027 +#define QMIUIM_CHANGE_PIN_REQ 0x0028 +#define QMIUIM_CHANGE_PIN_RESP 0x0028 +#define QMIUIM_CHANGE_PIN_IND 0x0028 +#define QMIUIM_DEPERSONALIZATION_REQ 0x0029 +#define QMIUIM_DEPERSONALIZATION_RESP 0x0029 +#define QMIUIM_EVENT_REG_REQ 0x002E +#define QMIUIM_EVENT_REG_RESP 0x002E +#define QMIUIM_GET_CARD_STATUS_REQ 0x002F +#define QMIUIM_GET_CARD_STATUS_RESP 0x002F +#define QMIUIM_STATUS_CHANGE_IND 0x0032 +#define QMIUIM_POWER_DOWN 0x0030 +#define QMIUIM_POWER_UP 0x0031 + + +typedef struct _QMIUIM_GET_CARD_STATUS_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; +} __attribute__ ((packed)) QMIUIM_GET_CARD_STATUS_RESP_MSG, *PQMIUIM_GET_CARD_STATUS_RESP_MSG; + +#define UIM_CARD_STATE_ABSENT 0x00 +#define UIM_CARD_STATE_PRESENT 0x01 +#define UIM_CARD_STATE_ERROR 0x02 + +typedef struct _QMIUIM_CARD_STATUS +{ + UCHAR TLVType; + USHORT TLVLength; + 
USHORT IndexGWPri; + USHORT Index1XPri; + USHORT IndexGWSec; + USHORT Index1XSec; + UCHAR NumSlot; + UCHAR CardState; + UCHAR UPINState; + UCHAR UPINRetries; + UCHAR UPUKRetries; + UCHAR ErrorCode; + UCHAR NumApp; + UCHAR AppType; + UCHAR AppState; + UCHAR PersoState; + UCHAR PersoFeature; + UCHAR PersoRetries; + UCHAR PersoUnblockRetries; + UCHAR AIDLength; +} __attribute__ ((packed)) QMIUIM_CARD_STATUS, *PQMIUIM_CARD_STATUS; + +typedef struct _QMIUIM_PIN_STATE +{ + UCHAR UnivPIN; + UCHAR PIN1State; + UCHAR PIN1Retries; + UCHAR PUK1Retries; + UCHAR PIN2State; + UCHAR PIN2Retries; + UCHAR PUK2Retries; +} __attribute__ ((packed)) QMIUIM_PIN_STATE, *PQMIUIM_PIN_STATE; + +typedef struct _QMIUIM_VERIFY_PIN_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR Session_Type; + UCHAR Aid_Len; + UCHAR TLV2Type; + USHORT TLV2Length; + UCHAR PINID; + UCHAR PINLen; + UCHAR PINValue; +} __attribute__ ((packed)) QMIUIM_VERIFY_PIN_REQ_MSG, *PQMIUIM_VERIFY_PIN_REQ_MSG; + +typedef struct _QMIUIM_VERIFY_PIN_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; + UCHAR TLV2Type; + USHORT TLV2Length; + UCHAR PINVerifyRetriesLeft; + UCHAR PINUnblockRetriesLeft; +} __attribute__ ((packed)) QMIUIM_VERIFY_PIN_RESP_MSG, *PQMIUIM_VERIFY_PIN_RESP_MSG; + +typedef struct _QMIUIM_READ_TRANSPARENT_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR Session_Type; + UCHAR Aid_Len; + UCHAR TLV2Type; + USHORT TLV2Length; + USHORT file_id; + UCHAR path_len; + UCHAR path[]; +} __attribute__ ((packed)) QMIUIM_READ_TRANSPARENT_REQ_MSG, *PQMIUIM_READ_TRANSPARENT_REQ_MSG; + +typedef struct _READ_TRANSPARENT_TLV +{ + UCHAR TLVType; + USHORT TLVLength; + USHORT Offset; + USHORT Length; +} __attribute__ ((packed)) READ_TRANSPARENT_TLV, *PREAD_TRANSPARENT_TLV; + +typedef struct _QMIUIM_CONTENT +{ + UCHAR TLVType; + USHORT TLVLength; + USHORT content_len; + UCHAR content[]; +} __attribute__ ((packed)) QMIUIM_CONTENT, *PQMIUIM_CONTENT; + +typedef struct _QMIUIM_READ_TRANSPARENT_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; +} __attribute__ ((packed)) QMIUIM_READ_TRANSPARENT_RESP_MSG, *PQMIUIM_READ_TRANSPARENT_RESP_MSG; + +typedef struct _QMIUIM_SET_CARD_SLOT_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR slot; +} __attribute__ ((packed)) QMIUIM_SET_CARD_SLOT_REQ_MSG, *PQMIUIM_SET_CARD_SLOT_REQ_MSG; + +// ======================= COEX ============================== +#define QMI_COEX_GET_WWAN_STATE_REQ 0x22 +#define QMI_COEX_GET_WWAN_STATE_RESP 0x22 + +typedef struct { + + uint32_t freq; + /**< Band center frequency in MHz. */ + + uint32_t bandwidth; + /**< Bandwidth in MHz. 
*/ +}coex_band_type_v01; /* Type */ + +typedef struct _QMI_COEX_GET_WWAN_STATE_RESP_MSG_LTE_BAND +{ + UCHAR TLVType; + USHORT TLVLength; + coex_band_type_v01 ul_band; + coex_band_type_v01 dl_band; +} __attribute__ ((packed)) QMI_COEX_GET_WWAN_STATE_RESP_MSG_LTE_BAND, *PQMI_COEX_GET_WWAN_STATE_RESP_MSG_LTE_BAND; + + +typedef struct _QMUX_MSG +{ + QCQMUX_HDR QMUXHdr; + union + { + // Message Header + QCQMUX_MSG_HDR QMUXMsgHdr; + QCQMUX_MSG_HDR_RESP QMUXMsgHdrResp; + + // QMIWDS Message +#if 0 + QMIWDS_GET_PKT_SRVC_STATUS_REQ_MSG PacketServiceStatusReq; + QMIWDS_GET_PKT_SRVC_STATUS_RESP_MSG PacketServiceStatusRsp; + QMIWDS_GET_PKT_SRVC_STATUS_IND_MSG PacketServiceStatusInd; + QMIWDS_EVENT_REPORT_IND_MSG EventReportInd; + QMIWDS_GET_CURRENT_CHANNEL_RATE_REQ_MSG GetCurrChannelRateReq; + QMIWDS_GET_CURRENT_CHANNEL_RATE_RESP_MSG GetCurrChannelRateRsp; + QMIWDS_GET_PKT_STATISTICS_REQ_MSG GetPktStatsReq; + QMIWDS_GET_PKT_STATISTICS_RESP_MSG GetPktStatsRsp; + QMIWDS_SET_EVENT_REPORT_REQ_MSG EventReportReq; + QMIWDS_SET_EVENT_REPORT_RESP_MSG EventReportRsp; +#endif + //#ifdef QC_IP_MODE + QMIWDS_GET_RUNTIME_SETTINGS_REQ_MSG GetRuntimeSettingsReq; + QMIWDS_GET_RUNTIME_SETTINGS_RESP_MSG GetRuntimeSettingsRsp; + //#endif // QC_IP_MODE + QMIWDS_SET_CLIENT_IP_FAMILY_PREF_REQ_MSG SetClientIpFamilyPrefReq; + QMIWDS_SET_CLIENT_IP_FAMILY_PREF_RESP_MSG SetClientIpFamilyPrefResp; + QMIWDS_SET_AUTO_CONNECT_REQ_MSG SetAutoConnectReq; +#if 0 + QMIWDS_GET_MIP_MODE_REQ_MSG GetMipModeReq; + QMIWDS_GET_MIP_MODE_RESP_MSG GetMipModeResp; +#endif + QMIWDS_START_NETWORK_INTERFACE_REQ_MSG StartNwInterfaceReq; + QMIWDS_START_NETWORK_INTERFACE_RESP_MSG StartNwInterfaceResp; + QMIWDS_STOP_NETWORK_INTERFACE_REQ_MSG StopNwInterfaceReq; + QMIWDS_STOP_NETWORK_INTERFACE_RESP_MSG StopNwInterfaceResp; + QMIWDS_GET_DEFAULT_SETTINGS_REQ_MSG GetDefaultSettingsReq; + QMIWDS_GET_DEFAULT_SETTINGS_RESP_MSG GetDefaultSettingsResp; + QMIWDS_MODIFY_PROFILE_SETTINGS_REQ_MSG ModifyProfileSettingsReq; + QMIWDS_MODIFY_PROFILE_SETTINGS_RESP_MSG ModifyProfileSettingsResp; + QMIWDS_GET_PROFILE_SETTINGS_REQ_MSG GetProfileSettingsReq; + QMIWDS_CREATE_PROFILE_SETTINGS_REQ_MSG CreatetProfileSettingsReq; +#if 0 + QMIWDS_GET_DATA_BEARER_REQ_MSG GetDataBearerReq; + QMIWDS_GET_DATA_BEARER_RESP_MSG GetDataBearerResp; + QMIWDS_DUN_CALL_INFO_REQ_MSG DunCallInfoReq; + QMIWDS_DUN_CALL_INFO_RESP_MSG DunCallInfoResp; +#endif + QMIWDS_BIND_MUX_DATA_PORT_REQ_MSG BindMuxDataPortReq; + + // QMIDMS Messages +#if 0 + QMIDMS_GET_DEVICE_MFR_REQ_MSG GetDeviceMfrReq; + QMIDMS_GET_DEVICE_MFR_RESP_MSG GetDeviceMfrRsp; + QMIDMS_GET_DEVICE_MODEL_ID_REQ_MSG GetDeviceModeIdReq; + QMIDMS_GET_DEVICE_MODEL_ID_RESP_MSG GetDeviceModeIdRsp; + QMIDMS_GET_DEVICE_REV_ID_REQ_MSG GetDeviceRevIdReq; + QMIDMS_GET_DEVICE_REV_ID_RESP_MSG GetDeviceRevIdRsp; + QMIDMS_GET_MSISDN_REQ_MSG GetMsisdnReq; + QMIDMS_GET_MSISDN_RESP_MSG GetMsisdnRsp; + QMIDMS_GET_DEVICE_SERIAL_NUMBERS_REQ_MSG GetDeviceSerialNumReq; + QMIDMS_GET_DEVICE_SERIAL_NUMBERS_RESP_MSG GetDeviceSerialNumRsp; + QMIDMS_GET_DEVICE_CAP_REQ_MSG GetDeviceCapReq; + QMIDMS_GET_DEVICE_CAP_RESP_MSG GetDeviceCapResp; + QMIDMS_GET_BAND_CAP_REQ_MSG GetBandCapReq; + QMIDMS_GET_BAND_CAP_RESP_MSG GetBandCapRsp; + QMIDMS_GET_ACTIVATED_STATUS_REQ_MSG GetActivatedStatusReq; + QMIDMS_GET_ACTIVATED_STATUS_RESP_MSG GetActivatedStatusResp; + QMIDMS_GET_OPERATING_MODE_REQ_MSG GetOperatingModeReq; + QMIDMS_GET_OPERATING_MODE_RESP_MSG GetOperatingModeResp; +#endif + QMIDMS_SET_OPERATING_MODE_REQ_MSG SetOperatingModeReq; + QMIDMS_SET_OPERATING_MODE_RESP_MSG 
SetOperatingModeResp; +#if 0 + QMIDMS_UIM_GET_ICCID_REQ_MSG GetICCIDReq; + QMIDMS_UIM_GET_ICCID_RESP_MSG GetICCIDResp; + QMIDMS_ACTIVATE_AUTOMATIC_REQ_MSG ActivateAutomaticReq; + QMIDMS_ACTIVATE_AUTOMATIC_RESP_MSG ActivateAutomaticResp; + QMIDMS_ACTIVATE_MANUAL_REQ_MSG ActivateManualReq; + QMIDMS_ACTIVATE_MANUAL_RESP_MSG ActivateManualResp; +#endif + QMIDMS_UIM_GET_PIN_STATUS_REQ_MSG UIMGetPinStatusReq; + QMIDMS_UIM_GET_PIN_STATUS_RESP_MSG UIMGetPinStatusResp; + QMIDMS_UIM_VERIFY_PIN_REQ_MSG UIMVerifyPinReq; + QMIDMS_UIM_VERIFY_PIN_RESP_MSG UIMVerifyPinResp; +#if 0 + QMIDMS_UIM_SET_PIN_PROTECTION_REQ_MSG UIMSetPinProtectionReq; + QMIDMS_UIM_SET_PIN_PROTECTION_RESP_MSG UIMSetPinProtectionResp; + QMIDMS_UIM_CHANGE_PIN_REQ_MSG UIMChangePinReq; + QMIDMS_UIM_CHANGE_PIN_RESP_MSG UIMChangePinResp; + QMIDMS_UIM_UNBLOCK_PIN_REQ_MSG UIMUnblockPinReq; + QMIDMS_UIM_UNBLOCK_PIN_RESP_MSG UIMUnblockPinResp; + QMIDMS_SET_EVENT_REPORT_REQ_MSG DmsSetEventReportReq; + QMIDMS_SET_EVENT_REPORT_RESP_MSG DmsSetEventReportResp; + QMIDMS_EVENT_REPORT_IND_MSG DmsEventReportInd; +#endif + QMIDMS_UIM_GET_STATE_REQ_MSG UIMGetStateReq; + QMIDMS_UIM_GET_STATE_RESP_MSG UIMGetStateResp; + QMIDMS_UIM_GET_IMSI_REQ_MSG UIMGetIMSIReq; + QMIDMS_UIM_GET_IMSI_RESP_MSG UIMGetIMSIResp; +#if 0 + QMIDMS_UIM_GET_CK_STATUS_REQ_MSG UIMGetCkStatusReq; + QMIDMS_UIM_GET_CK_STATUS_RESP_MSG UIMGetCkStatusResp; + QMIDMS_UIM_SET_CK_PROTECTION_REQ_MSG UIMSetCkProtectionReq; + QMIDMS_UIM_SET_CK_PROTECTION_RESP_MSG UIMSetCkProtectionResp; + QMIDMS_UIM_UNBLOCK_CK_REQ_MSG UIMUnblockCkReq; + QMIDMS_UIM_UNBLOCK_CK_RESP_MSG UIMUnblockCkResp; +#endif + + // QMIQOS Messages +#if 1 + QMI_QOS_SET_EVENT_REPORT_REQ_MSG QosSetEventReportReq; + QMI_QOS_SET_EVENT_REPORT_RESP_MSG QosSetEventReportRsp; + QMI_QOS_SET_EVENT_REPORT_IND_MSG QosSetEventReportInd; + QMI_QOS_BIND_DATA_PORT_REQ_MSG QosBindDataPortReq; + QMI_QOS_BIND_DATA_PORT_RESP_MSG QosBindDataPortRsp; + QMI_QOS_INDICATION_REGISTER_REQ_MSG QosIndRegReq; + QMI_QOS_INDICATION_REGISTER_RESP_MSG QosIndRegRsp; + QMI_QOS_GLOBAL_QOS_FLOW_IND_MSG QosGlobalQosFlowInd; + QMI_QOS_GET_QOS_INFO_REQ_MSG QosGetQosInfoReq; + QMI_QOS_GET_QOS_INFO_RESP_MSG QosGetQosInfoRsp; +#endif + + // QMIWMS Messages +#if 0 + QMIWMS_GET_MESSAGE_PROTOCOL_REQ_MSG GetMessageProtocolReq; + QMIWMS_GET_MESSAGE_PROTOCOL_RESP_MSG GetMessageProtocolResp; + QMIWMS_GET_SMSC_ADDRESS_REQ_MSG GetSMSCAddressReq; + QMIWMS_GET_SMSC_ADDRESS_RESP_MSG GetSMSCAddressResp; + QMIWMS_SET_SMSC_ADDRESS_REQ_MSG SetSMSCAddressReq; + QMIWMS_SET_SMSC_ADDRESS_RESP_MSG SetSMSCAddressResp; + QMIWMS_GET_STORE_MAX_SIZE_REQ_MSG GetStoreMaxSizeReq; + QMIWMS_GET_STORE_MAX_SIZE_RESP_MSG GetStoreMaxSizeResp; + QMIWMS_LIST_MESSAGES_REQ_MSG ListMessagesReq; + QMIWMS_LIST_MESSAGES_RESP_MSG ListMessagesResp; + QMIWMS_RAW_READ_REQ_MSG RawReadMessagesReq; + QMIWMS_RAW_READ_RESP_MSG RawReadMessagesResp; + QMIWMS_SET_EVENT_REPORT_REQ_MSG WmsSetEventReportReq; + QMIWMS_SET_EVENT_REPORT_RESP_MSG WmsSetEventReportResp; + QMIWMS_EVENT_REPORT_IND_MSG WmsEventReportInd; + QMIWMS_DELETE_REQ_MSG WmsDeleteReq; + QMIWMS_DELETE_RESP_MSG WmsDeleteResp; + QMIWMS_RAW_SEND_REQ_MSG RawSendMessagesReq; + QMIWMS_RAW_SEND_RESP_MSG RawSendMessagesResp; + QMIWMS_MODIFY_TAG_REQ_MSG WmsModifyTagReq; + QMIWMS_MODIFY_TAG_RESP_MSG WmsModifyTagResp; +#endif + + // QMINAS Messages +#if 0 + QMINAS_GET_HOME_NETWORK_REQ_MSG GetHomeNetworkReq; + QMINAS_GET_HOME_NETWORK_RESP_MSG GetHomeNetworkResp; + QMINAS_GET_PREFERRED_NETWORK_REQ_MSG GetPreferredNetworkReq; + QMINAS_GET_PREFERRED_NETWORK_RESP_MSG 
GetPreferredNetworkResp; + QMINAS_GET_FORBIDDEN_NETWORK_REQ_MSG GetForbiddenNetworkReq; + QMINAS_GET_FORBIDDEN_NETWORK_RESP_MSG GetForbiddenNetworkResp; + QMINAS_GET_SERVING_SYSTEM_REQ_MSG GetServingSystemReq; +#endif + QMINAS_GET_SERVING_SYSTEM_RESP_MSG GetServingSystemResp; + QMINAS_GET_SYS_INFO_RESP_MSG GetSysInfoResp; + QMINAS_SYS_INFO_IND_MSG NasSysInfoInd; +#if 0 + QMINAS_SERVING_SYSTEM_IND_MSG NasServingSystemInd; + QMINAS_SET_PREFERRED_NETWORK_REQ_MSG SetPreferredNetworkReq; + QMINAS_SET_PREFERRED_NETWORK_RESP_MSG SetPreferredNetworkResp; + QMINAS_SET_FORBIDDEN_NETWORK_REQ_MSG SetForbiddenNetworkReq; + QMINAS_SET_FORBIDDEN_NETWORK_RESP_MSG SetForbiddenNetworkResp; + QMINAS_PERFORM_NETWORK_SCAN_REQ_MSG PerformNetworkScanReq; + QMINAS_PERFORM_NETWORK_SCAN_RESP_MSG PerformNetworkScanResp; + QMINAS_INITIATE_NW_REGISTER_REQ_MSG InitiateNwRegisterReq; + QMINAS_INITIATE_NW_REGISTER_RESP_MSG InitiateNwRegisterResp; + QMINAS_SET_TECHNOLOGY_PREF_REQ_MSG SetTechnologyPrefReq; + QMINAS_SET_TECHNOLOGY_PREF_RESP_MSG SetTechnologyPrefResp; + QMINAS_GET_SIGNAL_STRENGTH_REQ_MSG GetSignalStrengthReq; + QMINAS_GET_SIGNAL_STRENGTH_RESP_MSG GetSignalStrengthResp; + QMINAS_SET_EVENT_REPORT_REQ_MSG SetEventReportReq; + QMINAS_SET_EVENT_REPORT_RESP_MSG SetEventReportResp; + QMINAS_EVENT_REPORT_IND_MSG NasEventReportInd; + QMINAS_GET_RF_BAND_INFO_REQ_MSG GetRFBandInfoReq; + QMINAS_GET_RF_BAND_INFO_RESP_MSG GetRFBandInfoResp; + QMINAS_INITIATE_ATTACH_REQ_MSG InitiateAttachReq; + QMINAS_INITIATE_ATTACH_RESP_MSG InitiateAttachResp; + QMINAS_GET_PLMN_NAME_REQ_MSG GetPLMNNameReq; + QMINAS_GET_PLMN_NAME_RESP_MSG GetPLMNNameResp; +#endif + + // QMIUIM Messages + QMIUIM_GET_CARD_STATUS_RESP_MSG UIMGetCardStatus; + QMIUIM_VERIFY_PIN_REQ_MSG UIMUIMVerifyPinReq; + QMIUIM_VERIFY_PIN_RESP_MSG UIMUIMVerifyPinResp; +#if 0 + QMIUIM_SET_PIN_PROTECTION_REQ_MSG UIMUIMSetPinProtectionReq; + QMIUIM_SET_PIN_PROTECTION_RESP_MSG UIMUIMSetPinProtectionResp; + QMIUIM_CHANGE_PIN_REQ_MSG UIMUIMChangePinReq; + QMIUIM_CHANGE_PIN_RESP_MSG UIMUIMChangePinResp; + QMIUIM_UNBLOCK_PIN_REQ_MSG UIMUIMUnblockPinReq; + QMIUIM_UNBLOCK_PIN_RESP_MSG UIMUIMUnblockPinResp; +#endif + QMIUIM_READ_TRANSPARENT_REQ_MSG UIMUIMReadTransparentReq; + QMIUIM_READ_TRANSPARENT_RESP_MSG UIMUIMReadTransparentResp; + QMIUIM_SET_CARD_SLOT_REQ_MSG UIMSetCardSlotReq; + + QMIWDS_ADMIN_SET_DATA_FORMAT_REQ_MSG SetDataFormatReq; + QMI_WDA_SET_LOOPBACK_CONFIG_REQ_MSG SetLoopBackReq; + QMI_WDA_SET_LOOPBACK_CONFIG_IND_MSG SetLoopBackInd; + }; +} __attribute__ ((packed)) QMUX_MSG, *PQMUX_MSG; + +typedef struct _QCQMIMSG { + QCQMI_HDR QMIHdr; + union { + QMICTL_MSG CTLMsg; + QMUX_MSG MUXMsg; + }; +} __attribute__ ((packed)) QCQMIMSG, *PQCQMIMSG; + +#pragma pack(pop) + +#endif // MPQMUX_H +
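The packed TLV structures above are not parsed field by field; they are overlaid directly onto the payload of a received QMUX message. A minimal sketch of that pattern, assuming the GetTLV() helper defined in QMIThread.c later in this patch, and assuming TLV ID 0x14 carries the LTE entry of a QMINAS_GET_SIG_INFO_RESP (the ID follows Qualcomm's NAS service definition and should be verified against the target firmware):

    // Sketch: read LTE signal metrics out of a QMINAS_GET_SIG_INFO_RESP.
    // pResponse is a response as received by the QMI thread; GetTLV() walks
    // its TLV list and returns a pointer into the buffer, so the packed
    // structs can simply be cast over it.
    static void DumpLteSigInfo(PQCQMIMSG pResponse) {
        PQMI_TLV_HDR pTLVHdr = GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x14); // 0x14: LTE TLV (assumed)
        if (pTLVHdr != NULL) {
            PQMINAS_SIG_INFO_LTE_TLV_MSG pLte = (PQMINAS_SIG_INFO_LTE_TLV_MSG)pTLVHdr;
            dbg_time("LTE rssi=%d rsrq=%d rsrp=%d snr=%d",
                     pLte->rssi, pLte->rsrq, pLte->rsrp, pLte->snr);
        }
    }

Because every layout here sits inside the pack(1) pragma and is declared __attribute__ ((packed)), the cast matches the wire format byte for byte; multi-byte fields such as rsrp still need the le16_to_cpu()-style conversions on big-endian hosts.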
diff --git a/package/wwan/driver/quectel_cm_5G/src/Makefile b/package/wwan/driver/quectel_cm_5G/src/Makefile new file mode 100644 index 000000000..9c48439bf --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/Makefile @@ -0,0 +1,45 @@ +ifneq ($(CROSS_COMPILE),) +CROSS-COMPILE:=$(CROSS_COMPILE) +endif +#CROSS-COMPILE:=/workspace/buildroot/buildroot-qemu_mips_malta_defconfig/output/host/usr/bin/mips-buildroot-linux-uclibc- +#CROSS-COMPILE:=/workspace/buildroot/buildroot-qemu_arm_vexpress_defconfig/output/host/usr/bin/arm-buildroot-linux-uclibcgnueabi- +#CROSS-COMPILE:=/workspace/buildroot-git/qemu_mips64_malta/output/host/usr/bin/mips-gnu-linux- +ifeq ($(CC),cc) +CC:=$(CROSS-COMPILE)gcc +endif +LD:=$(CROSS-COMPILE)ld + +QL_CM_SRC=QmiWwanCM.c GobiNetCM.c main.c MPQMUX.c QMIThread.c util.c qmap_bridge_mode.c mbim-cm.c device.c +QL_CM_SRC+=atc.c atchannel.c at_tok.c +#QL_CM_SRC+=qrtr.c rmnetctl.c +ifeq (1,1) +QL_CM_DHCP=udhcpc.c +else +LIBMNL=libmnl/ifutils.c libmnl/attr.c libmnl/callback.c libmnl/nlmsg.c libmnl/socket.c +DHCP=libmnl/dhcp/dhcpclient.c libmnl/dhcp/dhcpmsg.c libmnl/dhcp/packet.c +QL_CM_DHCP=udhcpc_netlink.c +QL_CM_DHCP+=${LIBMNL} +endif + +LDFLAGS += -lpthread -ldl -lrt + +release: clean qmi-proxy mbim-proxy atc-proxy #qrtr-proxy + $(CC) ${CFLAGS} ${QL_CM_SRC} ${QL_CM_DHCP} -o quectel-CM ${LDFLAGS} + +debug: clean + $(CC) ${CFLAGS} -g -DCM_DEBUG ${QL_CM_SRC} ${QL_CM_DHCP} -o quectel-CM -lpthread -ldl -lrt + +qmi-proxy: + $(CC) ${CFLAGS} quectel-qmi-proxy.c -o quectel-qmi-proxy ${LDFLAGS} + +mbim-proxy: + $(CC) ${CFLAGS} quectel-mbim-proxy.c -o quectel-mbim-proxy ${LDFLAGS} + +qrtr-proxy: + $(CC) ${CFLAGS} quectel-qrtr-proxy.c -o quectel-qrtr-proxy ${LDFLAGS} + +atc-proxy: + $(CC) ${CFLAGS} quectel-atc-proxy.c atchannel.c at_tok.c util.c -o quectel-atc-proxy ${LDFLAGS} + +clean: + rm -rf *.o libmnl/*.o quectel-CM quectel-qmi-proxy quectel-mbim-proxy quectel-atc-proxy diff --git a/package/wwan/driver/quectel_cm_5G/src/Makefile.am b/package/wwan/driver/quectel_cm_5G/src/Makefile.am new file mode 100644 index 000000000..87e526669 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/Makefile.am @@ -0,0 +1,22 @@ +bin_PROGRAMS = quectel-CM +QL_CM_SRC=QmiWwanCM.c GobiNetCM.c main.c MPQMUX.c QMIThread.c util.c qmap_bridge_mode.c mbim-cm.c device.c +QL_CM_SRC+=atc.c atchannel.c at_tok.c +#QL_CM_SRC+=qrtr.c rmnetctl.c +QL_CM_DHCP=udhcpc.c +if USE_QRTR +quectel_CM_CFLAGS = -DCONFIG_QRTR +QL_CM_SRC += qrtr.c rmnetctl.c +if USE_MSM_IPC +quectel_CM_CFLAGS += -DUSE_LINUX_MSM_IPC +endif +endif + +quectel_CM_SOURCES = ${QL_CM_SRC} ${QL_CM_DHCP} + +bin_PROGRAMS += quectel-qmi-proxy +quectel_qmi_proxy_SOURCES = quectel-qmi-proxy.c + +bin_PROGRAMS += quectel-mbim-proxy +quectel_mbim_proxy_SOURCES = quectel-mbim-proxy.c +LIBS = -l pthread +CFLAGS = -Wall -Wextra -Werror -O1 diff --git a/package/wwan/driver/quectel_cm_5G/src/NOTICE b/package/wwan/driver/quectel_cm_5G/src/NOTICE new file mode 100644 index 000000000..0a062cf0b --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/NOTICE @@ -0,0 +1,7 @@ +This program is fully open source code, and public domain software for customers of Quectel company. + +The APIs of the QMI WWAN interfaces are defined by Qualcomm, and this program complies with the Qualcomm QMI WWAN interface specification. + +Customers are free to modify the source code and redistribute it. + +For those who are not Quectel's customers, all rights are reserved, and any copying of or commercial development based on this program is not allowed.
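QMIThread.c below is the QMUX engine that consumes these definitions. Nearly every helper in it follows the same shape: build a request with ComposeQMUXMsg(), hand it to the worker thread for a blocking send/receive, then validate the result TLV with the qmi_rsp_check_and_return() macro defined near the top of the file. A hedged sketch of that call pattern (the service-type and message-ID constants come from earlier hunks of this patch; QmiThreadSendQMI() is the transport call implemented further down in QMIThread.c and is assumed, as at the existing call sites, to release the request):

    // Sketch of the request/response pattern used by the Wds*/Nas*/Uim* helpers.
    static int QueryRuntimeSettingsExample(void) {
        PQCQMIMSG pRequest;
        PQCQMIMSG pResponse;
        PQMUX_MSG pMUXMsg;
        int err;

        pRequest = ComposeQMUXMsg(QMUX_TYPE_WDS, QMIWDS_GET_RUNTIME_SETTINGS_REQ, NULL, NULL);
        err = QmiThreadSendQMI(pRequest, &pResponse);
        qmi_rsp_check_and_return(); // returns err on transport failure; frees pResponse and returns QMUXError on a QMI error
        // ... on success, pick individual TLVs out of pResponse with GetTLV() ...
        (void)pMUXMsg; // filled in by the macro for callers that read response TLVs
        free(pResponse);
        return 0;
    }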
diff --git a/package/wwan/driver/quectel_cm_5G/src/QMIThread.c b/package/wwan/driver/quectel_cm_5G/src/QMIThread.c new file mode 100644 index 000000000..40ea8d2dc --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/QMIThread.c @@ -0,0 +1,2993 @@ +/****************************************************************************** + @file QMIThread.c + @brief QMI WWAN connectivity manager. + + DESCRIPTION + Connectivity Management Tool for the USB network adapters of Quectel wireless cellular modules. + + INITIALIZATION AND SEQUENCING REQUIREMENTS + None. + + --------------------------------------------------------------------------- + Copyright (c) 2016 - 2020 Quectel Wireless Solution, Co., Ltd. All Rights Reserved. + Quectel Wireless Solution Proprietary and Confidential. + --------------------------------------------------------------------------- +******************************************************************************/ +#include "QMIThread.h" +#include + +#ifndef MIN +#define MIN(a, b) ((a) < (b)? (a): (b)) +#endif + +#define qmi_rsp_check_and_return() do { \ + if (err < 0 || pResponse == NULL) { \ + dbg_time("%s err = %d", __func__, err); \ + return err; \ + } \ + pMUXMsg = &pResponse->MUXMsg; \ + if (le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXResult) || le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXError)) { \ + USHORT QMUXError = le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXError); \ + dbg_time("%s QMUXResult = 0x%x, QMUXError = 0x%x", __func__, \ + le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXResult), QMUXError); \ + free(pResponse); \ + return QMUXError; \ + } \ +} while(0) + +#define qmi_rsp_check() do { \ + if (err < 0 || pResponse == NULL) { \ + dbg_time("%s err = %d", __func__, err); \ + return err; \ + } \ + pMUXMsg = &pResponse->MUXMsg; \ + if (le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXResult) || le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXError)) { \ + USHORT QMUXError = le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXError); \ + dbg_time("%s QMUXResult = 0x%x, QMUXError = 0x%x", __func__, \ + le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXResult), QMUXError); \ + } \ +} while(0) + +static uint32_t WdsConnectionIPv4Handle = 0; +static uint32_t WdsConnectionIPv6Handle = 0; +static int s_is_cdma = 0; +static int s_5g_type = WWAN_DATA_CLASS_NONE; +static int s_hdr_personality = 0; // 0x01-HRPD, 0x02-eHRPD +static char *qstrcpy(char *to, const char *from) { //no __strcpy_chk + char *save = to; + for (; (*to = *from) != '\0'; ++from, ++to); + return(save); +} + +static void uchar2char(char *dst_ptr, size_t dst_len, const UCHAR *src_ptr, size_t src_len) { + size_t copy = MIN(dst_len-1, src_len); + + if (copy) + memcpy(dst_ptr, src_ptr, copy); + dst_ptr[copy] = 0; +} + +static int s_9x07 = 1; + +typedef USHORT (*CUSTOMQMUX)(PQMUX_MSG pMUXMsg, void *arg); + +// To retrieve the ith (Index) TLV +PQMI_TLV_HDR GetTLV (PQCQMUX_MSG_HDR pQMUXMsgHdr, int TLVType) { + int TLVFind = 0; + USHORT Length = le16_to_cpu(pQMUXMsgHdr->Length); + PQMI_TLV_HDR pTLVHdr = (PQMI_TLV_HDR)(pQMUXMsgHdr + 1); + + while (Length >= sizeof(QMI_TLV_HDR)) { + TLVFind++; + if (TLVType > 0x1000) { + if ((TLVFind + 0x1000) == TLVType) + return pTLVHdr; + } else if (pTLVHdr->TLVType == TLVType) { + return pTLVHdr; + } + + Length -= (le16_to_cpu((pTLVHdr->TLVLength)) + sizeof(QMI_TLV_HDR)); + pTLVHdr = (PQMI_TLV_HDR)(((UCHAR *)pTLVHdr) + le16_to_cpu(pTLVHdr->TLVLength) + sizeof(QMI_TLV_HDR)); + } + + return NULL; +} + +static USHORT GetQMUXTransactionId(void) { + static int TransactionId = 0; + if (++TransactionId > 0xFFFF) + TransactionId = 1; + return TransactionId; +} + +static PQCQMIMSG ComposeQMUXMsg(UCHAR QMIType, USHORT Type, CUSTOMQMUX customQmuxMsgFunction, void *arg) { + UCHAR QMIBuf[WDM_DEFAULT_BUFSIZE]; + PQCQMIMSG pRequest = (PQCQMIMSG)QMIBuf; + int Length; + + memset(QMIBuf, 0x00, sizeof(QMIBuf)); + pRequest->QMIHdr.IFType = USB_CTL_MSG_TYPE_QMI; + pRequest->QMIHdr.CtlFlags = 0x00; + pRequest->QMIHdr.QMIType = QMIType; + + pRequest->MUXMsg.QMUXHdr.CtlFlags = QMUX_CTL_FLAG_SINGLE_MSG | QMUX_CTL_FLAG_TYPE_CMD; + pRequest->MUXMsg.QMUXHdr.TransactionId = cpu_to_le16(GetQMUXTransactionId()); + pRequest->MUXMsg.QMUXMsgHdr.Type = cpu_to_le16(Type); + if (customQmuxMsgFunction) + pRequest->MUXMsg.QMUXMsgHdr.Length = cpu_to_le16(customQmuxMsgFunction(&pRequest->MUXMsg, arg) - sizeof(QCQMUX_MSG_HDR)); + else
pRequest->MUXMsg.QMUXMsgHdr.Length = cpu_to_le16(0x0000); + + pRequest->QMIHdr.Length = cpu_to_le16(le16_to_cpu(pRequest->MUXMsg.QMUXMsgHdr.Length) + sizeof(QCQMUX_MSG_HDR) + sizeof(QCQMUX_HDR) + + sizeof(QCQMI_HDR) - 1); + Length = le16_to_cpu(pRequest->QMIHdr.Length) + 1; + + pRequest = (PQCQMIMSG)malloc(Length); + if (pRequest == NULL) { + dbg_time("%s fail to malloc", __func__); + } else { + memcpy(pRequest, QMIBuf, Length); + } + + return pRequest; +} + +#if 0 +static USHORT NasSetEventReportReq(PQMUX_MSG pMUXMsg, void *arg) { + pMUXMsg->SetEventReportReq.TLVType = 0x10; + pMUXMsg->SetEventReportReq.TLVLength = 0x04; + pMUXMsg->SetEventReportReq.ReportSigStrength = 0x00; + pMUXMsg->SetEventReportReq.NumTresholds = 2; + pMUXMsg->SetEventReportReq.TresholdList[0] = -113; + pMUXMsg->SetEventReportReq.TresholdList[1] = -50; + return sizeof(QMINAS_SET_EVENT_REPORT_REQ_MSG); +} + +static USHORT WdsSetEventReportReq(PQMUX_MSG pMUXMsg, void *arg) { + pMUXMsg->EventReportReq.TLVType = 0x10; // 0x10 -- current channel rate indicator + pMUXMsg->EventReportReq.TLVLength = 0x0001; // 1 + pMUXMsg->EventReportReq.Mode = 0x00; // 0-do not report; 1-report when rate changes + + pMUXMsg->EventReportReq.TLV2Type = 0x11; // 0x11 + pMUXMsg->EventReportReq.TLV2Length = 0x0005; // 5 + pMUXMsg->EventReportReq.StatsPeriod = 0x00; // seconds between reports; 0-do not report + pMUXMsg->EventReportReq.StatsMask = 0x000000ff; // + + pMUXMsg->EventReportReq.TLV3Type = 0x12; // 0x12 -- current data bearer indicator + pMUXMsg->EventReportReq.TLV3Length = 0x0001; // 1 + pMUXMsg->EventReportReq.Mode3 = 0x01; // 0-do not report; 1-report when changes + + pMUXMsg->EventReportReq.TLV4Type = 0x13; // 0x13 -- dormancy status indicator + pMUXMsg->EventReportReq.TLV4Length = 0x0001; // 1 + pMUXMsg->EventReportReq.DormancyStatus = 0x00; // 0-do not report; 1-report when changes + return sizeof(QMIWDS_SET_EVENT_REPORT_REQ_MSG); +} + +static USHORT DmsSetEventReportReq(PQMUX_MSG pMUXMsg) { + PPIN_STATUS pPinState = (PPIN_STATUS)(&pMUXMsg->DmsSetEventReportReq + 1); + PUIM_STATE pUimState = (PUIM_STATE)(pPinState + 1); + // Pin State + pPinState->TLVType = 0x12; + pPinState->TLVLength = 0x01; + pPinState->ReportPinState = 0x01; + // UIM State + pUimState->TLVType = 0x15; + pUimState->TLVLength = 0x01; + pUimState->UIMState = 0x01; + return sizeof(QMIDMS_SET_EVENT_REPORT_REQ_MSG) + sizeof(PIN_STATUS) + sizeof(UIM_STATE); +} +#endif + +static USHORT WdsStartNwInterfaceReq(PQMUX_MSG pMUXMsg, void *arg) { + PQMIWDS_TECHNOLOGY_PREFERECE pTechPref; + PQMIWDS_AUTH_PREFERENCE pAuthPref; + PQMIWDS_USERNAME pUserName; + PQMIWDS_PASSWD pPasswd; + PQMIWDS_APNNAME pApnName; + PQMIWDS_IP_FAMILY_TLV pIpFamily; + USHORT TLVLength = 0; + UCHAR *pTLV; + PROFILE_T *profile = (PROFILE_T *)arg; + const char *profile_user = profile->user; + const char *profile_password = profile->password; + int profile_auth = profile->auth; + + if (s_is_cdma && (profile_user == NULL || profile_user[0] == '\0') && (profile_password == NULL || profile_password[0] == '\0')) { + profile_user = "ctnet@mycdma.cn"; + profile_password = "vnet.mobi"; + profile_auth = 2; //chap + } + + pTLV = (UCHAR *)(&pMUXMsg->StartNwInterfaceReq + 1); + pMUXMsg->StartNwInterfaceReq.Length = 0; + + // Set technology Preferece + pTechPref = (PQMIWDS_TECHNOLOGY_PREFERECE)(pTLV + TLVLength); + pTechPref->TLVType = 0x30; + pTechPref->TLVLength = cpu_to_le16(0x01); + if (s_is_cdma == 0) + pTechPref->TechPreference = 0x01; + else + pTechPref->TechPreference = 0x02; + TLVLength 
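/* From here on, each optional TLV is appended at pTLV + TLVLength, and the
   running total advances by the TLV's value length plus
   sizeof(QCQMICTL_TLV_HDR) (assumed here to be 3: a 1-byte type plus a
   2-byte little-endian length); e.g. an APN value of "internet" would
   contribute 8 + 3 = 11 bytes. The final return value,
   sizeof(QMIWDS_START_NETWORK_INTERFACE_REQ_MSG) + TLVLength, is what
   ComposeQMUXMsg() uses (minus the QMUX message header) to fill in the
   payload length. */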
+=(le16_to_cpu(pTechPref->TLVLength) + sizeof(QCQMICTL_TLV_HDR)); + + // Set APN Name + if (profile->apn && !s_is_cdma) { //cdma no apn + pApnName = (PQMIWDS_APNNAME)(pTLV + TLVLength); + pApnName->TLVType = 0x14; + pApnName->TLVLength = cpu_to_le16(strlen(profile->apn)); + qstrcpy((char *)&pApnName->ApnName, profile->apn); + TLVLength +=(le16_to_cpu(pApnName->TLVLength) + sizeof(QCQMICTL_TLV_HDR)); + } + + // Set User Name + if (profile_user) { + pUserName = (PQMIWDS_USERNAME)(pTLV + TLVLength); + pUserName->TLVType = 0x17; + pUserName->TLVLength = cpu_to_le16(strlen(profile_user)); + qstrcpy((char *)&pUserName->UserName, profile_user); + TLVLength += (le16_to_cpu(pUserName->TLVLength) + sizeof(QCQMICTL_TLV_HDR)); + } + + // Set Password + if (profile_password) { + pPasswd = (PQMIWDS_PASSWD)(pTLV + TLVLength); + pPasswd->TLVType = 0x18; + pPasswd->TLVLength = cpu_to_le16(strlen(profile_password)); + qstrcpy((char *)&pPasswd->Passwd, profile_password); + TLVLength += (le16_to_cpu(pPasswd->TLVLength) + sizeof(QCQMICTL_TLV_HDR)); + } + + // Set Auth Protocol + if (profile_user && profile_password) { + pAuthPref = (PQMIWDS_AUTH_PREFERENCE)(pTLV + TLVLength); + pAuthPref->TLVType = 0x16; + pAuthPref->TLVLength = cpu_to_le16(0x01); + pAuthPref->AuthPreference = profile_auth; // 0 ~ None, 1 ~ Pap, 2 ~ Chap, 3 ~ MsChapV2 + TLVLength += (le16_to_cpu(pAuthPref->TLVLength) + sizeof(QCQMICTL_TLV_HDR)); + } + + // Add IP Family Preference + pIpFamily = (PQMIWDS_IP_FAMILY_TLV)(pTLV + TLVLength); + pIpFamily->TLVType = 0x19; + pIpFamily->TLVLength = cpu_to_le16(0x01); + pIpFamily->IpFamily = profile->curIpFamily; + TLVLength += (le16_to_cpu(pIpFamily->TLVLength) + sizeof(QCQMICTL_TLV_HDR)); + + //Set Profile Index + if (profile->pdp && !s_is_cdma) { //cdma only support one pdp, so no need to set profile index + PQMIWDS_PROFILE_IDENTIFIER pProfileIndex = (PQMIWDS_PROFILE_IDENTIFIER)(pTLV + TLVLength); + pProfileIndex->TLVLength = cpu_to_le16(0x01); + pProfileIndex->TLVType = 0x31; + pProfileIndex->ProfileIndex = profile->pdp; + if (s_is_cdma && s_hdr_personality == 0x02) { + pProfileIndex->TLVType = 0x32; //profile_index_3gpp2 + pProfileIndex->ProfileIndex = 101; + } + TLVLength += (le16_to_cpu(pProfileIndex->TLVLength) + sizeof(QCQMICTL_TLV_HDR)); + } + + return sizeof(QMIWDS_START_NETWORK_INTERFACE_REQ_MSG) + TLVLength; +} + +static USHORT WdsStopNwInterfaceReq(PQMUX_MSG pMUXMsg, void *arg) { + pMUXMsg->StopNwInterfaceReq.TLVType = 0x01; + pMUXMsg->StopNwInterfaceReq.TLVLength = cpu_to_le16(0x04); + if (*((int *)arg) == IpFamilyV4) + pMUXMsg->StopNwInterfaceReq.Handle = cpu_to_le32(WdsConnectionIPv4Handle); + else + pMUXMsg->StopNwInterfaceReq.Handle = cpu_to_le32(WdsConnectionIPv6Handle); + return sizeof(QMIWDS_STOP_NETWORK_INTERFACE_REQ_MSG); +} + +static USHORT WdsSetClientIPFamilyPref(PQMUX_MSG pMUXMsg, void *arg) { + pMUXMsg->SetClientIpFamilyPrefReq.TLVType = 0x01; + pMUXMsg->SetClientIpFamilyPrefReq.TLVLength = cpu_to_le16(0x01); + pMUXMsg->SetClientIpFamilyPrefReq.IpPreference = *((UCHAR *)arg); + return sizeof(QMIWDS_SET_CLIENT_IP_FAMILY_PREF_REQ_MSG); +} + +static USHORT WdsSetAutoConnect(PQMUX_MSG pMUXMsg, void *arg) { + pMUXMsg->SetAutoConnectReq.TLVType = 0x01; + pMUXMsg->SetAutoConnectReq.TLVLength = cpu_to_le16(0x01); + pMUXMsg->SetAutoConnectReq.autoconnect_setting = *((UCHAR *)arg); + return sizeof(QMIWDS_SET_AUTO_CONNECT_REQ_MSG); +} + +enum peripheral_ep_type { + DATA_EP_TYPE_RESERVED = 0x0, + DATA_EP_TYPE_HSIC = 0x1, + DATA_EP_TYPE_HSUSB = 0x2, + DATA_EP_TYPE_PCIE = 0x3, + 
DATA_EP_TYPE_EMBEDDED = 0x4, + DATA_EP_TYPE_BAM_DMUX = 0x5, +}; + +static USHORT WdsSetQMUXBindMuxDataPort(PQMUX_MSG pMUXMsg, void *arg) { + QMAP_SETTING *qmap_settings = (QMAP_SETTING *)arg; + + pMUXMsg->BindMuxDataPortReq.TLVType = 0x10; + pMUXMsg->BindMuxDataPortReq.TLVLength = cpu_to_le16(0x08); + pMUXMsg->BindMuxDataPortReq.ep_type = cpu_to_le32(qmap_settings->ep_type); + pMUXMsg->BindMuxDataPortReq.iface_id = cpu_to_le32(qmap_settings->iface_id); + pMUXMsg->BindMuxDataPortReq.TLV2Type = 0x11; + pMUXMsg->BindMuxDataPortReq.TLV2Length = cpu_to_le16(0x01); + pMUXMsg->BindMuxDataPortReq.MuxId = qmap_settings->MuxId; + pMUXMsg->BindMuxDataPortReq.TLV3Type = 0x13; + pMUXMsg->BindMuxDataPortReq.TLV3Length = cpu_to_le16(0x04); + pMUXMsg->BindMuxDataPortReq.client_type = cpu_to_le32(1); //WDS_CLIENT_TYPE_TETHERED + + return sizeof(QMIWDS_BIND_MUX_DATA_PORT_REQ_MSG); +} + +static int qmap_version = 0x05; +static USHORT WdaSetDataFormat(PQMUX_MSG pMUXMsg, void *arg) { + QMAP_SETTING *qmap_settings = (QMAP_SETTING *)arg; + + if (qmap_settings->rx_urb_size == 0) { + PQMIWDS_ADMIN_SET_DATA_FORMAT_TLV_QOS pWdsAdminQosTlv; + PQMIWDS_ADMIN_SET_DATA_FORMAT_TLV linkProto; + PQMIWDS_ADMIN_SET_DATA_FORMAT_TLV dlTlp; + + pWdsAdminQosTlv = (PQMIWDS_ADMIN_SET_DATA_FORMAT_TLV_QOS)(&pMUXMsg->QMUXMsgHdr + 1); + pWdsAdminQosTlv->TLVType = 0x10; + pWdsAdminQosTlv->TLVLength = cpu_to_le16(0x0001); + pWdsAdminQosTlv->QOSSetting = 0; /* no-QOS header */ + + linkProto = (PQMIWDS_ADMIN_SET_DATA_FORMAT_TLV)(pWdsAdminQosTlv + 1); + linkProto->TLVType = 0x11; + linkProto->TLVLength = cpu_to_le16(4); + linkProto->Value = cpu_to_le32(0x01); /* Set Ethernet mode */ + + dlTlp = (PQMIWDS_ADMIN_SET_DATA_FORMAT_TLV)(linkProto + 1);; + dlTlp->TLVType = 0x13; + dlTlp->TLVLength = cpu_to_le16(4); + dlTlp->Value = cpu_to_le32(0x00); + + if (sizeof(*linkProto) != 7 ) + dbg_time("%s sizeof(*linkProto) = %zu, is not 7!", __func__, sizeof(*linkProto) ); + + return sizeof(QCQMUX_MSG_HDR) + sizeof(*pWdsAdminQosTlv) + sizeof(*linkProto) + sizeof(*dlTlp); + } + else { + //Indicates whether the Quality of Service(QOS) data format is used by the client. + pMUXMsg->SetDataFormatReq.QosDataFormatTlv.TLVType = 0x10; + pMUXMsg->SetDataFormatReq.QosDataFormatTlv.TLVLength = cpu_to_le16(0x0001); + pMUXMsg->SetDataFormatReq.QosDataFormatTlv.QOSSetting = 0; /* no-QOS header */ + + //Underlying Link Layer Protocol + pMUXMsg->SetDataFormatReq.UnderlyingLinkLayerProtocolTlv.TLVType = 0x11; + pMUXMsg->SetDataFormatReq.UnderlyingLinkLayerProtocolTlv.TLVLength = cpu_to_le16(4); + pMUXMsg->SetDataFormatReq.UnderlyingLinkLayerProtocolTlv.Value = cpu_to_le32(0x02); /* Set IP mode */ + + //Uplink (UL) data aggregation protocol to be used for uplink data transfer. 
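/* A worked example of the aggregation TLVs filled in below, assuming the
   32KB rx_urb_size that the comments in requestSetEthMode() mention for
   SDX24/SDX55: TLV 0x15 (max DL datagrams per aggregate) becomes
   32768/512 = 64 and TLV 0x16 (max DL aggregate size) becomes 32768.
   TLV 0x12/0x13 carry qmap_version, which this file pins to 0x05 (QMAP)
   unless 0x09 (assumed here to mean QMAPv5) was requested. */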
+ pMUXMsg->SetDataFormatReq.UplinkDataAggregationProtocolTlv.TLVType = 0x12; + pMUXMsg->SetDataFormatReq.UplinkDataAggregationProtocolTlv.TLVLength = cpu_to_le16(4); + pMUXMsg->SetDataFormatReq.UplinkDataAggregationProtocolTlv.Value = cpu_to_le32(qmap_version); //UL QMAP is enabled + + //Downlink (DL) data aggregation protocol to be used for downlink data transfer + pMUXMsg->SetDataFormatReq.DownlinkDataAggregationProtocolTlv.TLVType = 0x13; + pMUXMsg->SetDataFormatReq.DownlinkDataAggregationProtocolTlv.TLVLength = cpu_to_le16(4); + pMUXMsg->SetDataFormatReq.DownlinkDataAggregationProtocolTlv.Value = cpu_to_le32(qmap_version); //DL QMAP is enabled + + //Maximum number of datagrams in a single aggregated packet on downlink + pMUXMsg->SetDataFormatReq.DownlinkDataAggregationMaxDatagramsTlv.TLVType = 0x15; + pMUXMsg->SetDataFormatReq.DownlinkDataAggregationMaxDatagramsTlv.TLVLength = cpu_to_le16(4); + pMUXMsg->SetDataFormatReq.DownlinkDataAggregationMaxDatagramsTlv.Value = cpu_to_le32(qmap_settings->rx_urb_size/512); + + //Maximum size in bytes of a single aggregated packet allowed on downlink + pMUXMsg->SetDataFormatReq.DownlinkDataAggregationMaxSizeTlv.TLVType = 0x16; + pMUXMsg->SetDataFormatReq.DownlinkDataAggregationMaxSizeTlv.TLVLength = cpu_to_le16(4); + pMUXMsg->SetDataFormatReq.DownlinkDataAggregationMaxSizeTlv.Value = cpu_to_le32(qmap_settings->rx_urb_size); + + //Peripheral End Point ID + pMUXMsg->SetDataFormatReq.epTlv.TLVType = 0x17; + pMUXMsg->SetDataFormatReq.epTlv.TLVLength = cpu_to_le16(8); + pMUXMsg->SetDataFormatReq.epTlv.ep_type = cpu_to_le32(qmap_settings->ep_type); + pMUXMsg->SetDataFormatReq.epTlv.iface_id = cpu_to_le32(qmap_settings->iface_id); + +#ifdef QUECTEL_UL_DATA_AGG + if (!qmap_settings->ul_data_aggregation_max_datagrams) { + return ((size_t)&((QMIWDS_ADMIN_SET_DATA_FORMAT_REQ_MSG *)0)->DlMinimumPassingTlv); + } + + //Minimum padding bytes to be added between aggregated downlink datagrams + pMUXMsg->SetDataFormatReq.DlMinimumPassingTlv.TLVType = 0x19; + pMUXMsg->SetDataFormatReq.DlMinimumPassingTlv.TLVLength = cpu_to_le16(4); + pMUXMsg->SetDataFormatReq.DlMinimumPassingTlv.Value = cpu_to_le32(qmap_settings->dl_minimum_padding); + + //Maximum number of datagrams in a single aggregated packet on uplink + pMUXMsg->SetDataFormatReq.UplinkDataAggregationMaxDatagramsTlv.TLVType = 0x1B; + pMUXMsg->SetDataFormatReq.UplinkDataAggregationMaxDatagramsTlv.TLVLength = cpu_to_le16(4); + pMUXMsg->SetDataFormatReq.UplinkDataAggregationMaxDatagramsTlv.Value = cpu_to_le32(qmap_settings->ul_data_aggregation_max_datagrams); + + //Maximum size in bytes of a single aggregated packet allowed on uplink + pMUXMsg->SetDataFormatReq.UplinkDataAggregationMaxSizeTlv.TLVType = 0x1C; + pMUXMsg->SetDataFormatReq.UplinkDataAggregationMaxSizeTlv.TLVLength = cpu_to_le16(4); + pMUXMsg->SetDataFormatReq.UplinkDataAggregationMaxSizeTlv.Value = cpu_to_le32(qmap_settings->ul_data_aggregation_max_size); +#endif + + return sizeof(QMIWDS_ADMIN_SET_DATA_FORMAT_REQ_MSG); + } +} + +#ifdef CONFIG_SIM +static USHORT DmsUIMVerifyPinReqSend(PQMUX_MSG pMUXMsg, void *arg) { + pMUXMsg->UIMVerifyPinReq.TLVType = 0x01; + pMUXMsg->UIMVerifyPinReq.PINID = 0x01; //Pin1, not Puk + pMUXMsg->UIMVerifyPinReq.PINLen = strlen((const char *)arg); + qstrcpy((char *)&pMUXMsg->UIMVerifyPinReq.PINValue, ((const char *)arg)); + pMUXMsg->UIMVerifyPinReq.TLVLength = cpu_to_le16(2 + strlen((const char *)arg)); + return sizeof(QMIDMS_UIM_VERIFY_PIN_REQ_MSG) + (strlen((const char *)arg) - 1); +} + +static USHORT
UimVerifyPinReqSend(PQMUX_MSG pMUXMsg, void *arg) +{ + pMUXMsg->UIMUIMVerifyPinReq.TLVType = 0x01; + pMUXMsg->UIMUIMVerifyPinReq.TLVLength = cpu_to_le16(0x02); + pMUXMsg->UIMUIMVerifyPinReq.Session_Type = 0x00; + pMUXMsg->UIMUIMVerifyPinReq.Aid_Len = 0x00; + pMUXMsg->UIMUIMVerifyPinReq.TLV2Type = 0x02; + pMUXMsg->UIMUIMVerifyPinReq.TLV2Length = cpu_to_le16(2 + strlen((const char *)arg)); + pMUXMsg->UIMUIMVerifyPinReq.PINID = 0x01; //Pin1, not Puk + pMUXMsg->UIMUIMVerifyPinReq.PINLen= strlen((const char *)arg); + qstrcpy((char *)&pMUXMsg->UIMUIMVerifyPinReq.PINValue, ((const char *)arg)); + return sizeof(QMIUIM_VERIFY_PIN_REQ_MSG) + (strlen((const char *)arg) - 1); +} + +#ifdef CONFIG_IMSI_ICCID +static USHORT UimReadTransparentIMSIReqSend(PQMUX_MSG pMUXMsg, void *arg) { + PREAD_TRANSPARENT_TLV pReadTransparent; + + pMUXMsg->UIMUIMReadTransparentReq.TLVType = 0x01; + pMUXMsg->UIMUIMReadTransparentReq.TLVLength = cpu_to_le16(0x02); + if (!strcmp((char *)arg, "EF_ICCID")) { + pMUXMsg->UIMUIMReadTransparentReq.Session_Type = 0x06; + pMUXMsg->UIMUIMReadTransparentReq.Aid_Len = 0x00; + + pMUXMsg->UIMUIMReadTransparentReq.TLV2Type = 0x02; + pMUXMsg->UIMUIMReadTransparentReq.file_id = cpu_to_le16(0x2FE2); + pMUXMsg->UIMUIMReadTransparentReq.path_len = 0x02; + pMUXMsg->UIMUIMReadTransparentReq.path[0] = 0x00; + pMUXMsg->UIMUIMReadTransparentReq.path[1] = 0x3F; + } + else if(!strcmp((char *)arg, "EF_IMSI")) { + pMUXMsg->UIMUIMReadTransparentReq.Session_Type = 0x00; + pMUXMsg->UIMUIMReadTransparentReq.Aid_Len = 0x00; + + pMUXMsg->UIMUIMReadTransparentReq.TLV2Type = 0x02; + pMUXMsg->UIMUIMReadTransparentReq.file_id = cpu_to_le16(0x6F07); + pMUXMsg->UIMUIMReadTransparentReq.path_len = 0x04; + pMUXMsg->UIMUIMReadTransparentReq.path[0] = 0x00; + pMUXMsg->UIMUIMReadTransparentReq.path[1] = 0x3F; + pMUXMsg->UIMUIMReadTransparentReq.path[2] = 0xFF; + pMUXMsg->UIMUIMReadTransparentReq.path[3] = 0x7F; + } + + pMUXMsg->UIMUIMReadTransparentReq.TLV2Length = cpu_to_le16(3 + pMUXMsg->UIMUIMReadTransparentReq.path_len); + + pReadTransparent = (PREAD_TRANSPARENT_TLV)(&pMUXMsg->UIMUIMReadTransparentReq.path[pMUXMsg->UIMUIMReadTransparentReq.path_len]); + pReadTransparent->TLVType = 0x03; + pReadTransparent->TLVLength = cpu_to_le16(0x04); + pReadTransparent->Offset = cpu_to_le16(0x00); + pReadTransparent->Length = cpu_to_le16(0x00); + + return (sizeof(QMIUIM_READ_TRANSPARENT_REQ_MSG) + pMUXMsg->UIMUIMReadTransparentReq.path_len + sizeof(READ_TRANSPARENT_TLV)); +} +#endif +#endif + +#ifdef CONFIG_APN +static USHORT WdsCreateProfileSettingsReqSend(PQMUX_MSG pMUXMsg, void *arg) { + PROFILE_T *profile = (PROFILE_T *)arg; + pMUXMsg->CreatetProfileSettingsReq.Length = cpu_to_le16(sizeof(QMIWDS_CREATE_PROFILE_SETTINGS_REQ_MSG) - 4); + pMUXMsg->CreatetProfileSettingsReq.TLVType = 0x01; + pMUXMsg->CreatetProfileSettingsReq.TLVLength = cpu_to_le16(0x01); + pMUXMsg->CreatetProfileSettingsReq.ProfileType = 0x00; // 0 ~ 3GPP, 1 ~ 3GPP2 + pMUXMsg->CreatetProfileSettingsReq.TLV2Type = 0x25; + pMUXMsg->CreatetProfileSettingsReq.TLV2Length = cpu_to_le16(0x01); + pMUXMsg->CreatetProfileSettingsReq.pdp_context = profile->pdp; // 0 ~ 3GPP, 1 ~ 3GPP2 + return sizeof(QMIWDS_CREATE_PROFILE_SETTINGS_REQ_MSG); +} + +static USHORT WdsGetProfileSettingsReqSend(PQMUX_MSG pMUXMsg, void *arg) { + PROFILE_T *profile = (PROFILE_T *)arg; + pMUXMsg->GetProfileSettingsReq.Length = cpu_to_le16(sizeof(QMIWDS_GET_PROFILE_SETTINGS_REQ_MSG) - 4); + pMUXMsg->GetProfileSettingsReq.TLVType = 0x01; + pMUXMsg->GetProfileSettingsReq.TLVLength = 
cpu_to_le16(0x02); + pMUXMsg->GetProfileSettingsReq.ProfileType = 0x00; // 0 ~ 3GPP, 1 ~ 3GPP2 + pMUXMsg->GetProfileSettingsReq.ProfileIndex = profile->pdp; + return sizeof(QMIWDS_GET_PROFILE_SETTINGS_REQ_MSG); +} + +static USHORT WdsModifyProfileSettingsReq(PQMUX_MSG pMUXMsg, void *arg) { + USHORT TLVLength = 0; + UCHAR *pTLV; + PROFILE_T *profile = (PROFILE_T *)arg; + PQMIWDS_PDPTYPE pPdpType; + + pMUXMsg->ModifyProfileSettingsReq.Length = cpu_to_le16(sizeof(QMIWDS_MODIFY_PROFILE_SETTINGS_REQ_MSG) - 4); + pMUXMsg->ModifyProfileSettingsReq.TLVType = 0x01; + pMUXMsg->ModifyProfileSettingsReq.TLVLength = cpu_to_le16(0x02); + pMUXMsg->ModifyProfileSettingsReq.ProfileType = 0x00; // 0 ~ 3GPP, 1 ~ 3GPP2 + pMUXMsg->ModifyProfileSettingsReq.ProfileIndex = profile->pdp; + + pTLV = (UCHAR *)(&pMUXMsg->ModifyProfileSettingsReq + 1); + + pPdpType = (PQMIWDS_PDPTYPE)(pTLV + TLVLength); + pPdpType->TLVType = 0x11; + pPdpType->TLVLength = cpu_to_le16(0x01); + pPdpType->PdpType = profile->iptype; + TLVLength +=(le16_to_cpu(pPdpType->TLVLength) + sizeof(QCQMICTL_TLV_HDR)); + + // Set APN Name + if (profile->apn) { + PQMIWDS_APNNAME pApnName = (PQMIWDS_APNNAME)(pTLV + TLVLength); + pApnName->TLVType = 0x14; + pApnName->TLVLength = cpu_to_le16(strlen(profile->apn)); + qstrcpy((char *)&pApnName->ApnName, profile->apn); + TLVLength +=(le16_to_cpu(pApnName->TLVLength) + sizeof(QCQMICTL_TLV_HDR)); + } + + // Set User Name + if (profile->user) { + PQMIWDS_USERNAME pUserName = (PQMIWDS_USERNAME)(pTLV + TLVLength); + pUserName->TLVType = 0x1B; + pUserName->TLVLength = cpu_to_le16(strlen(profile->user)); + qstrcpy((char *)&pUserName->UserName, profile->user); + TLVLength += (le16_to_cpu(pUserName->TLVLength) + sizeof(QCQMICTL_TLV_HDR)); + } + + // Set Password + if (profile->password) { + PQMIWDS_PASSWD pPasswd = (PQMIWDS_PASSWD)(pTLV + TLVLength); + pPasswd->TLVType = 0x1C; + pPasswd->TLVLength = cpu_to_le16(strlen(profile->password)); + qstrcpy((char *)&pPasswd->Passwd, profile->password); + TLVLength +=(le16_to_cpu(pPasswd->TLVLength) + sizeof(QCQMICTL_TLV_HDR)); + } + + // Set Auth Protocol + if (profile->user && profile->password) { + PQMIWDS_AUTH_PREFERENCE pAuthPref = (PQMIWDS_AUTH_PREFERENCE)(pTLV + TLVLength); + pAuthPref->TLVType = 0x1D; + pAuthPref->TLVLength = cpu_to_le16(0x01); + pAuthPref->AuthPreference = profile->auth; // 0 ~ None, 1 ~ Pap, 2 ~ Chap, 3 ~ MsChapV2 + TLVLength += (le16_to_cpu(pAuthPref->TLVLength) + sizeof(QCQMICTL_TLV_HDR)); + } + + return sizeof(QMIWDS_MODIFY_PROFILE_SETTINGS_REQ_MSG) + TLVLength; +} +#endif + +static USHORT WdsGetRuntimeSettingReq(PQMUX_MSG pMUXMsg, void *arg) +{ + (void)arg; + pMUXMsg->GetRuntimeSettingsReq.TLVType = 0x10; + pMUXMsg->GetRuntimeSettingsReq.TLVLength = cpu_to_le16(0x04); + // the following mask also applies to IPV6 + pMUXMsg->GetRuntimeSettingsReq.Mask = cpu_to_le32(QMIWDS_GET_RUNTIME_SETTINGS_MASK_IPV4DNS_ADDR | + QMIWDS_GET_RUNTIME_SETTINGS_MASK_IPV4_ADDR | + QMIWDS_GET_RUNTIME_SETTINGS_MASK_MTU | + QMIWDS_GET_RUNTIME_SETTINGS_MASK_IPV4GATEWAY_ADDR) | + QMIWDS_GET_RUNTIME_SETTINGS_MASK_PCSCF_SV_ADDR | + QMIWDS_GET_RUNTIME_SETTINGS_MASK_PCSCF_DOM_NAME; + + return sizeof(QMIWDS_GET_RUNTIME_SETTINGS_REQ_MSG); +} + +static PQCQMIMSG s_pRequest; +static PQCQMIMSG s_pResponse; + +static int is_response(const PQCQMIMSG pRequest, const PQCQMIMSG pResponse) { + if ((pRequest->QMIHdr.QMIType == pResponse->QMIHdr.QMIType) + && (pRequest->QMIHdr.ClientId == pResponse->QMIHdr.ClientId)) { + USHORT requestTID, responseTID; + if (pRequest->QMIHdr.QMIType == 
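/* Matching rule: a response belongs to a request when the service type,
   client ID and transaction ID all agree. CTL transactions carry their
   transaction ID in the QMICTL header (read without byte-swapping, so
   presumably a single byte), while every other service carries a 16-bit
   little-endian ID in the QMUX header, which is why the two branches below
   read different fields. */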
QMUX_TYPE_CTL) { + requestTID = pRequest->CTLMsg.QMICTLMsgHdr.TransactionId; + responseTID = pResponse->CTLMsg.QMICTLMsgHdr.TransactionId; + } else { + requestTID = le16_to_cpu(pRequest->MUXMsg.QMUXHdr.TransactionId); + responseTID = le16_to_cpu(pResponse->MUXMsg.QMUXHdr.TransactionId); + } + return (requestTID == responseTID); + } + return 0; +} + +int (*qmidev_send)(PQCQMIMSG pRequest); + +int QmiThreadSendQMITimeout(PQCQMIMSG pRequest, PQCQMIMSG *ppResponse, unsigned msecs, const char *funcname) { + int ret; + + if (!pRequest) + return -EINVAL; + + pthread_mutex_lock(&cm_command_mutex); + + if (ppResponse) + *ppResponse = NULL; + + dump_qmi(pRequest, le16_to_cpu(pRequest->QMIHdr.Length) + 1); + + s_pRequest = pRequest; + s_pResponse = NULL; + + ret = qmidev_send(pRequest); + + if (ret == 0) { + ret = pthread_cond_timeout_np(&cm_command_cond, &cm_command_mutex, msecs); + if (!ret) { + if (s_pResponse && ppResponse) { + *ppResponse = s_pResponse; + } else { + if (s_pResponse) { + free(s_pResponse); + s_pResponse = NULL; + } + } + } else { + dbg_time("%s message timeout", funcname); + } + } + + pthread_mutex_unlock(&cm_command_mutex); + + return ret; +} + +void QmiThreadRecvQMI(PQCQMIMSG pResponse) { + pthread_mutex_lock(&cm_command_mutex); + if (pResponse == NULL) { + if (s_pRequest) { + free(s_pRequest); + s_pRequest = NULL; + s_pResponse = NULL; + pthread_cond_signal(&cm_command_cond); + } + pthread_mutex_unlock(&cm_command_mutex); + return; + } + dump_qmi(pResponse, le16_to_cpu(pResponse->QMIHdr.Length) + 1); + if (s_pRequest && is_response(s_pRequest, pResponse)) { + free(s_pRequest); + s_pRequest = NULL; + s_pResponse = malloc(le16_to_cpu(pResponse->QMIHdr.Length) + 1); + if (s_pResponse != NULL) { + memcpy(s_pResponse, pResponse, le16_to_cpu(pResponse->QMIHdr.Length) + 1); + } + pthread_cond_signal(&cm_command_cond); + } else if ((pResponse->QMIHdr.QMIType == QMUX_TYPE_CTL) + && (le16_to_cpu(pResponse->CTLMsg.QMICTLMsgHdrRsp.QMICTLType == QMICTL_REVOKE_CLIENT_ID_IND))) { + qmidevice_send_event_to_main(MODEM_REPORT_RESET_EVENT); + } else if ((pResponse->QMIHdr.QMIType == QMUX_TYPE_NAS) + && (le16_to_cpu(pResponse->MUXMsg.QMUXMsgHdrResp.Type) == QMINAS_SERVING_SYSTEM_IND)) { + qmidevice_send_event_to_main(RIL_UNSOL_RESPONSE_VOICE_NETWORK_STATE_CHANGED); + } else if ((pResponse->QMIHdr.QMIType == QMUX_TYPE_WDS) + && (le16_to_cpu(pResponse->MUXMsg.QMUXMsgHdrResp.Type) == QMIWDS_GET_PKT_SRVC_STATUS_IND)) { + qmidevice_send_event_to_main(RIL_UNSOL_DATA_CALL_LIST_CHANGED); + } else if ((pResponse->QMIHdr.QMIType == QMUX_TYPE_NAS) + && (le16_to_cpu(pResponse->MUXMsg.QMUXMsgHdrResp.Type) == QMINAS_SYS_INFO_IND)) { + qmidevice_send_event_to_main(RIL_UNSOL_RESPONSE_VOICE_NETWORK_STATE_CHANGED); + } else if ((pResponse->QMIHdr.QMIType == QMUX_TYPE_WDS_ADMIN) + && (le16_to_cpu(pResponse->MUXMsg.QMUXMsgHdrResp.Type) == QMI_WDA_SET_LOOPBACK_CONFIG_IND)) { + qmidevice_send_event_to_main_ext(RIL_UNSOL_LOOPBACK_CONFIG_IND, + &pResponse->MUXMsg.SetLoopBackInd, sizeof(pResponse->MUXMsg.SetLoopBackInd)); + } +#ifdef CONFIG_REG_QOS_IND + else if ((pResponse->QMIHdr.QMIType == QMUX_TYPE_QOS) + && (le16_to_cpu(pResponse->MUXMsg.QMUXMsgHdrResp.Type) == QMI_QOS_GLOBAL_QOS_FLOW_IND)) { + UINT qos_id = 0; + UCHAR new_flow = ql_get_global_qos_flow_ind_qos_id(pResponse, &qos_id); + if (qos_id != 0 && new_flow == 1) + qmidevice_send_event_to_main_ext(RIL_UNSOL_GLOBAL_QOS_FLOW_IND_QOS_ID, &qos_id, sizeof(qos_id)); +#ifdef CONFIG_GET_QOS_DATA_RATE + if (new_flow) { + ULONG64 max_data_rate[2] = {0}; + if 
(ql_get_global_qos_flow_ind_data_rate(pResponse, (void *)max_data_rate) == 0){} + } +#endif + } +#endif + else { + if (debug_qmi) + dbg_time("nobody cares about this qmi msg!!"); + } + pthread_mutex_unlock(&cm_command_mutex); +} + +#ifdef CONFIG_COEX_WWAN_STATE +static int requestGetCoexWWANState(void) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + PQMI_COEX_GET_WWAN_STATE_RESP_MSG_LTE_BAND pLteBand; + static QMI_COEX_GET_WWAN_STATE_RESP_MSG_LTE_BAND oldLteBand = {-1, -1}; + int err; + + pRequest = ComposeQMUXMsg(QMUX_TYPE_COEX, QMI_COEX_GET_WWAN_STATE_REQ, NULL, NULL); + err = QmiThreadSendQMI(pRequest, &pResponse); + + if (err < 0 || pResponse == NULL) { + dbg_time("%s err = %d", __func__, err); + return err; + } + + pMUXMsg = &pResponse->MUXMsg; + if (le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXResult) || le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXError)) { + dbg_time("%s QMUXResult = 0x%x, QMUXError = 0x%x", __func__, le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXResult), le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXError)); + err = le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXError); + free(pResponse); + return err; + } + pLteBand = (PQMI_COEX_GET_WWAN_STATE_RESP_MSG_LTE_BAND)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x10); + + if (pLteBand && memcmp(pLteBand, &oldLteBand, sizeof(oldLteBand))) { + oldLteBand = *pLteBand; + dbg_time("%s ul_freq %d ul_bandwidth %d", __func__, le32_to_cpu(pLteBand->ul_band.freq), le32_to_cpu(pLteBand->ul_band.bandwidth)); + dbg_time("%s dl_freq %d dl_bandwidth %d", __func__, le32_to_cpu(pLteBand->dl_band.freq), le32_to_cpu(pLteBand->dl_band.bandwidth)); + } + free(pResponse); + return 0; +} +#endif + +static int requestSetEthMode(PROFILE_T *profile) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse = NULL; + PQMUX_MSG pMUXMsg; + int err; + PQMIWDS_ADMIN_SET_DATA_FORMAT_TLV linkProto; + UCHAR IpPreference; + UCHAR autoconnect_setting = 0; + QMAP_SETTING qmap_settings = {0}; + + qmap_settings.size = sizeof(qmap_settings); + + if (profile->qmap_mode) { + profile->rawIP = 1; + s_9x07 = profile->rawIP; + + qmap_settings.MuxId = profile->muxid; + + if (profile->hardware_interface == HARDWARE_PCIE) { //SDX20_PCIE + qmap_settings.rx_urb_size = profile->qmap_size; //SDX24&SDX55 support 32KB + qmap_settings.ep_type = DATA_EP_TYPE_PCIE; + qmap_settings.iface_id = 0x04; + } + else { // for MDM9x07&MDM9x40&SDX20 USB + qmap_settings.rx_urb_size = profile->qmap_size; //SDX24&SDX55 support 32KB + qmap_settings.ep_type = DATA_EP_TYPE_HSUSB; + qmap_settings.iface_id = 0x04; + } + + qmap_settings.ul_data_aggregation_max_datagrams = 11; //by test results, 11 gives the best throughput + qmap_settings.ul_data_aggregation_max_size = 8*1024; + qmap_settings.dl_minimum_padding = 0; //no effect when registered to a real network + if(profile->qmap_version != 0x09) + profile->qmap_version = 0x05; + + qmap_version = profile->qmap_version; + if (profile->rmnet_info.size) { + qmap_settings.rx_urb_size = profile->rmnet_info.rx_urb_size; + qmap_settings.ep_type = profile->rmnet_info.ep_type; + qmap_settings.iface_id = profile->rmnet_info.iface_id; + qmap_settings.dl_minimum_padding = profile->rmnet_info.dl_minimum_padding; + qmap_version = profile->rmnet_info.qmap_version; + } + + if (!profile->wda_client) { + if (qmidev_is_gobinet(profile->qmichannel)) { + //when QMAP is enabled, the GobiNet driver sets the data format + } + else if (profile->proxy[0]) { + /* the first running 'quectel-cm' has already allocated the WDA client and set the data format, + so we can skip setting the data format here.
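   (This is the proxy model from the Makefile above: the first quectel-CM
   instance owns the WDA client and programs the data format once; instances
   attached through quectel-qmi-proxy reuse that setting, which is presumably
   why wda_client is left unset for them.)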
*/ + } + goto skip_WdaSetDataFormat; + } + } + + pRequest = ComposeQMUXMsg(QMUX_TYPE_WDS_ADMIN, QMIWDS_ADMIN_SET_DATA_FORMAT_REQ, WdaSetDataFormat, (void *)&qmap_settings); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + linkProto = (PQMIWDS_ADMIN_SET_DATA_FORMAT_TLV)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x11); + if (linkProto != NULL) { + profile->rawIP = (le32_to_cpu(linkProto->Value) == 2); + s_9x07 = profile->rawIP; //MDM90x7 only support RAW IP, do not support Eth + } + + linkProto = (PQMIWDS_ADMIN_SET_DATA_FORMAT_TLV)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x16); + if (linkProto != NULL && profile->qmap_mode) { + qmap_settings.rx_urb_size = le32_to_cpu(linkProto->Value); + dbg_time("qmap_settings.rx_urb_size = %u", qmap_settings.rx_urb_size); //must same as rx_urb_size defined in GobiNet&qmi_wwan driver + } + +#ifdef QUECTEL_UL_DATA_AGG + if (qmap_settings.ul_data_aggregation_max_datagrams) + { + linkProto = (PQMIWDS_ADMIN_SET_DATA_FORMAT_TLV)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x17); + if (linkProto != NULL) { + qmap_settings.ul_data_aggregation_max_datagrams = MIN(qmap_settings.ul_data_aggregation_max_datagrams, le32_to_cpu(linkProto->Value)); + dbg_time("qmap_settings.ul_data_aggregation_max_datagrams = %u", qmap_settings.ul_data_aggregation_max_datagrams); + } + + linkProto = (PQMIWDS_ADMIN_SET_DATA_FORMAT_TLV)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x18); + if (linkProto != NULL) { + qmap_settings.ul_data_aggregation_max_size = MIN(qmap_settings.ul_data_aggregation_max_size, le32_to_cpu(linkProto->Value)); + dbg_time("qmap_settings.ul_data_aggregation_max_size = %u", qmap_settings.ul_data_aggregation_max_size); + } + + linkProto = (PQMIWDS_ADMIN_SET_DATA_FORMAT_TLV)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x1A); + if (linkProto != NULL) { + qmap_settings.dl_minimum_padding = le32_to_cpu(linkProto->Value); + dbg_time("qmap_settings.dl_minimum_padding = %u", qmap_settings.dl_minimum_padding); + } + + if (qmap_settings.ul_data_aggregation_max_datagrams > 1) { + ql_set_driver_qmap_setting(profile, &qmap_settings); + } + } +#endif + + free(pResponse); + +skip_WdaSetDataFormat: + if (profile->enable_ipv4) { + if (profile->qmapnet_adapter[0]) { + // bind wds mux data port + pRequest = ComposeQMUXMsg(QMUX_TYPE_WDS, QMIWDS_BIND_MUX_DATA_PORT_REQ , WdsSetQMUXBindMuxDataPort, (void *)&qmap_settings); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + if (pResponse) free(pResponse); + } + + // set ipv4 + IpPreference = IpFamilyV4; + pRequest = ComposeQMUXMsg(QMUX_TYPE_WDS, QMIWDS_SET_CLIENT_IP_FAMILY_PREF_REQ, WdsSetClientIPFamilyPref, (void *)&IpPreference); + err = QmiThreadSendQMI(pRequest, &pResponse); + if (pResponse) free(pResponse); + } + + if (profile->enable_ipv6) { + if (profile->qmapnet_adapter[0]) { + // bind wds ipv6 mux data port + pRequest = ComposeQMUXMsg(QMUX_TYPE_WDS_IPV6, QMIWDS_BIND_MUX_DATA_PORT_REQ , WdsSetQMUXBindMuxDataPort, (void *)&qmap_settings); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + if (pResponse) free(pResponse); + } + + // set ipv6 + IpPreference = IpFamilyV6; + pRequest = ComposeQMUXMsg(QMUX_TYPE_WDS_IPV6, QMIWDS_SET_CLIENT_IP_FAMILY_PREF_REQ, WdsSetClientIPFamilyPref, (void *)&IpPreference); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + if (pResponse) free(pResponse); + } + + pRequest = ComposeQMUXMsg(QMUX_TYPE_WDS, QMIWDS_SET_AUTO_CONNECT_REQ , WdsSetAutoConnect, (void *)&autoconnect_setting); + QmiThreadSendQMI(pRequest, 
&pResponse); + if (pResponse) free(pResponse); + + return 0; +} + +#ifdef CONFIG_SIM +static int requestGetPINStatus(SIM_Status *pSIMStatus) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + int err; + PQMIDMS_UIM_PIN_STATUS pPin1Status = NULL; + //PQMIDMS_UIM_PIN_STATUS pPin2Status = NULL; + + if (s_9x07) + pRequest = ComposeQMUXMsg(QMUX_TYPE_UIM, QMIUIM_GET_CARD_STATUS_REQ, NULL, NULL); + else + pRequest = ComposeQMUXMsg(QMUX_TYPE_DMS, QMIDMS_UIM_GET_PIN_STATUS_REQ, NULL, NULL); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + pPin1Status = (PQMIDMS_UIM_PIN_STATUS)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x11); + //pPin2Status = (PQMIDMS_UIM_PIN_STATUS)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x12); + + if (pPin1Status != NULL) { + if (pPin1Status->PINStatus == QMI_PIN_STATUS_NOT_VERIF) { + *pSIMStatus = SIM_PIN; + } else if (pPin1Status->PINStatus == QMI_PIN_STATUS_BLOCKED) { + *pSIMStatus = SIM_PUK; + } else if (pPin1Status->PINStatus == QMI_PIN_STATUS_PERM_BLOCKED) { + *pSIMStatus = SIM_BAD; + } + } + + free(pResponse); + return 0; +} + +static int requestGetSIMStatus(SIM_Status *pSIMStatus) { //RIL_REQUEST_GET_SIM_STATUS + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + int err; + const char * SIM_Status_String[] = { + "SIM_ABSENT", + "SIM_NOT_READY", + "SIM_READY", /* SIM_READY means the radio state is RADIO_STATE_SIM_READY */ + "SIM_PIN", + "SIM_PUK", + "SIM_NETWORK_PERSONALIZATION" + }; + + if (s_9x07) + pRequest = ComposeQMUXMsg(QMUX_TYPE_UIM, QMIUIM_GET_CARD_STATUS_REQ, NULL, NULL); + else + pRequest = ComposeQMUXMsg(QMUX_TYPE_DMS, QMIDMS_UIM_GET_STATE_REQ, NULL, NULL); + + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + *pSIMStatus = SIM_ABSENT; + if (s_9x07) + { + PQMIUIM_CARD_STATUS pCardStatus = NULL; + PQMIUIM_PIN_STATE pPINState = NULL; + UCHAR CardState = 0x01; + UCHAR PIN1State = QMI_PIN_STATUS_NOT_VERIF; + //UCHAR PIN1Retries; + //UCHAR PUK1Retries; + //UCHAR PIN2State; + //UCHAR PIN2Retries; + //UCHAR PUK2Retries; + + pCardStatus = (PQMIUIM_CARD_STATUS)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x10); + if (pCardStatus != NULL) + { + pPINState = (PQMIUIM_PIN_STATE)((PUCHAR)pCardStatus + sizeof(QMIUIM_CARD_STATUS) + pCardStatus->AIDLength); + CardState = pCardStatus->CardState; + if (CardState == UIM_CARD_STATE_PRESENT) { + if (pPINState->UnivPIN == 1) + { + PIN1State = pCardStatus->UPINState; + //PIN1Retries = pCardStatus->UPINRetries; + //PUK1Retries = pCardStatus->UPUKRetries; + } + else + { + PIN1State = pPINState->PIN1State; + //PIN1Retries = pPINState->PIN1Retries; + //PUK1Retries = pPINState->PUK1Retries; + } + //PIN2State = pPINState->PIN2State; + //PIN2Retries = pPINState->PIN2Retries; + //PUK2Retries = pPINState->PUK2Retries; + } + } + + *pSIMStatus = SIM_ABSENT; + if ((CardState == 0x01) && ((PIN1State == QMI_PIN_STATUS_VERIFIED)|| (PIN1State == QMI_PIN_STATUS_DISABLED))) + { + *pSIMStatus = SIM_READY; + } + else if (CardState == 0x01) + { + if (PIN1State == QMI_PIN_STATUS_NOT_VERIF) + { + *pSIMStatus = SIM_PIN; + } + if ( PIN1State == QMI_PIN_STATUS_BLOCKED) + { + *pSIMStatus = SIM_PUK; + } + else if (PIN1State == QMI_PIN_STATUS_PERM_BLOCKED) + { + *pSIMStatus = SIM_BAD; + } + else if (PIN1State == QMI_PIN_STATUS_NOT_INIT || PIN1State == QMI_PIN_STATUS_VERIFIED || PIN1State == QMI_PIN_STATUS_DISABLED) + { + *pSIMStatus = SIM_READY; + } + } + else if (CardState == 0x00 || CardState == 0x02) + { + } + else + { + } + } + else + { + //UIM state. 
Values: + // 0x00 UIM initialization completed + // 0x01 UIM is locked or the UIM failed + // 0x02 UIM is not present + // 0x03 Reserved + // 0xFF UIM state is currently + //unavailable + if (pResponse->MUXMsg.UIMGetStateResp.UIMState == 0x00) { + *pSIMStatus = SIM_READY; + } else if (pResponse->MUXMsg.UIMGetStateResp.UIMState == 0x01) { + *pSIMStatus = SIM_ABSENT; + err = requestGetPINStatus(pSIMStatus); + } else if ((pResponse->MUXMsg.UIMGetStateResp.UIMState == 0x02) || (pResponse->MUXMsg.UIMGetStateResp.UIMState == 0xFF)) { + *pSIMStatus = SIM_ABSENT; + } else { + *pSIMStatus = SIM_ABSENT; + } + } + dbg_time("%s SIMStatus: %s", __func__, SIM_Status_String[*pSIMStatus]); + + free(pResponse); + + return 0; +} + +static int requestEnterSimPin(const char *pPinCode) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + int err; + + if (s_9x07) + pRequest = ComposeQMUXMsg(QMUX_TYPE_UIM, QMIUIM_VERIFY_PIN_REQ, UimVerifyPinReqSend, (void *)pPinCode); + else + pRequest = ComposeQMUXMsg(QMUX_TYPE_DMS, QMIDMS_UIM_VERIFY_PIN_REQ, DmsUIMVerifyPinReqSend, (void *)pPinCode); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + free(pResponse); + return 0; +} +#endif + +#ifdef CONFIG_IMSI_ICCID +static int requestGetICCID(void) { //RIL_REQUEST_GET_IMSI + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + PQMIUIM_CONTENT pUimContent; + int err; + + if (s_9x07) { + pRequest = ComposeQMUXMsg(QMUX_TYPE_UIM, QMIUIM_READ_TRANSPARENT_REQ, UimReadTransparentIMSIReqSend, (void *)"EF_ICCID"); + err = QmiThreadSendQMI(pRequest, &pResponse); + } else { + return 0; + } + qmi_rsp_check_and_return(); + + pUimContent = (PQMIUIM_CONTENT)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x11); + if (pUimContent != NULL) { + static char DeviceICCID[32] = {'\0'}; + int i = 0, j = 0; + + for (i = 0, j = 0; i < le16_to_cpu(pUimContent->content_len); ++i) { + char charmaps[] = "0123456789ABCDEF"; + + DeviceICCID[j++] = charmaps[(pUimContent->content[i] & 0x0F)]; + DeviceICCID[j++] = charmaps[((pUimContent->content[i] & 0xF0) >> 0x04)]; + } + DeviceICCID[j] = '\0'; + + dbg_time("%s DeviceICCID: %s", __func__, DeviceICCID); + } + + free(pResponse); + return 0; +} + +static int requestGetIMSI(void) { //RIL_REQUEST_GET_IMSI + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + PQMIUIM_CONTENT pUimContent; + int err; + + if (s_9x07) { + pRequest = ComposeQMUXMsg(QMUX_TYPE_UIM, QMIUIM_READ_TRANSPARENT_REQ, UimReadTransparentIMSIReqSend, (void *)"EF_IMSI"); + err = QmiThreadSendQMI(pRequest, &pResponse); + } else { + return 0; + } + qmi_rsp_check_and_return(); + + pUimContent = (PQMIUIM_CONTENT)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x11); + if (pUimContent != NULL) { + static char DeviceIMSI[32] = {'\0'}; + int i = 0, j = 0; + + for (i = 0, j = 0; i < le16_to_cpu(pUimContent->content[0]); ++i) { + if (i != 0) + DeviceIMSI[j++] = (pUimContent->content[i+1] & 0x0F) + '0'; + DeviceIMSI[j++] = ((pUimContent->content[i+1] & 0xF0) >> 0x04) + '0'; + } + DeviceIMSI[j] = '\0'; + + dbg_time("%s DeviceIMSI: %s", __func__, DeviceIMSI); + } + + free(pResponse); + return 0; +} +#endif + +#if 1 +static void quectel_convert_cdma_mcc_2_ascii_mcc( USHORT *p_mcc, USHORT mcc ) +{ + unsigned int d1, d2, d3, buf = mcc + 111; + + if ( mcc == 0x3FF ) // wildcard + { + *p_mcc = 3; + } + else + { + d3 = buf % 10; + buf = ( d3 == 0 ) ? (buf-10)/10 : buf/10; + + d2 = buf % 10; + buf = ( d2 == 0 ) ? (buf-10)/10 : buf/10; + + d1 = ( buf == 10 ) ? 
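/* Decoding sketch: the digits appear to be packed with 0 stored as 10 and
   the sum offset by 111, hence the "+ 111" above. For MCC "310" the stored
   value is 3*100 + 1*10 + 10 - 111 = 209; decoding gives buf = 209 + 111 =
   320, then d3 = 0 (buf becomes 31), d2 = 1 (buf becomes 3), d1 = 3,
   i.e. 310 again. */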
0 : buf; + +//dbg_time("d1:%d, d2:%d,d3:%d",d1,d2,d3); + if ( d1<10 && d2<10 && d3<10 ) + { + *p_mcc = d1*100+d2*10+d3; +#if 0 + *(p_mcc+0) = '0' + d1; + *(p_mcc+1) = '0' + d2; + *(p_mcc+2) = '0' + d3; +#endif + } + else + { + //dbg_time( "invalid digits %d %d %d", d1, d2, d3 ); + *p_mcc = 0; + } + } +} + +static void quectel_convert_cdma_mnc_2_ascii_mnc( USHORT *p_mnc, USHORT imsi_11_12) +{ + unsigned int d1, d2, buf = imsi_11_12 + 11; + + if ( imsi_11_12 == 0x7F ) // wildcard + { + *p_mnc = 7; + } + else + { + d2 = buf % 10; + buf = ( d2 == 0 ) ? (buf-10)/10 : buf/10; + + d1 = ( buf == 10 ) ? 0 : buf; + + if ( d1<10 && d2<10 ) + { + *p_mnc = d1*10 + d2; + } + else + { + //dbg_time( "invalid digits %d %d", d1, d2, 0 ); + *p_mnc = 0; + } + } +} + +static int requestGetHomeNetwork(USHORT *p_mcc, USHORT *p_mnc, USHORT *p_sid, USHORT *p_nid) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + int err; + PHOME_NETWORK pHomeNetwork; + PHOME_NETWORK_SYSTEMID pHomeNetworkSystemID; + + pRequest = ComposeQMUXMsg(QMUX_TYPE_NAS, QMINAS_GET_HOME_NETWORK_REQ, NULL, NULL); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + pHomeNetwork = (PHOME_NETWORK)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x01); + if (pHomeNetwork && p_mcc && p_mnc ) { + *p_mcc = le16_to_cpu(pHomeNetwork->MobileCountryCode); + *p_mnc = le16_to_cpu(pHomeNetwork->MobileNetworkCode); + //dbg_time("%s MobileCountryCode: %d, MobileNetworkCode: %d", __func__, *pMobileCountryCode, *pMobileNetworkCode); + } + + pHomeNetworkSystemID = (PHOME_NETWORK_SYSTEMID)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x10); + if (pHomeNetworkSystemID && p_sid && p_nid) { + *p_sid = le16_to_cpu(pHomeNetworkSystemID->SystemID); //china-hefei: sid 14451 + *p_nid = le16_to_cpu(pHomeNetworkSystemID->NetworkID); + //dbg_time("%s SystemID: %d, NetworkID: %d", __func__, *pSystemID, *pNetworkID); + } + + free(pResponse); + + return 0; +} +#endif + +#if 0 +// Lookup table for carriers known to produce SIMs which incorrectly indicate MNC length. 
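// How the disabled requestGetIMSI() below uses these tables: the MNC is
// assumed to be 2 digits unless the IMSI's first 6 digits appear in
// MCCMNC_CODES_HAVING_3DIGITS_MNC, or its first 3 digits appear in
// MCC_CODES_HAVING_3DIGITS_MNC, in which case 3 digits are taken. E.g. an
// IMSI starting "310260" matches the first table, so mnc_len becomes 3 and
// the MCC parses as 310. (Note: the 3-digit branch reads tmp[2] from
// IMSI[6], while the third MNC digit is IMSI[5], so this #if 0 code looks
// off by one there.)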
+static const char * MCCMNC_CODES_HAVING_3DIGITS_MNC[] = { + "302370", "302720", "310260", + "405025", "405026", "405027", "405028", "405029", "405030", "405031", "405032", + "405033", "405034", "405035", "405036", "405037", "405038", "405039", "405040", + "405041", "405042", "405043", "405044", "405045", "405046", "405047", "405750", + "405751", "405752", "405753", "405754", "405755", "405756", "405799", "405800", + "405801", "405802", "405803", "405804", "405805", "405806", "405807", "405808", + "405809", "405810", "405811", "405812", "405813", "405814", "405815", "405816", + "405817", "405818", "405819", "405820", "405821", "405822", "405823", "405824", + "405825", "405826", "405827", "405828", "405829", "405830", "405831", "405832", + "405833", "405834", "405835", "405836", "405837", "405838", "405839", "405840", + "405841", "405842", "405843", "405844", "405845", "405846", "405847", "405848", + "405849", "405850", "405851", "405852", "405853", "405875", "405876", "405877", + "405878", "405879", "405880", "405881", "405882", "405883", "405884", "405885", + "405886", "405908", "405909", "405910", "405911", "405912", "405913", "405914", + "405915", "405916", "405917", "405918", "405919", "405920", "405921", "405922", + "405923", "405924", "405925", "405926", "405927", "405928", "405929", "405930", + "405931", "405932", "502142", "502143", "502145", "502146", "502147", "502148" +}; + +static const char * MCC_CODES_HAVING_3DIGITS_MNC[] = { + "302", //Canada + "310", //United States of America + "311", //United States of America + "312", //United States of America + "313", //United States of America + "314", //United States of America + "315", //United States of America + "316", //United States of America + "334", //Mexico + "338", //Jamaica + "342", //Barbados + "344", //Antigua and Barbuda + "346", //Cayman Islands + "348", //British Virgin Islands + "365", //Anguilla + "708", //Honduras (Republic of) + "722", //Argentine Republic + "732" //Colombia (Republic of) +}; + +int requestGetIMSI(const char **pp_imsi, USHORT *pMobileCountryCode, USHORT *pMobileNetworkCode) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + int err; + + if (pp_imsi) *pp_imsi = NULL; + if (pMobileCountryCode) *pMobileCountryCode = 0; + if (pMobileNetworkCode) *pMobileNetworkCode = 0; + + pRequest = ComposeQMUXMsg(QMUX_TYPE_DMS, QMIDMS_UIM_GET_IMSI_REQ, NULL, NULL); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + if (pMUXMsg->UIMGetIMSIResp.TLV2Type == 0x01 && le16_to_cpu(pMUXMsg->UIMGetIMSIResp.TLV2Length) >= 5) { + int mnc_len = 2; + unsigned i; + char tmp[4]; + + if (pp_imsi) *pp_imsi = strndup((const char *)(&pMUXMsg->UIMGetIMSIResp.IMSI), le16_to_cpu(pMUXMsg->UIMGetIMSIResp.TLV2Length)); + + for (i = 0; i < sizeof(MCCMNC_CODES_HAVING_3DIGITS_MNC)/sizeof(MCCMNC_CODES_HAVING_3DIGITS_MNC[0]); i++) { + if (!strncmp((const char *)(&pMUXMsg->UIMGetIMSIResp.IMSI), MCCMNC_CODES_HAVING_3DIGITS_MNC[i], 6)) { + mnc_len = 3; + break; + } + } + if (mnc_len == 2) { + for (i = 0; i < sizeof(MCC_CODES_HAVING_3DIGITS_MNC)/sizeof(MCC_CODES_HAVING_3DIGITS_MNC[0]); i++) { + if (!strncmp((const char *)(&pMUXMsg->UIMGetIMSIResp.IMSI), MCC_CODES_HAVING_3DIGITS_MNC[i], 3)) { + mnc_len = 3; + break; + } + } + } + + tmp[0] = (&pMUXMsg->UIMGetIMSIResp.IMSI)[0]; + tmp[1] = (&pMUXMsg->UIMGetIMSIResp.IMSI)[1]; + tmp[2] = (&pMUXMsg->UIMGetIMSIResp.IMSI)[2]; + tmp[3] = 0; + if (pMobileCountryCode) *pMobileCountryCode = atoi(tmp); + tmp[0] = (&pMUXMsg->UIMGetIMSIResp.IMSI)[3]; + tmp[1] = 
(&pMUXMsg->UIMGetIMSIResp.IMSI)[4]; + tmp[2] = 0; + if (mnc_len == 3) { + tmp[2] = (&pMUXMsg->UIMGetIMSIResp.IMSI)[6]; + } + if (pMobileNetworkCode) *pMobileNetworkCode = atoi(tmp); + } + + free(pResponse); + + return 0; +} +#endif + +static struct wwan_data_class_str class2str[] = { + {WWAN_DATA_CLASS_NONE, "UNKNOWN"}, + {WWAN_DATA_CLASS_GPRS, "GPRS"}, + {WWAN_DATA_CLASS_EDGE, "EDGE"}, + {WWAN_DATA_CLASS_UMTS, "UMTS"}, + {WWAN_DATA_CLASS_HSDPA, "HSDPA"}, + {WWAN_DATA_CLASS_HSUPA, "HSUPA"}, + {WWAN_DATA_CLASS_LTE, "LTE"}, + {WWAN_DATA_CLASS_5G_NSA, "5G_NSA"}, + {WWAN_DATA_CLASS_5G_SA, "5G_SA"}, + {WWAN_DATA_CLASS_1XRTT, "1XRTT"}, + {WWAN_DATA_CLASS_1XEVDO, "1XEVDO"}, + {WWAN_DATA_CLASS_1XEVDO_REVA, "1XEVDO_REVA"}, + {WWAN_DATA_CLASS_1XEVDV, "1XEVDV"}, + {WWAN_DATA_CLASS_3XRTT, "3XRTT"}, + {WWAN_DATA_CLASS_1XEVDO_REVB, "1XEVDO_REVB"}, + {WWAN_DATA_CLASS_UMB, "UMB"}, + {WWAN_DATA_CLASS_CUSTOM, "CUSTOM"}, +}; + +static const char *wwan_data_class2str(ULONG class) +{ + unsigned int i = 0; + for (i = 0; i < sizeof(class2str)/sizeof(class2str[0]); i++) { + if (class2str[i].class == class) { + return class2str[i].str; + } + } + return "UNKNOWN"; +} + +static USHORT char2ushort(UCHAR str[3]) { + int i; + char temp[4]; + USHORT ret= 0; + + memcpy(temp, str, 3); + temp[3] = '\0'; + + for (i = 0; i < 4; i++) { + if ((UCHAR)temp[i] == 0xFF) { + temp[i] = '\0'; + } + } + ret = (USHORT)atoi(temp); + + return ret; +} + +static int requestRegistrationState2(UCHAR *pPSAttachedState) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + int err; + USHORT MobileCountryCode = 0; + USHORT MobileNetworkCode = 0; + const char *pDataCapStr = "UNKNOW"; + LONG remainingLen; + PSERVICE_STATUS_INFO pServiceStatusInfo; + int is_lte = 0; + PCDMA_SYSTEM_INFO pCdmaSystemInfo; + PHDR_SYSTEM_INFO pHdrSystemInfo; + PGSM_SYSTEM_INFO pGsmSystemInfo; + PWCDMA_SYSTEM_INFO pWcdmaSystemInfo; + PLTE_SYSTEM_INFO pLteSystemInfo; + PTDSCDMA_SYSTEM_INFO pTdscdmaSystemInfo; + PNR5G_SYSTEM_INFO pNr5gSystemInfo; + UCHAR DeviceClass = 0; + ULONG DataCapList = 0; + + /* Additional LTE System Info - Availability of Dual connectivity of E-UTRA with NR5G */ + uint8_t endc_available_valid = 0; /**< Must be set to true if endc_available is being passed */ + uint8_t endc_available = 0x00; + /**< + Upper layer indication in LTE SIB2. Values: \n + - 0x00 -- 5G Not available \n + - 0x01 -- 5G Available + + */ + /* Additional LTE System Info - DCNR restriction Info */ + uint8_t restrict_dcnr_valid = 0; /**< Must be set to true if restrict_dcnr is being passed */ + uint8_t restrict_dcnr = 0x01; + /**< + DCNR restriction in NAS attach/TAU accept. 
Values: \n + - 0x00 -- Not restricted \n + - 0x01 -- Restricted + */ + + *pPSAttachedState = 0; + pRequest = ComposeQMUXMsg(QMUX_TYPE_NAS, QMINAS_GET_SYS_INFO_REQ, NULL, NULL); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + pServiceStatusInfo = (PSERVICE_STATUS_INFO)(((PCHAR)&pMUXMsg->GetSysInfoResp) + QCQMUX_MSG_HDR_SIZE); + remainingLen = le16_to_cpu(pMUXMsg->GetSysInfoResp.Length); + + s_is_cdma = 0; + s_5g_type = WWAN_DATA_CLASS_NONE; + s_hdr_personality = 0; + while (remainingLen > 0) { + switch (pServiceStatusInfo->TLVType) { + case 0x10: // CDMA + if (pServiceStatusInfo->SrvStatus == 0x02) { + DataCapList = WWAN_DATA_CLASS_1XRTT| + WWAN_DATA_CLASS_1XEVDO| + WWAN_DATA_CLASS_1XEVDO_REVA| + WWAN_DATA_CLASS_1XEVDV| + WWAN_DATA_CLASS_1XEVDO_REVB; + DeviceClass = DEVICE_CLASS_CDMA; + s_is_cdma = (0 == is_lte); + } + break; + case 0x11: // HDR + if (pServiceStatusInfo->SrvStatus == 0x02) { + DataCapList = WWAN_DATA_CLASS_3XRTT| + WWAN_DATA_CLASS_UMB; + DeviceClass = DEVICE_CLASS_CDMA; + s_is_cdma = (0 == is_lte); + } + break; + case 0x12: // GSM + if (pServiceStatusInfo->SrvStatus == 0x02) { + DataCapList = WWAN_DATA_CLASS_GPRS| + WWAN_DATA_CLASS_EDGE; + DeviceClass = DEVICE_CLASS_GSM; + } + break; + case 0x13: // WCDMA + if (pServiceStatusInfo->SrvStatus == 0x02) { + DataCapList = WWAN_DATA_CLASS_UMTS; + DeviceClass = DEVICE_CLASS_GSM; + } + break; + case 0x14: // LTE + if (pServiceStatusInfo->SrvStatus == 0x02) { + DataCapList = WWAN_DATA_CLASS_LTE; + DeviceClass = DEVICE_CLASS_GSM; + is_lte = 1; + s_is_cdma = 0; + } + break; + case 0x4A: // NR5G Service Status Info + if (pServiceStatusInfo->SrvStatus == NAS_SYS_SRV_STATUS_SRV_V01) { + DataCapList |= WWAN_DATA_CLASS_5G_SA; + DeviceClass = DEVICE_CLASS_GSM; + is_lte = 1; + s_is_cdma = 0; + } + break; + case 0x4B: // NR5G System Info + pNr5gSystemInfo = (PNR5G_SYSTEM_INFO)pServiceStatusInfo; + if (pNr5gSystemInfo->srv_domain_valid == 0x01) { + if (pNr5gSystemInfo->srv_domain & SYS_SRV_DOMAIN_PS_ONLY_V01) { + *pPSAttachedState = 1; + } + } + + if (pNr5gSystemInfo->network_id_valid == 0x01) { + MobileCountryCode = (USHORT)char2ushort(pNr5gSystemInfo->MCC); + MobileNetworkCode = (USHORT)char2ushort(pNr5gSystemInfo->MNC); + } + break; + case 0x4E: //Additional LTE System Info - Availability of Dual Connectivity of E-UTRA with NR5G + endc_available_valid = 1; + endc_available = pServiceStatusInfo->SrvStatus; + break; + + case 0x4F: //Additional LTE System Info - DCNR restriction Info + restrict_dcnr_valid = 1; + restrict_dcnr = pServiceStatusInfo->SrvStatus; + break; + + case 0x24: // TDSCDMA + if (pServiceStatusInfo->SrvStatus == 0x02) { + pDataCapStr = "TD-SCDMA"; + } + break; + case 0x15: // CDMA + // CDMA_SYSTEM_INFO + pCdmaSystemInfo = (PCDMA_SYSTEM_INFO)pServiceStatusInfo; + if (pCdmaSystemInfo->SrvDomainValid == 0x01) { + if (pCdmaSystemInfo->SrvDomain & 0x02) { + *pPSAttachedState = 1; + s_is_cdma = (0 == is_lte); + } + } +#if 0 + if (pCdmaSystemInfo->SrvCapabilityValid == 0x01) { + *pPSAttachedState = 0; + if (pCdmaSystemInfo->SrvCapability & 0x02) { + *pPSAttachedState = 1; + s_is_cdma = (0 == is_lte); + } + } +#endif + if (pCdmaSystemInfo->NetworkIdValid == 0x01) { + MobileCountryCode = (USHORT)char2ushort(pCdmaSystemInfo->MCC); + MobileNetworkCode = (USHORT)char2ushort(pCdmaSystemInfo->MNC); + } + break; + case 0x16: // HDR + // HDR_SYSTEM_INFO + pHdrSystemInfo = (PHDR_SYSTEM_INFO)pServiceStatusInfo; + if (pHdrSystemInfo->SrvDomainValid == 0x01) { + if (pHdrSystemInfo->SrvDomain & 0x02) { + 
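/* SrvDomain follows the QMI sys-info service-domain convention (0 = none,
   1 = CS only, 2 = PS only, 3 = CS+PS), so the (SrvDomain & 0x02) test used
   here and in the other per-RAT cases of this switch is true exactly for
   the PS-attached states. */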
*pPSAttachedState = 1; + s_is_cdma = (0 == is_lte); + } + } +#if 0 + if (pHdrSystemInfo->SrvCapabilityValid == 0x01) { + *pPSAttachedState = 0; + if (pHdrSystemInfo->SrvCapability & 0x02) { + *pPSAttachedState = 1; + s_is_cdma = (0 == is_lte); + } + } +#endif + if (*pPSAttachedState && pHdrSystemInfo->HdrPersonalityValid == 0x01) { + if (pHdrSystemInfo->HdrPersonality == 0x03) + s_hdr_personality = 0x02; + //else if (pHdrSystemInfo->HdrPersonality == 0x02) + // s_hdr_personality = 0x01; + } + USHORT cmda_mcc = 0, cdma_mnc = 0; + if(!requestGetHomeNetwork(&cmda_mcc, &cdma_mnc,NULL, NULL) && cmda_mcc) { + quectel_convert_cdma_mcc_2_ascii_mcc(&MobileCountryCode, cmda_mcc); + quectel_convert_cdma_mnc_2_ascii_mnc(&MobileNetworkCode, cdma_mnc); + } + break; + case 0x17: // GSM + // GSM_SYSTEM_INFO + pGsmSystemInfo = (PGSM_SYSTEM_INFO)pServiceStatusInfo; + if (pGsmSystemInfo->SrvDomainValid == 0x01) { + if (pGsmSystemInfo->SrvDomain & 0x02) { + *pPSAttachedState = 1; + } + } +#if 0 + if (pGsmSystemInfo->SrvCapabilityValid == 0x01) { + *pPSAttachedState = 0; + if (pGsmSystemInfo->SrvCapability & 0x02) { + *pPSAttachedState = 1; + } + } +#endif + if (pGsmSystemInfo->NetworkIdValid == 0x01) { + MobileCountryCode = (USHORT)char2ushort(pGsmSystemInfo->MCC); + MobileNetworkCode = (USHORT)char2ushort(pGsmSystemInfo->MNC); + } + break; + case 0x18: // WCDMA + // WCDMA_SYSTEM_INFO + pWcdmaSystemInfo = (PWCDMA_SYSTEM_INFO)pServiceStatusInfo; + if (pWcdmaSystemInfo->SrvDomainValid == 0x01) { + if (pWcdmaSystemInfo->SrvDomain & 0x02) { + *pPSAttachedState = 1; + } + } +#if 0 + if (pWcdmaSystemInfo->SrvCapabilityValid == 0x01) { + *pPSAttachedState = 0; + if (pWcdmaSystemInfo->SrvCapability & 0x02) { + *pPSAttachedState = 1; + } + } +#endif + if (pWcdmaSystemInfo->NetworkIdValid == 0x01) { + MobileCountryCode = (USHORT)char2ushort(pWcdmaSystemInfo->MCC); + MobileNetworkCode = (USHORT)char2ushort(pWcdmaSystemInfo->MNC); + } + break; + case 0x19: // LTE_SYSTEM_INFO + // LTE_SYSTEM_INFO + pLteSystemInfo = (PLTE_SYSTEM_INFO)pServiceStatusInfo; + if (pLteSystemInfo->SrvDomainValid == 0x01) { + if (pLteSystemInfo->SrvDomain & 0x02) { + *pPSAttachedState = 1; + is_lte = 1; + s_is_cdma = 0; + } + } +#if 0 + if (pLteSystemInfo->SrvCapabilityValid == 0x01) { + *pPSAttachedState = 0; + if (pLteSystemInfo->SrvCapability & 0x02) { + *pPSAttachedState = 1; + is_lte = 1; + s_is_cdma = 0; + } + } +#endif + if (pLteSystemInfo->NetworkIdValid == 0x01) { + MobileCountryCode = (USHORT)char2ushort(pLteSystemInfo->MCC); + MobileNetworkCode = (USHORT)char2ushort(pLteSystemInfo->MNC); + } + break; + case 0x25: // TDSCDMA + // TDSCDMA_SYSTEM_INFO + pTdscdmaSystemInfo = (PTDSCDMA_SYSTEM_INFO)pServiceStatusInfo; + if (pTdscdmaSystemInfo->SrvDomainValid == 0x01) { + if (pTdscdmaSystemInfo->SrvDomain & 0x02) { + *pPSAttachedState = 1; + } + } +#if 0 + if (pTdscdmaSystemInfo->SrvCapabilityValid == 0x01) { + *pPSAttachedState = 0; + if (pTdscdmaSystemInfo->SrvCapability & 0x02) { + *pPSAttachedState = 1; + } + } +#endif + if (pTdscdmaSystemInfo->NetworkIdValid == 0x01) { + MobileCountryCode = (USHORT)char2ushort(pTdscdmaSystemInfo->MCC); + MobileNetworkCode = (USHORT)char2ushort(pTdscdmaSystemInfo->MNC); + } + break; + default: + break; + } /* switch (pServiceStatusInfo->TLYType) */ + + remainingLen -= (le16_to_cpu(pServiceStatusInfo->TLVLength) + 3); + pServiceStatusInfo = (PSERVICE_STATUS_INFO)((PCHAR)&pServiceStatusInfo->TLVLength + le16_to_cpu(pServiceStatusInfo->TLVLength) + sizeof(USHORT)); + } /* while (remainingLen > 0) */ + + if 
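/* 5G NSA detection: on top of LTE service, the cell must both advertise
   EN-DC (TLV 0x4E, endc_available == 1) and leave DCNR unrestricted
   (TLV 0x4F, restrict_dcnr == 0) before WWAN_DATA_CLASS_5G_NSA is added
   below. */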
(DataCapList & WWAN_DATA_CLASS_LTE) { + if (endc_available_valid && restrict_dcnr_valid) { + if (endc_available && !restrict_dcnr) { + DataCapList |= WWAN_DATA_CLASS_5G_NSA; + } + } + } + + if (DeviceClass == DEVICE_CLASS_CDMA) { + if (s_hdr_personality == 2) { + pDataCapStr = s_hdr_personality == 2 ? "eHRPD" : "HRPD"; + } else if (DataCapList & WWAN_DATA_CLASS_1XEVDO_REVB) { + pDataCapStr = wwan_data_class2str(WWAN_DATA_CLASS_1XEVDO_REVB); + } else if (DataCapList & WWAN_DATA_CLASS_1XEVDO_REVA) { + pDataCapStr = wwan_data_class2str(WWAN_DATA_CLASS_1XEVDO_REVA); + } else if (DataCapList & WWAN_DATA_CLASS_1XEVDO) { + pDataCapStr = wwan_data_class2str(WWAN_DATA_CLASS_1XEVDO); + } else if (DataCapList & WWAN_DATA_CLASS_1XRTT) { + pDataCapStr = wwan_data_class2str(WWAN_DATA_CLASS_1XRTT); + } else if (DataCapList & WWAN_DATA_CLASS_3XRTT) { + pDataCapStr = wwan_data_class2str(WWAN_DATA_CLASS_3XRTT); + } else if (DataCapList & WWAN_DATA_CLASS_UMB) { + pDataCapStr = wwan_data_class2str(WWAN_DATA_CLASS_UMB); + } + } else { + if (DataCapList & WWAN_DATA_CLASS_5G_SA) { + s_5g_type = WWAN_DATA_CLASS_5G_SA; + pDataCapStr = wwan_data_class2str(WWAN_DATA_CLASS_5G_SA); + } else if (DataCapList & WWAN_DATA_CLASS_5G_NSA) { + s_5g_type = WWAN_DATA_CLASS_5G_NSA; + pDataCapStr = wwan_data_class2str(WWAN_DATA_CLASS_5G_NSA); + } else if (DataCapList & WWAN_DATA_CLASS_LTE) { + pDataCapStr = wwan_data_class2str(WWAN_DATA_CLASS_LTE); + } else if ((DataCapList & WWAN_DATA_CLASS_HSDPA) && (DataCapList & WWAN_DATA_CLASS_HSUPA)) { + pDataCapStr = "HSDPA_HSUPA"; + } else if (DataCapList & WWAN_DATA_CLASS_HSDPA) { + pDataCapStr = wwan_data_class2str(WWAN_DATA_CLASS_HSDPA); + } else if (DataCapList & WWAN_DATA_CLASS_HSUPA) { + pDataCapStr = wwan_data_class2str(WWAN_DATA_CLASS_HSUPA); + } else if (DataCapList & WWAN_DATA_CLASS_UMTS) { + pDataCapStr = wwan_data_class2str(WWAN_DATA_CLASS_UMTS); + } else if (DataCapList & WWAN_DATA_CLASS_EDGE) { + pDataCapStr = wwan_data_class2str(WWAN_DATA_CLASS_EDGE); + } else if (DataCapList & WWAN_DATA_CLASS_GPRS) { + pDataCapStr = wwan_data_class2str(WWAN_DATA_CLASS_GPRS); + } + } + + dbg_time("%s MCC: %d, MNC: %d, PS: %s, DataCap: %s", __func__, + MobileCountryCode, MobileNetworkCode, (*pPSAttachedState == 1) ? "Attached" : "Detached" , pDataCapStr); + + free(pResponse); + + return 0; +} + +static int requestRegistrationState(UCHAR *pPSAttachedState) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + int err; + PQMINAS_CURRENT_PLMN_MSG pCurrentPlmn; + PSERVING_SYSTEM pServingSystem; + PQMINAS_DATA_CAP pDataCap; + USHORT MobileCountryCode = 0; + USHORT MobileNetworkCode = 0; + const char *pDataCapStr = "UNKNOW"; + + if (s_9x07) { + return requestRegistrationState2(pPSAttachedState); + } + + pRequest = ComposeQMUXMsg(QMUX_TYPE_NAS, QMINAS_GET_SERVING_SYSTEM_REQ, NULL, NULL); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + pCurrentPlmn = (PQMINAS_CURRENT_PLMN_MSG)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x12); + if (pCurrentPlmn) { + MobileCountryCode = le16_to_cpu(pCurrentPlmn->MobileCountryCode); + MobileNetworkCode = le16_to_cpu(pCurrentPlmn->MobileNetworkCode); + } + + *pPSAttachedState = 0; + pServingSystem = (PSERVING_SYSTEM)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x01); + if (pServingSystem) { + //Packet-switched domain attach state of the mobile. 
+ //0x00 PS_UNKNOWN - Unknown or not applicable + //0x01 PS_ATTACHED - Attached + //0x02 PS_DETACHED - Detached + *pPSAttachedState = pServingSystem->RegistrationState; + if (pServingSystem->RegistrationState == 0x01) //0x01 - REGISTERED - Registered with a network + *pPSAttachedState = pServingSystem->PSAttachedState; + else { + //MobileCountryCode = MobileNetworkCode = 0; + *pPSAttachedState = 0x02; + } + } + + pDataCap = (PQMINAS_DATA_CAP)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x11); + if (pDataCap && pDataCap->DataCapListLen) { + UCHAR *DataCap = &pDataCap->DataCap; + if (pDataCap->DataCapListLen == 2) { + if ((DataCap[0] == 0x06) && ((DataCap[1] == 0x08) || (DataCap[1] == 0x0A))) + DataCap[0] = DataCap[1]; + } + switch (DataCap[0]) { + case 0x01: pDataCapStr = "GPRS"; break; + case 0x02: pDataCapStr = "EDGE"; break; + case 0x03: pDataCapStr = "HSDPA"; break; + case 0x04: pDataCapStr = "HSUPA"; break; + case 0x05: pDataCapStr = "UMTS"; break; + case 0x06: pDataCapStr = "1XRTT"; break; + case 0x07: pDataCapStr = "1XEVDO"; break; + case 0x08: pDataCapStr = "1XEVDO_REVA"; break; + case 0x09: pDataCapStr = "GPRS"; break; + case 0x0A: pDataCapStr = "1XEVDO_REVB"; break; + case 0x0B: pDataCapStr = "LTE"; break; + case 0x0C: pDataCapStr = "HSDPA"; break; + case 0x0D: pDataCapStr = "HSDPA"; break; + default: pDataCapStr = "UNKNOW"; break; + } + } + + if (pServingSystem && pServingSystem->RegistrationState == 0x01 && pServingSystem->InUseRadioIF && pServingSystem->RadioIF == 0x09) { + pDataCapStr = "TD-SCDMA"; + } + + s_is_cdma = 0; + if (pServingSystem && pServingSystem->RegistrationState == 0x01 && pServingSystem->InUseRadioIF && (pServingSystem->RadioIF == 0x01 || pServingSystem->RadioIF == 0x02)) { + USHORT cmda_mcc = 0, cdma_mnc = 0; + s_is_cdma = 1; + if(!requestGetHomeNetwork(&cmda_mcc, &cdma_mnc,NULL, NULL) && cmda_mcc) { + quectel_convert_cdma_mcc_2_ascii_mcc(&MobileCountryCode, cmda_mcc); + quectel_convert_cdma_mnc_2_ascii_mnc(&MobileNetworkCode, cdma_mnc); + } + if (1) { + PQCQMUX_TLV pTLV = (PQCQMUX_TLV)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x23); + if (pTLV) + s_hdr_personality = pTLV->Value; + else + s_hdr_personality = 0; + if (s_hdr_personality == 2) + pDataCapStr = "eHRPD"; + } + } + + dbg_time("%s MCC: %d, MNC: %d, PS: %s, DataCap: %s", __func__, + MobileCountryCode, MobileNetworkCode, (*pPSAttachedState == 1) ? "Attached" : "Detached" , pDataCapStr); + + free(pResponse); + + return 0; +} + +static int requestQueryDataCall(UCHAR *pConnectionStatus, int curIpFamily) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + int err; + PQMIWDS_PKT_SRVC_TLV pPktSrvc; + UCHAR oldConnectionStatus = *pConnectionStatus; + UCHAR QMIType = (curIpFamily == IpFamilyV4) ?
QMUX_TYPE_WDS : QMUX_TYPE_WDS_IPV6; + + pRequest = ComposeQMUXMsg(QMIType, QMIWDS_GET_PKT_SRVC_STATUS_REQ, NULL, NULL); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + *pConnectionStatus = QWDS_PKT_DATA_DISCONNECTED; + pPktSrvc = (PQMIWDS_PKT_SRVC_TLV)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x01); + if (pPktSrvc) { + *pConnectionStatus = pPktSrvc->ConnectionStatus; + if ((le16_to_cpu(pPktSrvc->TLVLength) == 2) && (pPktSrvc->ReconfigReqd == 0x01)) + *pConnectionStatus = QWDS_PKT_DATA_DISCONNECTED; + } + + if (*pConnectionStatus == QWDS_PKT_DATA_DISCONNECTED) { + if (curIpFamily == IpFamilyV4) + WdsConnectionIPv4Handle = 0; + else + WdsConnectionIPv6Handle = 0; + } + + if (oldConnectionStatus != *pConnectionStatus || debug_qmi) { + dbg_time("%s %sConnectionStatus: %s", __func__, (curIpFamily == IpFamilyV4) ? "IPv4" : "IPv6", + (*pConnectionStatus == QWDS_PKT_DATA_CONNECTED) ? "CONNECTED" : "DISCONNECTED"); + } + + free(pResponse); + return 0; +} + +static int requestSetupDataCall(PROFILE_T *profile, int curIpFamily) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + int err = 0; + UCHAR QMIType = (curIpFamily == IpFamilyV4) ? QMUX_TYPE_WDS : QMUX_TYPE_WDS_IPV6; + +//DualIPSupported means the module can get an IPv4 and an IPv6 address at the same time: one WDS client serves IPv4, the other serves IPv6 + profile->curIpFamily = curIpFamily; + pRequest = ComposeQMUXMsg(QMIType, QMIWDS_START_NETWORK_INTERFACE_REQ, WdsStartNwInterfaceReq, profile); + err = QmiThreadSendQMITimeout(pRequest, &pResponse, 120 * 1000, __func__); + qmi_rsp_check(); + + if (le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXResult) || le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXError)) { + PQMI_TLV_HDR pTLVHdr; + + pTLVHdr = GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x10); + if (pTLVHdr) { + uint16_t *data16 = (uint16_t *)(pTLVHdr+1); + uint16_t call_end_reason = le16_to_cpu(data16[0]); + dbg_time("call_end_reason is %d", call_end_reason); + } + + pTLVHdr = GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x11); + if (pTLVHdr) { + uint16_t *data16 = (uint16_t *)(pTLVHdr+1); + uint16_t call_end_reason_type = le16_to_cpu(data16[0]); + uint16_t verbose_call_end_reason = le16_to_cpu(data16[1]); + + dbg_time("call_end_reason_type is %d", call_end_reason_type); + dbg_time("call_end_reason_verbose is %d", verbose_call_end_reason); + } + + free(pResponse); + return le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXError); + } + + if (curIpFamily == IpFamilyV4) { + WdsConnectionIPv4Handle = le32_to_cpu(pResponse->MUXMsg.StartNwInterfaceResp.Handle); + dbg_time("%s WdsConnectionIPv4Handle: 0x%08x", __func__, WdsConnectionIPv4Handle); + } else { + WdsConnectionIPv6Handle = le32_to_cpu(pResponse->MUXMsg.StartNwInterfaceResp.Handle); + dbg_time("%s WdsConnectionIPv6Handle: 0x%08x", __func__, WdsConnectionIPv6Handle); + } + + free(pResponse); + + return 0; +} + +static int requestDeactivateDefaultPDP(PROFILE_T *profile, int curIpFamily) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + int err; + UCHAR QMIType = (curIpFamily == IpFamilyV4) ? QMUX_TYPE_WDS : QMUX_TYPE_WDS_IPV6; + + (void)profile; + if (curIpFamily == IpFamilyV4 && WdsConnectionIPv4Handle == 0) + return 0; + if (curIpFamily == IpFamilyV6 && WdsConnectionIPv6Handle == 0) + return 0; + + dbg_time("%s WdsConnectionIPv%dHandle", __func__, curIpFamily == IpFamilyV4 ? 
4 : 6); + + pRequest = ComposeQMUXMsg(QMIType, QMIWDS_STOP_NETWORK_INTERFACE_REQ, WdsStopNwInterfaceReq, &curIpFamily); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + if (curIpFamily == IpFamilyV4) + WdsConnectionIPv4Handle = 0; + else + WdsConnectionIPv6Handle = 0; + free(pResponse); + return 0; +} + +static int requestGetIPAddress(PROFILE_T *profile, int curIpFamily) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + int err; + PQMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV4_ADDR pIpv4Addr; + PQMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV6_ADDR pIpv6Addr = NULL; + PQMIWDS_GET_RUNTIME_SETTINGS_TLV_MTU pMtu; + IPV4_T *pIpv4 = &profile->ipv4; + IPV6_T *pIpv6 = &profile->ipv6; + UCHAR QMIType = (curIpFamily == IpFamilyV4) ? QMUX_TYPE_WDS : QMUX_TYPE_WDS_IPV6; + PQMIWDS_GET_RUNNING_SETTINGS_PCSCF_IPV6_ADDR pPCSCFIpv6Addr; + PQMIWDS_GET_RUNNING_SETTINGS_PCSCF_IPV4_ADDR pPCSCFIpv4Addr; + + if (curIpFamily == IpFamilyV4) { + memset(pIpv4, 0x00, sizeof(IPV4_T)); + if (WdsConnectionIPv4Handle == 0) + return 0; + } else if (curIpFamily == IpFamilyV6) { + memset(pIpv6, 0x00, sizeof(IPV6_T)); + if (WdsConnectionIPv6Handle == 0) + return 0; + } + + pRequest = ComposeQMUXMsg(QMIType, QMIWDS_GET_RUNTIME_SETTINGS_REQ, WdsGetRuntimeSettingReq, NULL); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + pPCSCFIpv6Addr = (PQMIWDS_GET_RUNNING_SETTINGS_PCSCF_IPV6_ADDR)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x2e); // 0x2e - pcscf ipv6 address + if (pPCSCFIpv6Addr) { + if (pPCSCFIpv6Addr->PCSCFNumber == 1) { + UCHAR *PCSCFIpv6Addr1 = (UCHAR *)(pPCSCFIpv6Addr + 1); + memcpy(profile->PCSCFIpv6Addr1, PCSCFIpv6Addr1, 16); + } else if (pPCSCFIpv6Addr->PCSCFNumber == 2) { + UCHAR *PCSCFIpv6Addr1 = (UCHAR *)(pPCSCFIpv6Addr + 1); + UCHAR *PCSCFIpv6Addr2 = PCSCFIpv6Addr1 + 16; + memcpy(profile->PCSCFIpv6Addr1, PCSCFIpv6Addr1, 16); + memcpy(profile->PCSCFIpv6Addr2, PCSCFIpv6Addr2, 16); + } + } + + pPCSCFIpv4Addr = (PQMIWDS_GET_RUNNING_SETTINGS_PCSCF_IPV4_ADDR)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x23); // 0x23 - pcscf ipv4 address + if (pPCSCFIpv4Addr) { + if (pPCSCFIpv4Addr->PCSCFNumber == 1) { + UCHAR *PCSCFIpv4Addr1 = (UCHAR *)(pPCSCFIpv4Addr + 1); + memcpy(&profile->PCSCFIpv4Addr1, PCSCFIpv4Addr1, 4); + } else if (pPCSCFIpv4Addr->PCSCFNumber == 2) { + UCHAR *PCSCFIpv4Addr1 = (UCHAR *)(pPCSCFIpv4Addr + 1); + UCHAR *PCSCFIpv4Addr2 = PCSCFIpv4Addr1 + 4; + memcpy(&profile->PCSCFIpv4Addr1, PCSCFIpv4Addr1, 4); + memcpy(&profile->PCSCFIpv4Addr2, PCSCFIpv4Addr2, 4); + } + } + + pIpv4Addr = (PQMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV4_ADDR)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV4PRIMARYDNS); + if (pIpv4Addr) { + pIpv4->DnsPrimary = pIpv4Addr->IPV4Address; + } + + pIpv4Addr = (PQMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV4_ADDR)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV4SECONDARYDNS); + if (pIpv4Addr) { + pIpv4->DnsSecondary = pIpv4Addr->IPV4Address; + } + + pIpv4Addr = (PQMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV4_ADDR)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV4GATEWAY); + if (pIpv4Addr) { + pIpv4->Gateway = pIpv4Addr->IPV4Address; + } + + pIpv4Addr = (PQMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV4_ADDR)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV4SUBNET); + if (pIpv4Addr) { + pIpv4->SubnetMask = pIpv4Addr->IPV4Address; + } + + pIpv4Addr = (PQMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV4_ADDR)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 
QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV4); + if (pIpv4Addr) { + pIpv4->Address = pIpv4Addr->IPV4Address; + } + + pIpv6Addr = (PQMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV6_ADDR)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV6PRIMARYDNS); + if (pIpv6Addr) { + memcpy(pIpv6->DnsPrimary, pIpv6Addr->IPV6Address, 16); + } + + pIpv6Addr = (PQMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV6_ADDR)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV6SECONDARYDNS); + if (pIpv6Addr) { + memcpy(pIpv6->DnsSecondary, pIpv6Addr->IPV6Address, 16); + } + + pIpv6Addr = (PQMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV6_ADDR)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV6GATEWAY); + if (pIpv6Addr) { + memcpy(pIpv6->Gateway, pIpv6Addr->IPV6Address, 16); + pIpv6->PrefixLengthGateway = pIpv6Addr->PrefixLength; + } + + pIpv6Addr = (PQMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV6_ADDR)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV6); + if (pIpv6Addr) { + memcpy(pIpv6->Address, pIpv6Addr->IPV6Address, 16); + pIpv6->PrefixLengthIPAddr = pIpv6Addr->PrefixLength; + } + + pMtu = (PQMIWDS_GET_RUNTIME_SETTINGS_TLV_MTU)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_MTU); + if (pMtu) { + if (curIpFamily == IpFamilyV4) + pIpv4->Mtu = le32_to_cpu(pMtu->Mtu); + else + pIpv6->Mtu = le32_to_cpu(pMtu->Mtu); + } + + free(pResponse); + return 0; +} + +#ifdef CONFIG_APN +static int requestSetProfile(PROFILE_T *profile) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + int err; + const char *new_apn = profile->apn ? profile->apn : ""; + const char *new_user = profile->user ? profile->user : ""; + const char *new_password = profile->password ? profile->password : ""; + const char *ipStr[] = {"IPV4", "NULL", "IPV6", "IPV4V6"}; + + dbg_time("%s[%d] %s/%s/%s/%d/%s", __func__, profile->pdp, profile->apn, profile->user, profile->password, profile->auth, ipStr[profile->iptype]); + if (!profile->pdp) + return -1; + + if (!strcmp(profile->old_apn, new_apn) && !strcmp(profile->old_user, new_user) + && !strcmp(profile->old_password, new_password) + && profile->old_iptype == profile->iptype + && profile->old_auth == profile->auth) + { + dbg_time("profile is unchanged, skip setting it"); + return 0; + } + + pRequest = ComposeQMUXMsg(QMUX_TYPE_WDS, QMIWDS_MODIFY_PROFILE_SETTINGS_REQ, WdsModifyProfileSettingsReq, profile); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + free(pResponse); + return 1; +} + +static int requestGetProfile(PROFILE_T *profile) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + int err; + PQMIWDS_APNNAME pApnName; + PQMIWDS_USERNAME pUserName; + PQMIWDS_PASSWD pPassWd; + PQMIWDS_AUTH_PREFERENCE pAuthPref; + PQMIWDS_IPTYPE pIpType; + const char *ipStr[] = {"IPV4", "NULL", "IPV6", "IPV4V6"}; + + profile->old_apn[0] = profile->old_user[0] = profile->old_password[0] = '\0'; + profile->old_auth = 0; + profile->old_iptype = 0; + if (profile->enable_ipv4 && profile->enable_ipv6) + profile->iptype = 3; + else if (profile->enable_ipv6) + profile->iptype = 2; + else + profile->iptype = 0; + + if (!profile->pdp) + return 0; + +_re_check: + pRequest = ComposeQMUXMsg(QMUX_TYPE_WDS, QMIWDS_GET_PROFILE_SETTINGS_REQ, WdsGetProfileSettingsReqSend, profile); + err = QmiThreadSendQMI(pRequest, &pResponse); + if (err == 0 && pResponse && le16_to_cpu(pResponse->MUXMsg.QMUXMsgHdrResp.QMUXResult) + && le16_to_cpu(pResponse->MUXMsg.QMUXMsgHdrResp.QMUXError) == 
QMI_ERR_EXTENDED_INTERNAL) + { + free(pResponse); + pRequest = ComposeQMUXMsg(QMUX_TYPE_WDS, QMIWDS_CREATE_PROFILE_REQ, WdsCreateProfileSettingsReqSend, profile); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + free(pResponse); + goto _re_check; + } + qmi_rsp_check_and_return(); + + pApnName = (PQMIWDS_APNNAME)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x14); + pUserName = (PQMIWDS_USERNAME)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x1B); + pPassWd = (PQMIWDS_PASSWD)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x1C); + pAuthPref = (PQMIWDS_AUTH_PREFERENCE)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x1D); + pIpType = (PQMIWDS_IPTYPE)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x11); + + if (pApnName/* && le16_to_cpu(pApnName->TLVLength)*/) + uchar2char(profile->old_apn, sizeof(profile->old_apn), &pApnName->ApnName, le16_to_cpu(pApnName->TLVLength)); + if (pUserName/* && pUserName->UserName*/) + uchar2char(profile->old_user, sizeof(profile->old_user), &pUserName->UserName, le16_to_cpu(pUserName->TLVLength)); + if (pPassWd/* && le16_to_cpu(pPassWd->TLVLength)*/) + uchar2char(profile->old_password, sizeof(profile->old_password), &pPassWd->Passwd, le16_to_cpu(pPassWd->TLVLength)); + if (pAuthPref/* && le16_to_cpu(pAuthPref->TLVLength)*/) { + profile->old_auth = pAuthPref->AuthPreference; + } + if (pIpType) { + profile->old_iptype = pIpType->IPType; + } + + dbg_time("%s[%d] %s/%s/%s/%d/%s", __func__, profile->pdp, profile->old_apn, profile->old_user, profile->old_password, profile->old_auth, ipStr[profile->old_iptype]); + + free(pResponse); + return 0; +} +#endif + +#ifdef CONFIG_SIGNALINFO +static int requestGetSignalInfo(void) +{ + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + int err; + + pRequest = ComposeQMUXMsg(QMUX_TYPE_NAS, QMINAS_GET_SIG_INFO_REQ, NULL, NULL); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + // CDMA + { + PQMINAS_SIG_INFO_CDMA_TLV_MSG ptlv = (PQMINAS_SIG_INFO_CDMA_TLV_MSG)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x10); + if (ptlv && ptlv->TLVLength) + { + dbg_time("%s CDMA: RSSI %d dBm, ECIO %.1lf dBm", __func__, + ptlv->rssi, (-0.5) * (double)ptlv->ecio); + } + } + + // HDR + { + PQMINAS_SIG_INFO_HDR_TLV_MSG ptlv = (PQMINAS_SIG_INFO_HDR_TLV_MSG)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x11); + if (ptlv && ptlv->TLVLength) + { + dbg_time("%s HDR: RSSI %d dBm, ECIO %.1lf dBm, IO %d dBm", __func__, + ptlv->rssi, (-0.5) * (double)ptlv->ecio, ptlv->io); + } + } + + // GSM + { + PQMINAS_SIG_INFO_GSM_TLV_MSG ptlv = (PQMINAS_SIG_INFO_GSM_TLV_MSG)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x12); + if (ptlv && ptlv->TLVLength) + { + dbg_time("%s GSM: RSSI %d dBm", __func__, ptlv->rssi); + } + } + + // WCDMA + { + PQMINAS_SIG_INFO_WCDMA_TLV_MSG ptlv = (PQMINAS_SIG_INFO_WCDMA_TLV_MSG)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x13); + if (ptlv && ptlv->TLVLength) + { + dbg_time("%s WCDMA: RSSI %d dBm, ECIO %.1lf dBm", __func__, + ptlv->rssi, (-0.5) * (double)ptlv->ecio); + } + } + + // LTE + { + PQMINAS_SIG_INFO_LTE_TLV_MSG ptlv = (PQMINAS_SIG_INFO_LTE_TLV_MSG)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x14); + if (ptlv && ptlv->TLVLength) + { + dbg_time("%s LTE: RSSI %d dBm, RSRQ %d dB, RSRP %d dBm, SNR %.1lf dB", __func__, + ptlv->rssi, ptlv->rsrq, ptlv->rsrp, (0.1) * (double)ptlv->snr); + } + } + + // TDSCDMA + { + PQMINAS_SIG_INFO_TDSCDMA_TLV_MSG ptlv = (PQMINAS_SIG_INFO_TDSCDMA_TLV_MSG)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x15); + if (ptlv && ptlv->TLVLength) + { + dbg_time("%s TDSCDMA: RSCP %d dBm", __func__, ptlv->rscp); + } + } + + // 
5G_NSA + if (s_5g_type == WWAN_DATA_CLASS_5G_NSA) + { + PQMINAS_SIG_INFO_5G_NSA_TLV_MSG ptlv = (PQMINAS_SIG_INFO_5G_NSA_TLV_MSG)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x17); + if (ptlv && ptlv->TLVLength) + { + dbg_time("%s 5G_NSA: RSRP %d dBm, SNR %.1lf dB", __func__, ptlv->rsrp, (0.1) * (double)ptlv->snr); + } + } + + // 5G_SA + if (s_5g_type == WWAN_DATA_CLASS_5G_SA) + { + PQMINAS_SIG_INFO_5G_SA_TLV_MSG ptlv = (PQMINAS_SIG_INFO_5G_SA_TLV_MSG)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x18); + if (ptlv && ptlv->TLVLength) + { + dbg_time("%s 5G_SA: NR5G_RSRQ %d dB", __func__, ptlv->nr5g_rsrq); + } + } + + free(pResponse); + return 0; +} +#endif + +#ifdef CONFIG_VERSION +static int requestBaseBandVersion(PROFILE_T *profile) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + PDEVICE_REV_ID revId; + int err; + + pRequest = ComposeQMUXMsg(QMUX_TYPE_DMS, QMIDMS_GET_DEVICE_REV_ID_REQ, NULL, NULL); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + revId = (PDEVICE_REV_ID)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x01); + + if (revId && le16_to_cpu(revId->TLVLength)) + { + uchar2char(profile->BaseBandVersion, sizeof(profile->BaseBandVersion), &revId->RevisionID, le16_to_cpu(revId->TLVLength)); + dbg_time("%s %s", __func__, profile->BaseBandVersion); + } + + free(pResponse); + return 0; +} +#endif + +static USHORT DmsSetOperatingModeReq(PQMUX_MSG pMUXMsg, void *arg) { + pMUXMsg->SetOperatingModeReq.TLVType = 0x01; + pMUXMsg->SetOperatingModeReq.TLVLength = cpu_to_le16(1); + pMUXMsg->SetOperatingModeReq.OperatingMode = *((UCHAR *)arg); + + return sizeof(QMIDMS_SET_OPERATING_MODE_REQ_MSG); +} + +static USHORT UimSetCardSlotReq(PQMUX_MSG pMUXMsg, void *arg) { + pMUXMsg->UIMSetCardSlotReq.TLVType = 0x01; + pMUXMsg->UIMSetCardSlotReq.TLVLength = cpu_to_le16(1); + pMUXMsg->UIMSetCardSlotReq.slot = *((UCHAR *)arg); + + return sizeof(QMIUIM_SET_CARD_SLOT_REQ_MSG); +} + +static int requestRadioPower(int state) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + int err; + UCHAR OperatingMode = (!!state) ? DMS_OP_MODE_ONLINE : DMS_OP_MODE_LOW_POWER; + USHORT SimOp = (!!state) ? 
QMIUIM_POWER_UP : QMIUIM_POWER_DOWN; + UCHAR cardSlot = 0x01; + + dbg_time("%s(%d)", __func__, state); + + pRequest = ComposeQMUXMsg(QMUX_TYPE_DMS, QMIDMS_SET_OPERATING_MODE_REQ, DmsSetOperatingModeReq, &OperatingMode); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + free(pResponse); + + pRequest = ComposeQMUXMsg(QMUX_TYPE_UIM, SimOp, UimSetCardSlotReq, &cardSlot); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + free(pResponse); + + return 0; +} + +static USHORT WdaSetLoopBackReq(PQMUX_MSG pMUXMsg, void *arg) { + (void)arg; + pMUXMsg->SetLoopBackReq.loopback_state.TLVType = 0x01; + pMUXMsg->SetLoopBackReq.loopback_state.TLVLength = cpu_to_le16(1); + + pMUXMsg->SetLoopBackReq.replication_factor.TLVType = 0x10; + pMUXMsg->SetLoopBackReq.replication_factor.TLVLength = cpu_to_le16(4); + + return sizeof(QMI_WDA_SET_LOOPBACK_CONFIG_REQ_MSG); +} + +static int requestSetLoopBackState(UCHAR loopback_state, ULONG replication_factor) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + int err; + + dbg_time("%s(loopback_state=%d, replication_factor=%u)", __func__, loopback_state, replication_factor); + + pRequest = ComposeQMUXMsg(QMUX_TYPE_WDS_ADMIN, QMI_WDA_SET_LOOPBACK_CONFIG_REQ, WdaSetLoopBackReq, NULL); + pRequest->MUXMsg.SetLoopBackReq.loopback_state.TLVVaule = loopback_state; + pRequest->MUXMsg.SetLoopBackReq.replication_factor.TLVVaule = cpu_to_le32(replication_factor); //this TLV payload is 4 bytes (see TLVLength above), so the 32-bit conversion is the correct one + + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + free(pResponse); + return 0; +} + +#ifdef CONFIG_ENABLE_QOS +static USHORT QosSetBindMuxDataPort(PQMUX_MSG pMUXMsg, void *arg) { + PROFILE_T *profile = (PROFILE_T *)arg; + pMUXMsg->QosBindDataPortReq.EpIdTlv.TLVType = 0x10; + pMUXMsg->QosBindDataPortReq.EpIdTlv.TLVLength = cpu_to_le16(8); + pMUXMsg->QosBindDataPortReq.EpIdTlv.ep_type = cpu_to_le32(profile->rmnet_info.ep_type); + pMUXMsg->QosBindDataPortReq.EpIdTlv.iface_id = cpu_to_le32(profile->rmnet_info.iface_id); + pMUXMsg->QosBindDataPortReq.MuxIdTlv.TLVType = 0x11; + pMUXMsg->QosBindDataPortReq.MuxIdTlv.TLVLength = cpu_to_le16(1); + pMUXMsg->QosBindDataPortReq.MuxIdTlv.mux_id = profile->muxid; + return sizeof(QMI_QOS_BIND_DATA_PORT_REQ_MSG); +} + +#ifdef CONFIG_REG_QOS_IND +static USHORT QosIndRegReq(PQMUX_MSG pMUXMsg, void *arg) { + (void)arg; + pMUXMsg->QosIndRegReq.ReportGlobalQosFlowTlv.TLVType = 0x10; + pMUXMsg->QosIndRegReq.ReportGlobalQosFlowTlv.TLVLength = cpu_to_le16(1); + pMUXMsg->QosIndRegReq.ReportGlobalQosFlowTlv.report_global_qos_flows = 1; + return sizeof(QMI_QOS_INDICATION_REGISTER_REQ_MSG); +} +#endif + +static int requestRegisterQos(PROFILE_T *profile) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse = NULL; + PQMUX_MSG pMUXMsg; + int err; + + pRequest = ComposeQMUXMsg(QMUX_TYPE_QOS, QMI_QOS_BIND_DATA_PORT_REQ, QosSetBindMuxDataPort, (void *)profile); + err = QmiThreadSendQMI(pRequest, &pResponse); + dbg_time("%s QosSetBindMuxDataPort", __func__); + qmi_rsp_check_and_return(); + if (pResponse) free(pResponse); + +#ifdef CONFIG_REG_QOS_IND + pRequest = ComposeQMUXMsg(QMUX_TYPE_QOS, QMI_QOS_INDICATION_REGISTER_REQ, QosIndRegReq, NULL); + err = QmiThreadSendQMI(pRequest, &pResponse); + dbg_time("%s QosIndRegReq", __func__); + qmi_rsp_check_and_return(); + if (pResponse) free(pResponse); +#endif + return 0; +} + +#ifdef CONFIG_GET_QOS_INFO +UCHAR ql_get_qos_info_data_rate(PQCQMIMSG pResponse, void *max_data_rate) +{ + PQMI_QOS_GET_QOS_INFO_TLV_GRANTED_FLOW qos_tx_granted_flow = NULL; + 
PQMI_QOS_GET_QOS_INFO_TLV_GRANTED_FLOW qos_rx_granted_flow = NULL; + qos_tx_granted_flow = (PQMI_QOS_GET_QOS_INFO_TLV_GRANTED_FLOW)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x11); + if(qos_tx_granted_flow != NULL) + { + *(ULONG64 *)(max_data_rate) = le64_to_cpu(qos_tx_granted_flow->data_rate_max); + dbg_time("GET_QOS_INFO: tx_data_rate_max=%llu", *(ULONG64 *)(max_data_rate+0)); + } + else + dbg_time("GET_QOS_INFO: No qos_tx_granted_flow"); + qos_rx_granted_flow = (PQMI_QOS_GET_QOS_INFO_TLV_GRANTED_FLOW)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x12); + if(qos_rx_granted_flow != NULL) + { + *(ULONG64 *)(max_data_rate+sizeof(ULONG64)) = le64_to_cpu(qos_rx_granted_flow->data_rate_max); + dbg_time("GET_QOS_INFO: rx_data_rate_max=%llu", *(ULONG64 *)(max_data_rate+sizeof(ULONG64))); + } + else + dbg_time("GET_QOS_INFO: No qos_rx_granted_flow"); + if(qos_tx_granted_flow != NULL || qos_rx_granted_flow != NULL) + return 0; + else + return 1; +} + +static USHORT QosGetQosInfoReq(PQMUX_MSG pMUXMsg, void *arg) { + PROFILE_T *profile = (PROFILE_T *)arg; + pMUXMsg->QosGetQosInfoReq.QosIdTlv.TLVType = 0x01; + pMUXMsg->QosGetQosInfoReq.QosIdTlv.TLVLength = cpu_to_le16(4); + pMUXMsg->QosGetQosInfoReq.QosIdTlv.qos_id = cpu_to_le32(profile->qos_id); + return sizeof(QMI_QOS_GET_QOS_INFO_REQ_MSG); +} + +static int requestGetQosInfo(PROFILE_T *profile) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse = NULL; + PQMUX_MSG pMUXMsg; + int err; + + if(profile->qos_id == 0) + { + dbg_time("%s request not sent: invalid qos_id", __func__); + return 0; + } + pRequest = ComposeQMUXMsg(QMUX_TYPE_QOS, QMI_QOS_GET_QOS_INFO_REQ, QosGetQosInfoReq, (void *)profile); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + if (pResponse) + { +#ifdef CONFIG_GET_QOS_DATA_RATE + ULONG64 max_data_rate[2] = {0}; + ql_get_qos_info_data_rate(pResponse, (void *)max_data_rate); +#endif + free(pResponse); + } + return 0; +} +#endif //#ifdef CONFIG_GET_QOS_INFO + +#ifdef CONFIG_REG_QOS_IND +UCHAR ql_get_global_qos_flow_ind_qos_id(PQCQMIMSG pResponse, UINT *qos_id) +{ + PQMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_STATE qos_flow_state = NULL; + qos_flow_state = (PQMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_STATE)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x01); + if(qos_flow_state != NULL) + { + if(le32_to_cpu(qos_flow_state->state_change) == QOS_IND_FLOW_STATE_ACTIVATED && qos_flow_state->new_flow == 1) + { + *qos_id = le32_to_cpu(qos_flow_state->qos_id); + dbg_time("QMI_QOS_GLOBAL_QOS_FLOW_IND: qos_id=%u state=QOS_IND_FLOW_STATE_ACTIVATED", *qos_id); + } + return (qos_flow_state->new_flow); + } + return (0); +} + +#ifdef CONFIG_GET_QOS_DATA_RATE +UCHAR ql_get_global_qos_flow_ind_data_rate(PQCQMIMSG pResponse, void *max_data_rate) +{ + PQMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_GRANTED qos_tx_flow_granted = NULL; + PQMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_GRANTED qos_rx_flow_granted = NULL; + qos_tx_flow_granted = (PQMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_GRANTED)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x10); + if(qos_tx_flow_granted != NULL) + { + *(ULONG64 *)(max_data_rate) = le64_to_cpu(qos_tx_flow_granted->data_rate_max); + dbg_time("QMI_QOS_GLOBAL_QOS_FLOW_IND: tx_data_rate_max=%llu", *(ULONG64 *)(max_data_rate+0)); + } + else + dbg_time("QMI_QOS_GLOBAL_QOS_FLOW_IND: No qos_tx_flow_granted"); + qos_rx_flow_granted = (PQMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_GRANTED)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x11); + if(qos_rx_flow_granted != NULL) + { + *(ULONG64 *)(max_data_rate+sizeof(ULONG64)) = le64_to_cpu(qos_rx_flow_granted->data_rate_max); + 
dbg_time("QMI_QOS_GLOBAL_QOS_FLOW_IND: rx_data_rate_max=%llu", *(ULONG64 *)(max_data_rate+sizeof(ULONG64))); + } + else + dbg_time("QMI_QOS_GLOBAL_QOS_FLOW_IND: No qos_rx_flow_granted"); + if(qos_tx_flow_granted != NULL || qos_rx_flow_granted != NULL) + return 0; + else + return 1; +} +#endif +#endif //#ifdef CONFIG_REG_QOS_IND +#endif //#ifdef CONFIG_ENABLE_QOS + +#ifdef CONFIG_CELLINFO +/* + at+qeng="servingcell" and at+qeng="neighbourcell" + https://gitlab.freedesktop.org/mobile-broadband/libqmi/-/blob/master/src/qmicli/qmicli-nas.c +*/ +static int nas_get_cell_location_info(void); +static int nas_get_rf_band_information(void); + +static int requestGetCellInfoList(void) { + dbg_time("%s", __func__); + nas_get_cell_location_info(); + nas_get_rf_band_information(); + return 0; +} +#endif + +const struct request_ops qmi_request_ops = { +#ifdef CONFIG_VERSION + .requestBaseBandVersion = requestBaseBandVersion, +#endif + .requestSetEthMode = requestSetEthMode, +#ifdef CONFIG_SIM + .requestGetSIMStatus = requestGetSIMStatus, + .requestEnterSimPin = requestEnterSimPin, +#endif +#ifdef CONFIG_IMSI_ICCID + .requestGetICCID = requestGetICCID, + .requestGetIMSI = requestGetIMSI, +#endif +#ifdef CONFIG_APN + .requestSetProfile = requestSetProfile, + .requestGetProfile = requestGetProfile, +#endif + .requestRegistrationState = requestRegistrationState, + .requestSetupDataCall = requestSetupDataCall, + .requestQueryDataCall = requestQueryDataCall, + .requestDeactivateDefaultPDP = requestDeactivateDefaultPDP, + .requestGetIPAddress = requestGetIPAddress, +#ifdef CONFIG_SIGNALINFO + .requestGetSignalInfo = requestGetSignalInfo, +#endif +#ifdef CONFIG_CELLINFO + .requestGetCellInfoList = requestGetCellInfoList, +#endif + .requestSetLoopBackState = requestSetLoopBackState, + .requestRadioPower = requestRadioPower, +#ifdef CONFIG_ENABLE_QOS + .requestRegisterQos = requestRegisterQos, +#endif +#ifdef CONFIG_GET_QOS_INFO + .requestGetQosInfo = requestGetQosInfo, +#endif +#ifdef CONFIG_COEX_WWAN_STATE + .requestGetCoexWWANState = requestGetCoexWWANState, +#endif +}; + +#ifdef CONFIG_CELLINFO +static char *str_from_bcd_plmn (uint8 plmn[3]) +{ + const char bcd_chars[] = "0123456789*#abc\0\0"; + static char str[12]; + int i; + int j = 0; + + for (i = 0; i < 3; i++) { + str[j] = bcd_chars[plmn[i]&0xF]; + if (str[j]) j++; + str[j] = bcd_chars[plmn[i]>>4]; + if (str[j]) j++; + } + + str[j++] = 0; + + return str; +} + +typedef struct { + UINT type; + const char *name; +} ENUM_NAME_T; + +#define enum_name(type) {type, #type} +#define N_ELEMENTS(arr) (sizeof (arr) / sizeof ((arr)[0])) + +static const ENUM_NAME_T QMI_NAS_ACTIVE_BAND_NAME[] = { + enum_name(QMI_NAS_ACTIVE_BAND_BC_0), + enum_name(QMI_NAS_ACTIVE_BAND_BC_1), + enum_name(QMI_NAS_ACTIVE_BAND_BC_2), + enum_name(QMI_NAS_ACTIVE_BAND_BC_3), + enum_name(QMI_NAS_ACTIVE_BAND_BC_4), + enum_name(QMI_NAS_ACTIVE_BAND_BC_5), + enum_name(QMI_NAS_ACTIVE_BAND_BC_6), + enum_name(QMI_NAS_ACTIVE_BAND_BC_7), + enum_name(QMI_NAS_ACTIVE_BAND_BC_8), + enum_name(QMI_NAS_ACTIVE_BAND_BC_9), + enum_name(QMI_NAS_ACTIVE_BAND_BC_10), + enum_name(QMI_NAS_ACTIVE_BAND_BC_11), + enum_name(QMI_NAS_ACTIVE_BAND_BC_12), + enum_name(QMI_NAS_ACTIVE_BAND_BC_13), + enum_name(QMI_NAS_ACTIVE_BAND_BC_14), + enum_name(QMI_NAS_ACTIVE_BAND_BC_15), + enum_name(QMI_NAS_ACTIVE_BAND_BC_16), + enum_name(QMI_NAS_ACTIVE_BAND_BC_17), + enum_name(QMI_NAS_ACTIVE_BAND_BC_18), + enum_name(QMI_NAS_ACTIVE_BAND_BC_19), + enum_name(QMI_NAS_ACTIVE_BAND_GSM_450), + enum_name(QMI_NAS_ACTIVE_BAND_GSM_480), + 
enum_name(QMI_NAS_ACTIVE_BAND_GSM_750), + enum_name(QMI_NAS_ACTIVE_BAND_GSM_850), + enum_name(QMI_NAS_ACTIVE_BAND_GSM_900_EXTENDED), + enum_name(QMI_NAS_ACTIVE_BAND_GSM_900_PRIMARY), + enum_name(QMI_NAS_ACTIVE_BAND_GSM_900_RAILWAYS), + enum_name(QMI_NAS_ACTIVE_BAND_GSM_DCS_1800), + enum_name(QMI_NAS_ACTIVE_BAND_GSM_PCS_1900), + enum_name(QMI_NAS_ACTIVE_BAND_WCDMA_2100), + enum_name(QMI_NAS_ACTIVE_BAND_WCDMA_PCS_1900), + enum_name(QMI_NAS_ACTIVE_BAND_WCDMA_DCS_1800), + enum_name(QMI_NAS_ACTIVE_BAND_WCDMA_1700_US), + enum_name(QMI_NAS_ACTIVE_BAND_WCDMA_850), + enum_name(QMI_NAS_ACTIVE_BAND_WCDMA_800), + enum_name(QMI_NAS_ACTIVE_BAND_WCDMA_2600), + enum_name(QMI_NAS_ACTIVE_BAND_WCDMA_900), + enum_name(QMI_NAS_ACTIVE_BAND_WCDMA_1700_JAPAN), + enum_name(QMI_NAS_ACTIVE_BAND_WCDMA_1500_JAPAN), + enum_name(QMI_NAS_ACTIVE_BAND_WCDMA_850_JAPAN), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_1), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_2), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_3), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_4), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_5), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_6), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_7), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_8), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_9), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_10), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_11), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_12), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_13), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_14), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_17), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_18), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_19), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_20), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_21), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_23), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_24), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_25), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_26), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_27), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_28), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_29), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_30), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_31), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_32), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_33), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_34), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_35), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_36), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_37), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_38), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_39), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_40), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_41), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_42), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_43), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_46), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_47), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_48), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_66), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_71), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_125), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_126), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_127), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_250), + enum_name(QMI_NAS_ACTIVE_BAND_TDSCDMA_A), + enum_name(QMI_NAS_ACTIVE_BAND_TDSCDMA_B), + enum_name(QMI_NAS_ACTIVE_BAND_TDSCDMA_C), + enum_name(QMI_NAS_ACTIVE_BAND_TDSCDMA_D), + enum_name(QMI_NAS_ACTIVE_BAND_TDSCDMA_E), + enum_name(QMI_NAS_ACTIVE_BAND_TDSCDMA_F), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_1 ), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_2 ), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_3 ), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_5 ), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_7 ), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_8 ), + 
enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_20), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_28), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_38), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_41), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_50), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_51), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_66), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_70), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_71), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_74), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_75), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_76), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_77), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_78), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_79), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_80), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_81), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_82), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_83), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_84), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_85), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_257), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_258), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_259), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_260), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_261), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_12), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_25), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_34), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_39), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_40), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_65), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_86), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_48), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_14), +}; + +static const char *qmi_nas_radio_interface_get_string(uint8 radio_if) +{ + const char *str = NULL; + + switch (radio_if) { + case QMI_NAS_RADIO_INTERFACE_CDMA_1X: str = "cdma-1x"; break; + case QMI_NAS_RADIO_INTERFACE_CDMA_1XEVDO: str = "cdma-1xevdo"; break; + case QMI_NAS_RADIO_INTERFACE_AMPS: str = "amps"; break; + case QMI_NAS_RADIO_INTERFACE_GSM: str = "gsm"; break; + case QMI_NAS_RADIO_INTERFACE_UMTS: str = "umts"; break; + case QMI_NAS_RADIO_INTERFACE_LTE: str = "lte"; break; + case QMI_NAS_RADIO_INTERFACE_TD_SCDMA: str = "td-scdma"; break; + case QMI_NAS_RADIO_INTERFACE_5GNR: str = "5gnr"; break; + default: str = NULL; break; + } + + return str ? 
str : "unknown"; +} + +static const char *qmi_nas_active_band_get_string(uint32 active_band) +{ + size_t i; + + for (i = 0; i < N_ELEMENTS(QMI_NAS_ACTIVE_BAND_NAME); i++) { + if (active_band == QMI_NAS_ACTIVE_BAND_NAME[i].type) + return QMI_NAS_ACTIVE_BAND_NAME[i].name + strlen("QMI_NAS_ACTIVE_BAND_"); + } + + return "unknown"; +} + +typedef struct { + uint16 min; + uint16 max; + const char *name; +} EarfcnRange; + +/* http://niviuk.free.fr/lte_band.php */ +static const EarfcnRange earfcn_ranges[] = { + { 0, 599, "E-UTRA band 1: 2100" }, + { 600, 1199, "E-UTRA band 2: 1900 PCS" }, + { 1200, 1949, "E-UTRA band 3: 1800+" }, + { 1950, 2399, "E-UTRA band 4: AWS-1" }, + { 2400, 2649, "E-UTRA band 5: 850" }, + { 2650, 2749, "E-UTRA band 6: UMTS only" }, + { 2750, 3449, "E-UTRA band 7: 2600" }, + { 3450, 3799, "E-UTRA band 8: 900" }, + { 3800, 4149, "E-UTRA band 9: 1800" }, + { 4150, 4749, "E-UTRA band 10: AWS-1+" }, + { 4750, 4999, "E-UTRA band 11: 1500 Lower" }, + { 5000, 5179, "E-UTRA band 12: 700 a" }, + { 5180, 5279, "E-UTRA band 13: 700 c" }, + { 5280, 5379, "E-UTRA band 14: 700 PS" }, + { 5730, 5849, "E-UTRA band 17: 700 b" }, + { 5850, 5999, "E-UTRA band 18: 800 Lower" }, + { 6000, 6149, "E-UTRA band 19: 800 Upper" }, + { 6150, 6449, "E-UTRA band 20: 800 DD" }, + { 6450, 6599, "E-UTRA band 21: 1500 Upper" }, + { 6600, 7399, "E-UTRA band 22: 3500" }, + { 7500, 7699, "E-UTRA band 23: 2000 S-band" }, + { 7700, 8039, "E-UTRA band 24: 1600 L-band" }, + { 8040, 8689, "E-UTRA band 25: 1900+" }, + { 8690, 9039, "E-UTRA band 26: 850+" }, + { 9040, 9209, "E-UTRA band 27: 800 SMR" }, + { 9210, 9659, "E-UTRA band 28: 700 APT" }, + { 9660, 9769, "E-UTRA band 29: 700 d" }, + { 9770, 9869, "E-UTRA band 30: 2300 WCS" }, + { 9870, 9919, "E-UTRA band 31: 450" }, + { 9920, 10359, "E-UTRA band 32: 1500 L-band" }, + { 36000, 36199, "E-UTRA band 33: TD 1900" }, + { 36200, 36349, "E-UTRA band 34: TD 2000" }, + { 36350, 36949, "E-UTRA band 35: TD PCS Lower" }, + { 36950, 37549, "E-UTRA band 36: TD PCS Upper" }, + { 37550, 37749, "E-UTRA band 37: TD PCS Center" }, + { 37750, 38249, "E-UTRA band 38: TD 2600" }, + { 38250, 38649, "E-UTRA band 39: TD 1900+" }, + { 38650, 39649, "E-UTRA band 40: TD 2300" }, + { 39650, 41589, "E-UTRA band 41: TD 2500" }, + { 41590, 43589, "E-UTRA band 42: TD 3500" }, + { 43590, 45589, "E-UTRA band 43: TD 3700" }, + { 45590, 46589, "E-UTRA band 44: TD 700" }, +}; + +static const char * earfcn_to_eutra_band_string (uint16 earfcn) +{ + size_t i; + + for (i = 0; i < N_ELEMENTS (earfcn_ranges); i++) { + if (earfcn <= earfcn_ranges[i].max && earfcn >= earfcn_ranges[i].min) + return earfcn_ranges[i].name; + } + + return "unknown"; +} + +static int nas_get_cell_location_info(void) +{ + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + PQMI_TLV pV; + int err; + int i, j; + + pRequest = ComposeQMUXMsg(QMUX_TYPE_NAS, QMINAS_GET_CELL_LOCATION_INFO_REQ, NULL, NULL); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + pV = (PQMI_TLV)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x2E); + if (pV && pV->TLVLength) { + printf ("5GNR ARFCN: '%u'\n", pV->u32); + } + + { + NasGetCellLocationNr5gServingCell *ptlv = (NasGetCellLocationNr5gServingCell *)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x2F); + if (ptlv && ptlv->TLVLength) + { + printf ("5GNR cell information:\n" + "\tPLMN: '%s'\n" + "\tTracking Area Code: '%u'\n" + "\tGlobal Cell ID: '%" PRIu64 "'\n" + "\tPhysical Cell ID: '%u'\n" + "\tRSRQ: '%.1lf dB'\n" + "\tRSRP: '%.1lf dBm'\n" + "\tSNR: '%.1lf dB'\n", + 
str_from_bcd_plmn(ptlv->plmn), + ptlv->tac[0]<<16 | ptlv->tac[1]<<8 | ptlv->tac[2] , + ptlv->global_cell_id, + ptlv->physical_cell_id, + (0.1) * ((double)ptlv->rsrq), + (0.1) * ((double)ptlv->rsrp), + (0.1) * ((double)ptlv->snr)); + } + } + + { + NasGetCellLocationLteInfoIntrafrequency *ptlv = (NasGetCellLocationLteInfoIntrafrequency *)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x13); + if (ptlv && ptlv->TLVLength) + { + printf ("Intrafrequency LTE Info:\n" + "\tUE In Idle: '%s'\n" + "\tPLMN: '%s'\n" + "\tTracking Area Code: '%u'\n" + "\tGlobal Cell ID: '%u'\n" + "\tEUTRA Absolute RF Channel Number: '%u' (%s)\n" + "\tServing Cell ID: '%u'\n", + ptlv->ue_in_idle ? "yes" : "no", + str_from_bcd_plmn(ptlv->plmn), + ptlv->tracking_area_code, + ptlv->global_cell_id, + ptlv->absolute_rf_channel_number, earfcn_to_eutra_band_string(ptlv->absolute_rf_channel_number), + ptlv->serving_cell_id); + + if (ptlv->ue_in_idle) + printf ("\tCell Reselection Priority: '%u'\n" + "\tS Non Intra Search Threshold: '%u'\n" + "\tServing Cell Low Threshold: '%u'\n" + "\tS Intra Search Threshold: '%u'\n", + ptlv->cell_reselection_priority, + ptlv->s_non_intra_search_threshold, + ptlv->serving_cell_low_threshold, + ptlv->s_intra_search_threshold); + + + for (i = 0; i < ptlv->cells_len; i++) { + NasGetCellLocationLteInfoCell *cell = &ptlv->cells_array[i]; + + printf ("\tCell [%u]:\n" + "\t\tPhysical Cell ID: '%u'\n" + "\t\tRSRQ: '%.1lf' dB\n" + "\t\tRSRP: '%.1lf' dBm\n" + "\t\tRSSI: '%.1lf' dBm\n", + i, + cell->physical_cell_id, + (double) cell->rsrq * 0.1, + (double) cell->rsrp * 0.1, + (double) cell->rssi * 0.1); + + if (ptlv->ue_in_idle) + printf ("\t\tCell Selection RX Level: '%d'\n", + cell->cell_selection_rx_level); + } + } + } + + { + NasGetCellLocationLteInfoInterfrequency *ptlv = (NasGetCellLocationLteInfoInterfrequency *)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x14); + if (ptlv && ptlv->TLVLength) + { + int off = offsetof(NasGetCellLocationLteInfoInterfrequency, freqs[0]); + printf ("Interfrequency LTE Info:\n" + "\tUE In Idle: '%s'\n", ptlv->ue_in_idle ? 
"yes" : "no"); + + for (i = 0; i < ptlv->freqs_len; i++) { + NasGetCellLocationLteInfoInterfrequencyFrequencyElement *freq = (((void *)ptlv) + off); + + off += sizeof(*freq); + printf ("\tFrequency [%u]:\n" + "\t\tEUTRA Absolute RF Channel Number: '%u' (%s)\n" + "\t\tSelection RX Level Low Threshold: '%u'\n" + "\t\tCell Selection RX Level High Threshold: '%u'\n", + i, + freq->eutra_absolute_rf_channel_number, earfcn_to_eutra_band_string(freq->eutra_absolute_rf_channel_number), + freq->cell_selection_rx_level_low_threshold, + freq->cell_selection_rx_level_high_threshold); + if (ptlv->ue_in_idle) + printf ("\t\tCell Reselection Priority: '%u'\n", + freq->cell_reselection_priority); + + + for (j = 0; j < freq->cells_len; j++) { + NasGetCellLocationLteInfoCell *cell = &freq->cells_array[j]; + + off += sizeof(*cell); + printf ("\t\tCell [%u]:\n" + "\t\t\tPhysical Cell ID: '%u'\n" + "\t\t\tRSRQ: '%.1lf' dB\n" + "\t\t\tRSRP: '%.1lf' dBm\n" + "\t\t\tRSSI: '%.1lf' dBm\n" + "\t\t\tCell Selection RX Level: '%u'\n", + j, + cell->physical_cell_id, + (double) cell->rsrq * 0.1, + (double) cell->rsrp * 0.1, + (double) cell->rssi * 0.1, + cell->cell_selection_rx_level); + } + } + } + } + + pV = (PQMI_TLV)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x1E); + if (pV && pV->TLVLength) { + if (pV->u32 == 0xFFFFFFFF) + printf ("LTE Timing Advance: 'unavailable'\n"); + else + printf ("LTE Timing Advance: '%u'\n", pV->u32); + } + + free(pResponse); + return 0; +} + +static int nas_get_rf_band_information(void) +{ + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + int err; + int i; + + pRequest = ComposeQMUXMsg(QMUX_TYPE_NAS, QMINAS_GET_RF_BAND_INFO_REQ, NULL, NULL); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + { + NasGetRfBandInfoList *ptlv = (NasGetRfBandInfoList *)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x01); + if (ptlv && ptlv->TLVLength) + { + printf ("Band Information:\n"); + for (i = 0; i < ptlv->num_instances; i++) { + NasGetRfBandInfo *band = &ptlv->bands_array[i]; + + printf ("\tRadio Interface: '%s'\n" + "\tActive Band Class: '%s'\n" + "\tActive Channel: '%u'\n", + qmi_nas_radio_interface_get_string (band->radio_if), + qmi_nas_active_band_get_string (band->active_band), + band->active_channel); + } + } + } + + { + NasGetRfBandInfoExtendedList *ptlv = (NasGetRfBandInfoExtendedList *)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x11); + if (ptlv && ptlv->TLVLength) + { + printf ("Band Information (Extended):\n"); + for (i = 0; i < ptlv->num_instances; i++) { + NasGetRfBandInfoExtended *band = &ptlv->bands_array[i]; + + printf ("\tRadio Interface: '%s'\n" + "\tActive Band Class: '%s'\n" + "\tActive Channel: '%u'\n", + qmi_nas_radio_interface_get_string (band->radio_if), + qmi_nas_active_band_get_string (band->active_band), + band->active_channel); + } + } + } + + { + NasGetRfBandInfoBandWidthList *ptlv = (NasGetRfBandInfoBandWidthList *)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x12); + if (ptlv && ptlv->TLVLength) + { + printf ("Bandwidth:\n"); + for (i = 0; i < ptlv->num_instances; i++) { + NasGetRfBandInfoBandWidth *band = &ptlv->bands_array[i]; + + printf ("\tRadio Interface: '%s'\n" + "\tBandwidth: '%u'\n", + qmi_nas_radio_interface_get_string (band->radio_if), + (band->bandwidth)); + } + } + } + + free(pResponse); + return 0; +} +#endif diff --git a/package/wwan/driver/quectel_cm_5G/src/QMIThread.h b/package/wwan/driver/quectel_cm_5G/src/QMIThread.h new file mode 100644 index 000000000..01f84b76b --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/QMIThread.h 
@@ -0,0 +1,420 @@ +#ifndef __QMI_THREAD_H__ +#define __QMI_THREAD_H__ + +#define CONFIG_GOBINET +#define CONFIG_QMIWWAN +#define CONFIG_SIM +#define CONFIG_APN +#define CONFIG_VERSION +//#define CONFIG_SIGNALINFO +//#define CONFIG_CELLINFO +//#define CONFIG_COEX_WWAN_STATE +#define CONFIG_DEFAULT_PDP 1 +//#define CONFIG_IMSI_ICCID +#define QUECTEL_UL_DATA_AGG +//#define QUECTEL_QMI_MERGE +//#define REBOOT_SIM_CARD_WHEN_APN_CHANGE +//#define REBOOT_SIM_CARD_WHEN_LONG_TIME_NO_PS 60 //unit is seconds +//#define CONFIG_QRTR +//#define CONFIG_ENABLE_QOS +//#define CONFIG_REG_QOS_IND +//#define CONFIG_GET_QOS_INFO +//#define CONFIG_GET_QOS_DATA_RATE + +#if (defined(CONFIG_REG_QOS_IND) || defined(CONFIG_GET_QOS_INFO) || defined(CONFIG_GET_QOS_DATA_RATE)) +#ifndef CONFIG_REG_QOS_IND +#define CONFIG_REG_QOS_IND +#endif +#ifndef CONFIG_ENABLE_QOS +#define CONFIG_ENABLE_QOS +#endif +#endif + +/* the angle-bracket header names were lost in this copy of the patch; the list below is a plausible reconstruction of the standard headers this file relies on */ +#include <stdio.h> +#include <string.h> +#include <stdlib.h> +#include <stdint.h> +#include <unistd.h> +#include <fcntl.h> +#include <errno.h> +#include <signal.h> +#include <ctype.h> +#include <termios.h> +#include <time.h> +#include <inttypes.h> +#include <limits.h> +#include <pthread.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <sys/time.h> +#include <sys/ioctl.h> +#include <sys/socket.h> +#include <sys/select.h> +#include <sys/wait.h> + +#include "qendian.h" +#include "MPQMI.h" +#include "MPQCTL.h" +#include "MPQMUX.h" +#include "util.h" + +#define DEVICE_CLASS_UNKNOWN 0 +#define DEVICE_CLASS_CDMA 1 +#define DEVICE_CLASS_GSM 2 + +#define WWAN_DATA_CLASS_NONE 0x00000000 +#define WWAN_DATA_CLASS_GPRS 0x00000001 +#define WWAN_DATA_CLASS_EDGE 0x00000002 /* EGPRS */ +#define WWAN_DATA_CLASS_UMTS 0x00000004 +#define WWAN_DATA_CLASS_HSDPA 0x00000008 +#define WWAN_DATA_CLASS_HSUPA 0x00000010 +#define WWAN_DATA_CLASS_LTE 0x00000020 +#define WWAN_DATA_CLASS_5G_NSA 0x00000040 +#define WWAN_DATA_CLASS_5G_SA 0x00000080 +#define WWAN_DATA_CLASS_1XRTT 0x00010000 +#define WWAN_DATA_CLASS_1XEVDO 0x00020000 +#define WWAN_DATA_CLASS_1XEVDO_REVA 0x00040000 +#define WWAN_DATA_CLASS_1XEVDV 0x00080000 +#define WWAN_DATA_CLASS_3XRTT 0x00100000 +#define WWAN_DATA_CLASS_1XEVDO_REVB 0x00200000 /* for future use */ +#define WWAN_DATA_CLASS_UMB 0x00400000 +#define WWAN_DATA_CLASS_CUSTOM 0x80000000 + +struct wwan_data_class_str { + ULONG class; + const char *str; +}; + +#pragma pack(push, 1) +typedef struct __IPV4 { + uint32_t Address; + uint32_t Gateway; + uint32_t SubnetMask; + uint32_t DnsPrimary; + uint32_t DnsSecondary; + uint32_t Mtu; +} IPV4_T; + +typedef struct __IPV6 { + UCHAR Address[16]; + UCHAR Gateway[16]; + UCHAR SubnetMask[16]; + UCHAR DnsPrimary[16]; + UCHAR DnsSecondary[16]; + UCHAR PrefixLengthIPAddr; + UCHAR PrefixLengthGateway; + ULONG Mtu; +} IPV6_T; + +typedef struct { + UINT size; + UINT rx_urb_size; + UINT ep_type; + UINT iface_id; + UINT MuxId; + UINT ul_data_aggregation_max_datagrams; //0x17 + UINT ul_data_aggregation_max_size; //0x18 + UINT dl_minimum_padding; //0x1A +} QMAP_SETTING; + +//Configured downlink data aggregation protocol +#define WDA_DL_DATA_AGG_DISABLED (0x00) //DL data aggregation is disabled (default) +#define WDA_DL_DATA_AGG_TLP_ENABLED (0x01) // DL TLP is enabled +#define WDA_DL_DATA_AGG_QC_NCM_ENABLED (0x02) // DL QC_NCM is enabled +#define WDA_DL_DATA_AGG_MBIM_ENABLED (0x03) // DL MBIM is enabled +#define WDA_DL_DATA_AGG_RNDIS_ENABLED (0x04) // DL RNDIS is enabled +#define WDA_DL_DATA_AGG_QMAP_ENABLED (0x05) // DL QMAP is enabled +#define WDA_DL_DATA_AGG_QMAP_V2_ENABLED (0x06) // DL QMAP V2 is enabled +#define WDA_DL_DATA_AGG_QMAP_V3_ENABLED (0x07) // DL QMAP V3 is enabled +#define WDA_DL_DATA_AGG_QMAP_V4_ENABLED (0x08) // DL QMAP V4 is enabled +#define WDA_DL_DATA_AGG_QMAP_V5_ENABLED (0x09) // DL QMAP V5 is enabled + 
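+// Illustrative sketch, not part of the original source: one way a caller
+// might fill QMAP_SETTING before handing it to ql_set_driver_qmap_setting()
+// (declared later in this header). All numeric values here are hypothetical
+// example values, shown only to clarify what each field and TLV means:
+//
+//   QMAP_SETTING qmap_settings = {0};
+//   qmap_settings.size = sizeof(qmap_settings);
+//   qmap_settings.rx_urb_size = 32 * 1024;                 /* DL aggregation buffer size */
+//   qmap_settings.ul_data_aggregation_max_datagrams = 11;  /* TLV 0x17 */
+//   qmap_settings.ul_data_aggregation_max_size = 8 * 1024; /* TLV 0x18 */
+//   qmap_settings.dl_minimum_padding = 0;                  /* TLV 0x1A */
+//   ql_set_driver_qmap_setting(profile, &qmap_settings);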
+typedef struct { + unsigned int size; + unsigned int rx_urb_size; + unsigned int ep_type; + unsigned int iface_id; + unsigned int qmap_mode; + unsigned int qmap_version; + unsigned int dl_minimum_padding; + char ifname[8][16]; + unsigned char mux_id[8]; +} RMNET_INFO; + +#define IpFamilyV4 (0x04) +#define IpFamilyV6 (0x06) + +struct __PROFILE; +struct qmi_device_ops { + int (*init)(struct __PROFILE *profile); + int (*deinit)(void); + int (*send)(PQCQMIMSG pRequest); + void* (*read)(void *pData); +}; +#ifdef CONFIG_QRTR +extern const struct qmi_device_ops qrtr_qmidev_ops; +#endif +extern const struct qmi_device_ops gobi_qmidev_ops; +extern const struct qmi_device_ops qmiwwan_qmidev_ops; +extern const struct qmi_device_ops mbim_dev_ops; +extern const struct qmi_device_ops atc_dev_ops; +extern int (*qmidev_send)(PQCQMIMSG pRequest); + +struct usb_device_info { + int idVendor; + int idProduct; + int busnum; + int devnum; + int bNumInterfaces; +}; + +struct usb_interface_info { + int bNumEndpoints; + int bInterfaceClass; + int bInterfaceSubClass; + int bInterfaceProtocol; + char driver[32]; +}; + +#define LIBQMI_PROXY "qmi-proxy" //src/libqmi-glib/qmi-proxy.h +#define LIBMBIM_PROXY "mbim-proxy" +#define QUECTEL_QMI_PROXY "quectel-qmi-proxy" +#define QUECTEL_MBIM_PROXY "quectel-mbim-proxy" +#define QUECTEL_ATC_PROXY "quectel-atc-proxy" +#define QUECTEL_QRTR_PROXY "quectel-qrtr-proxy" + +#ifndef bool +#define bool uint8_t +#endif +struct request_ops; +typedef struct __PROFILE { + //user input start + const char *apn; + const char *user; + const char *password; + int auth; + int iptype; + const char *pincode; + char proxy[32]; + int pdp; + int enable_bridge; + bool enable_ipv4; + bool enable_ipv6; + const char *logfile; + const char *usblogfile; + char expect_adapter[32]; + int kill_pdp; + int replication_factor; + //user input end + + char qmichannel[32]; + char usbnet_adapter[32]; + char qmapnet_adapter[32]; + char driver_name[32]; + int qmap_mode; + int qmap_size; + int qmap_version; + int curIpFamily; + int rawIP; + int muxid; +#ifdef CONFIG_ENABLE_QOS + UINT qos_id; +#endif + int wda_client; + IPV4_T ipv4; + IPV6_T ipv6; + UINT PCSCFIpv4Addr1; + UINT PCSCFIpv4Addr2; + UCHAR PCSCFIpv6Addr1[16]; + UCHAR PCSCFIpv6Addr2[16]; + bool reattach_flag; + int hardware_interface; + int software_interface; + + struct usb_device_info usb_dev; + struct usb_interface_info usb_intf; + + int usbmon_fd; + FILE *usbmon_logfile_fp; + bool loopback_state; + + char BaseBandVersion[64]; + char old_apn[64]; + char old_user[64]; + char old_password[64]; + int old_auth; + int old_iptype; + + const struct qmi_device_ops *qmi_ops; + const struct request_ops *request_ops; + RMNET_INFO rmnet_info; +} PROFILE_T; + +#ifdef QUECTEL_QMI_MERGE +#define MERGE_PACKET_IDENTITY 0x2c7c +#define MERGE_PACKET_VERSION 0x0001 +#define MERGE_PACKET_MAX_PAYLOAD_SIZE 56 +typedef struct __QMI_MSG_HEADER { + uint16_t idenity; + uint16_t version; + uint16_t cur_len; + uint16_t total_len; +} QMI_MSG_HEADER; + +typedef struct __QMI_MSG_PACKET { + QMI_MSG_HEADER header; + uint16_t len; + char buf[4096]; +} QMI_MSG_PACKET; +#endif + +typedef enum { + SIM_ABSENT = 0, + SIM_NOT_READY = 1, + SIM_READY = 2, /* SIM_READY means the radio state is RADIO_STATE_SIM_READY */ + SIM_PIN = 3, + SIM_PUK = 4, + SIM_NETWORK_PERSONALIZATION = 5, + SIM_BAD = 6, +} SIM_Status; + +#pragma pack(pop) + +#define WDM_DEFAULT_BUFSIZE 256 +#define RIL_REQUEST_QUIT 0x1000 +#define RIL_INDICATE_DEVICE_CONNECTED 0x1002 +#define RIL_INDICATE_DEVICE_DISCONNECTED 0x1003 
+#define RIL_UNSOL_RESPONSE_VOICE_NETWORK_STATE_CHANGED 0x1004 +#define RIL_UNSOL_DATA_CALL_LIST_CHANGED 0x1005 +#define MODEM_REPORT_RESET_EVENT 0x1006 +#define RIL_UNSOL_LOOPBACK_CONFIG_IND 0x1007 +#ifdef CONFIG_REG_QOS_IND +#define RIL_UNSOL_GLOBAL_QOS_FLOW_IND_QOS_ID 0x1008 +#endif + +extern pthread_mutex_t cm_command_mutex; +extern pthread_cond_t cm_command_cond; +extern unsigned int cm_recv_buf[1024]; +extern int cm_open_dev(const char *dev); +extern int cm_open_proxy(const char *name); +extern int pthread_cond_timeout_np(pthread_cond_t *cond, pthread_mutex_t * mutex, unsigned msecs); +extern int QmiThreadSendQMITimeout(PQCQMIMSG pRequest, PQCQMIMSG *ppResponse, unsigned msecs, const char *funcname); +#define QmiThreadSendQMI(pRequest, ppResponse) QmiThreadSendQMITimeout(pRequest, ppResponse, 30 * 1000, __func__) +extern void QmiThreadRecvQMI(PQCQMIMSG pResponse); +extern void udhcpc_start(PROFILE_T *profile); +extern void udhcpc_stop(PROFILE_T *profile); +extern void ql_set_driver_link_state(PROFILE_T *profile, int link_state); +extern void ql_set_driver_qmap_setting(PROFILE_T *profile, QMAP_SETTING *qmap_settings); +extern void ql_get_driver_rmnet_info(PROFILE_T *profile, RMNET_INFO *rmnet_info); +extern void dump_qmi(void *dataBuffer, int dataLen); +extern void qmidevice_send_event_to_main(int triger_event); +extern void qmidevice_send_event_to_main_ext(int triger_event, void *data, unsigned len); +extern uint8_t qmi_over_mbim_get_client_id(uint8_t QMIType); +extern uint8_t qmi_over_mbim_release_client_id(uint8_t QMIType, uint8_t ClientId); +#ifdef CONFIG_REG_QOS_IND +extern UCHAR ql_get_global_qos_flow_ind_qos_id(PQCQMIMSG pResponse, UINT *qos_id); +#endif +#ifdef CONFIG_GET_QOS_DATA_RATE +extern UCHAR ql_get_global_qos_flow_ind_data_rate(PQCQMIMSG pResponse, void *max_data_rate); +#endif + +struct request_ops { + int (*requestBaseBandVersion)(PROFILE_T *profile); + int (*requestSetEthMode)(PROFILE_T *profile); + int (*requestSetLoopBackState)(UCHAR loopback_state, ULONG replication_factor); + int (*requestGetSIMStatus)(SIM_Status *pSIMStatus); + int (*requestEnterSimPin)(const char *pPinCode); + int (*requestSetProfile)(PROFILE_T *profile); // 1 ~ success and apn change, 0 ~ success and no apn change, -1 ~ fail + int (*requestGetProfile)(PROFILE_T *profile); + int (*requestRegistrationState)(UCHAR *pPSAttachedState); + int (*requestSetupDataCall)(PROFILE_T *profile, int curIpFamily); + int (*requestQueryDataCall)(UCHAR *pConnectionStatus, int curIpFamily); + int (*requestDeactivateDefaultPDP)(PROFILE_T *profile, int curIpFamily); + int (*requestGetIPAddress)(PROFILE_T *profile, int curIpFamily); + int (*requestGetSignalInfo)(void); + int (*requestGetCellInfoList)(void); + int (*requestGetICCID)(void); + int (*requestGetIMSI)(void); + int (*requestRadioPower)(int state); + int (*requestRegisterQos)(PROFILE_T *profile); + int (*requestGetQosInfo)(PROFILE_T *profile); + int (*requestGetCoexWWANState)(void); +}; +extern const struct request_ops qmi_request_ops; +extern const struct request_ops mbim_request_ops; +extern const struct request_ops atc_request_ops; + +extern int get_driver_type(PROFILE_T *profile); +extern BOOL qmidevice_detect(char *qmichannel, char *usbnet_adapter, unsigned bufsize, PROFILE_T *profile); +int mhidevice_detect(char *qmichannel, char *usbnet_adapter, PROFILE_T *profile); +int atdevice_detect(char *atchannel, char *usbnet_adapter, PROFILE_T *profile); +extern int ql_bridge_mode_detect(PROFILE_T *profile); +extern int ql_enable_qmi_wwan_rawip_mode(PROFILE_T 
*profile); +extern int ql_qmap_mode_detect(PROFILE_T *profile); +#ifdef CONFIG_QRTR +extern int rtrmnet_ctl_create_vnd(char *devname, char *vndname, uint8_t muxid, + uint32_t qmap_version, uint32_t ul_agg_cnt, uint32_t ul_agg_size); +#endif + +#define qmidev_is_gobinet(_qmichannel) (strncmp(_qmichannel, "/dev/qcqmi", strlen("/dev/qcqmi")) == 0) +#define qmidev_is_qmiwwan(_qmichannel) (strncmp(_qmichannel, "/dev/cdc-wdm", strlen("/dev/cdc-wdm")) == 0) +#define qmidev_is_pciemhi(_qmichannel) (strncmp(_qmichannel, "/dev/mhi_", strlen("/dev/mhi_")) == 0) + +#define driver_is_qmi(_drv_name) (strncasecmp(_drv_name, "qmi_wwan", strlen("qmi_wwan")) == 0) +#define driver_is_mbim(_drv_name) (strncasecmp(_drv_name, "cdc_mbim", strlen("cdc_mbim")) == 0) + +extern FILE *logfilefp; +extern int debug_qmi; +extern int qmidevice_control_fd[2]; +extern int g_donot_exit_when_modem_hangup; +extern void update_resolv_conf(int iptype, const char *ifname, const char *dns1, const char *dns2); +void update_ipv4_address(const char *ifname, const char *ip, const char *gw, unsigned prefix); +void update_ipv6_address(const char *ifname, const char *ip, const char *gw, unsigned prefix); +int reattach_driver(PROFILE_T *profile); +extern void no_trunc_strncpy(char *dest, const char *src, size_t dest_size); + +enum +{ + DRV_INVALID, + SOFTWARE_QMI, + SOFTWARE_MBIM, + SOFTWARE_ECM_RNDIS_NCM, + SOFTWARE_QRTR, + HARDWARE_PCIE, + HARDWARE_USB, +}; + +enum +{ + SIG_EVENT_START, + SIG_EVENT_CHECK, + SIG_EVENT_STOP, +}; + +typedef enum +{ + DMS_OP_MODE_ONLINE, + DMS_OP_MODE_LOW_POWER, + DMS_OP_MODE_FACTORY_TEST_MODE, + DMS_OP_MODE_OFFLINE, + DMS_OP_MODE_RESETTING, + DMS_OP_MODE_SHUTTING_DOWN, + DMS_OP_MODE_PERSISTENT_LOW_POWER, + DMS_OP_MODE_MODE_ONLY_LOW_POWER, + DMS_OP_MODE_NET_TEST_GW, +}Device_operating_mode; + +#ifdef CM_DEBUG +#define dbg_time(fmt, args...) do { \ + fprintf(stdout, "[%15s-%04d: %s] " fmt "\n", __FILE__, __LINE__, get_time(), ##args); \ + fflush(stdout);\ + if (logfilefp) fprintf(logfilefp, "[%s-%04d: %s] " fmt "\n", __FILE__, __LINE__, get_time(), ##args); \ +} while(0) +#else +#define dbg_time(fmt, args...) do { \ + fprintf(stdout, "[%s] " fmt "\n", get_time(), ##args); \ + fflush(stdout);\ + if (logfilefp) fprintf(logfilefp, "[%s] " fmt "\n", get_time(), ##args); \ +} while(0) +#endif +#endif diff --git a/package/wwan/driver/quectel_cm_5G/src/QmiWwanCM.c b/package/wwan/driver/quectel_cm_5G/src/QmiWwanCM.c new file mode 100644 index 000000000..99f96b679 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/QmiWwanCM.c @@ -0,0 +1,459 @@ +/****************************************************************************** + @file QmiWwanCM.c + @brief QMI WWAN connectivity manager. + + DESCRIPTION + Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules. + + INITIALIZATION AND SEQUENCING REQUIREMENTS + None. + + --------------------------------------------------------------------------- + Copyright (c) 2016 - 2020 Quectel Wireless Solution, Co., Ltd. All Rights Reserved. + Quectel Wireless Solution Proprietary and Confidential. 
+ --------------------------------------------------------------------------- +******************************************************************************/ + +/* the angle-bracket header names were lost in this copy of the patch; the list below is a plausible reconstruction */ +#include <stdio.h> +#include <string.h> +#include <stdlib.h> +#include <unistd.h> +#include <poll.h> +#include "QMIThread.h" + +#ifdef CONFIG_QMIWWAN +static int cdc_wdm_fd = -1; +static UCHAR qmiclientId[QMUX_TYPE_ALL]; + +static UCHAR GetQCTLTransactionId(void) { + static int TransactionId = 0; + if (++TransactionId > 0xFF) + TransactionId = 1; + return TransactionId; +} + +typedef USHORT (*CUSTOMQCTL)(PQMICTL_MSG pCTLMsg, void *arg); + +static PQCQMIMSG ComposeQCTLMsg(USHORT QMICTLType, CUSTOMQCTL customQctlMsgFunction, void *arg) { + UCHAR QMIBuf[WDM_DEFAULT_BUFSIZE]; + PQCQMIMSG pRequest = (PQCQMIMSG)QMIBuf; + int Length; + + pRequest->QMIHdr.IFType = USB_CTL_MSG_TYPE_QMI; + pRequest->QMIHdr.CtlFlags = 0x00; + pRequest->QMIHdr.QMIType = QMUX_TYPE_CTL; + pRequest->QMIHdr.ClientId = 0x00; + + pRequest->CTLMsg.QMICTLMsgHdr.CtlFlags = QMICTL_FLAG_REQUEST; + pRequest->CTLMsg.QMICTLMsgHdr.TransactionId = GetQCTLTransactionId(); + pRequest->CTLMsg.QMICTLMsgHdr.QMICTLType = cpu_to_le16(QMICTLType); + if (customQctlMsgFunction) + pRequest->CTLMsg.QMICTLMsgHdr.Length = cpu_to_le16(customQctlMsgFunction(&pRequest->CTLMsg, arg) - sizeof(QCQMICTL_MSG_HDR)); + else + pRequest->CTLMsg.QMICTLMsgHdr.Length = cpu_to_le16(0x0000); + + pRequest->QMIHdr.Length = cpu_to_le16(le16_to_cpu(pRequest->CTLMsg.QMICTLMsgHdr.Length) + sizeof(QCQMICTL_MSG_HDR) + sizeof(QCQMI_HDR) - 1); + Length = le16_to_cpu(pRequest->QMIHdr.Length) + 1; + + pRequest = (PQCQMIMSG)malloc(Length); + if (pRequest == NULL) { + dbg_time("%s fail to malloc", __func__); + } else { + memcpy(pRequest, QMIBuf, Length); + } + + return pRequest; +} + +static USHORT CtlGetVersionReq(PQMICTL_MSG QCTLMsg, void *arg) +{ + (void)arg; + QCTLMsg->GetVersionReq.TLVType = QCTLV_TYPE_REQUIRED_PARAMETER; + QCTLMsg->GetVersionReq.TLVLength = cpu_to_le16(0x0001); + QCTLMsg->GetVersionReq.QMUXTypes = QMUX_TYPE_ALL; + return sizeof(QMICTL_GET_VERSION_REQ_MSG); +} + +static USHORT CtlGetClientIdReq(PQMICTL_MSG QCTLMsg, void *arg) { + QCTLMsg->GetClientIdReq.TLVType = QCTLV_TYPE_REQUIRED_PARAMETER; + QCTLMsg->GetClientIdReq.TLVLength = cpu_to_le16(0x0001); + QCTLMsg->GetClientIdReq.QMIType = ((UCHAR *)arg)[0]; + return sizeof(QMICTL_GET_CLIENT_ID_REQ_MSG); +} + +static USHORT CtlReleaseClientIdReq(PQMICTL_MSG QCTLMsg, void *arg) { + QCTLMsg->ReleaseClientIdReq.TLVType = QCTLV_TYPE_REQUIRED_PARAMETER; + QCTLMsg->ReleaseClientIdReq.TLVLength = cpu_to_le16(0x0002); + QCTLMsg->ReleaseClientIdReq.QMIType = ((UCHAR *)arg)[0]; + QCTLMsg->ReleaseClientIdReq.ClientId = ((UCHAR *)arg)[1]; + return sizeof(QMICTL_RELEASE_CLIENT_ID_REQ_MSG); +} + +static USHORT CtlLibQmiProxyOpenReq(PQMICTL_MSG QCTLMsg, void *arg) +{ + const char *device_path = (const char *)(arg); + QCTLMsg->LibQmiProxyOpenReq.TLVType = 0x01; + QCTLMsg->LibQmiProxyOpenReq.TLVLength = cpu_to_le16(strlen(device_path)); + //strcpy(QCTLMsg->LibQmiProxyOpenReq.device_path, device_path); + //__builtin___strcpy_chk + memcpy(QCTLMsg->LibQmiProxyOpenReq.device_path, device_path, strlen(device_path)); + return sizeof(QMICTL_LIBQMI_PROXY_OPEN_MSG) + (strlen(device_path)); +} + +static int libqmi_proxy_open(const char *cdc_wdm) { + int ret; + PQCQMIMSG pResponse; + + ret = QmiThreadSendQMI(ComposeQCTLMsg(QMI_MESSAGE_CTL_INTERNAL_PROXY_OPEN, + CtlLibQmiProxyOpenReq, (void *)cdc_wdm), &pResponse); + if (!ret && pResponse + && pResponse->CTLMsg.QMICTLMsgHdrRsp.QMUXResult == 0 + && 
pResponse->CTLMsg.QMICTLMsgHdrRsp.QMUXError == 0) { + ret = 0; + } + else { + return -1; + } + + if (pResponse) + free(pResponse); + + return ret; +} + +static int QmiWwanSendQMI(PQCQMIMSG pRequest) { + struct pollfd pollfds[]= {{cdc_wdm_fd, POLLOUT, 0}}; + int ret; + + if (cdc_wdm_fd == -1) { + dbg_time("%s cdc_wdm_fd = -1", __func__); + return -ENODEV; + } + + if (pRequest->QMIHdr.QMIType != QMUX_TYPE_CTL) { + pRequest->QMIHdr.ClientId = qmiclientId[pRequest->QMIHdr.QMIType]; + if (pRequest->QMIHdr.ClientId == 0) { + dbg_time("QMIType %d has no clientID", pRequest->QMIHdr.QMIType); + return -ENODEV; + } + + if (pRequest->QMIHdr.QMIType == QMUX_TYPE_WDS_IPV6) + pRequest->QMIHdr.QMIType = QMUX_TYPE_WDS; + } + + do { + ret = poll(pollfds, sizeof(pollfds)/sizeof(pollfds[0]), 5000); + } while ((ret < 0) && (errno == EINTR)); + + if (pollfds[0].revents & POLLOUT) { + ssize_t nwrites = le16_to_cpu(pRequest->QMIHdr.Length) + 1; + ret = write(cdc_wdm_fd, pRequest, nwrites); + if (ret == nwrites) { + ret = 0; + } else { + dbg_time("%s write=%d, errno: %d (%s)", __func__, ret, errno, strerror(errno)); + } + } else { + dbg_time("%s poll=%d, revents = 0x%x, errno: %d (%s)", __func__, ret, pollfds[0].revents, errno, strerror(errno)); + } + + return ret; +} + +static UCHAR QmiWwanGetClientID(UCHAR QMIType) { + PQCQMIMSG pResponse; + + QmiThreadSendQMI(ComposeQCTLMsg(QMICTL_GET_CLIENT_ID_REQ, CtlGetClientIdReq, &QMIType), &pResponse); + + if (pResponse) { + USHORT QMUXResult = cpu_to_le16(pResponse->CTLMsg.QMICTLMsgHdrRsp.QMUXResult); // QMI_RESULT_SUCCESS + USHORT QMUXError = cpu_to_le16(pResponse->CTLMsg.QMICTLMsgHdrRsp.QMUXError); // QMI_ERR_INVALID_ARG + //UCHAR QMIType = pResponse->CTLMsg.GetClientIdRsp.QMIType; + UCHAR ClientId = pResponse->CTLMsg.GetClientIdRsp.ClientId; + + if (!QMUXResult && !QMUXError && (QMIType == pResponse->CTLMsg.GetClientIdRsp.QMIType)) { + switch (QMIType) { + case QMUX_TYPE_WDS: dbg_time("Get clientWDS = %d", ClientId); break; + case QMUX_TYPE_DMS: dbg_time("Get clientDMS = %d", ClientId); break; + case QMUX_TYPE_NAS: dbg_time("Get clientNAS = %d", ClientId); break; + case QMUX_TYPE_QOS: dbg_time("Get clientQOS = %d", ClientId); break; + case QMUX_TYPE_WMS: dbg_time("Get clientWMS = %d", ClientId); break; + case QMUX_TYPE_PDS: dbg_time("Get clientPDS = %d", ClientId); break; + case QMUX_TYPE_UIM: dbg_time("Get clientUIM = %d", ClientId); break; + case QMUX_TYPE_COEX: dbg_time("Get clientCOEX = %d", ClientId); break; + case QMUX_TYPE_WDS_ADMIN: dbg_time("Get clientWDA = %d", ClientId); + break; + default: break; + } + return ClientId; + } + } + return 0; +} + +static int QmiWwanReleaseClientID(QMI_SERVICE_TYPE QMIType, UCHAR ClientId) { + UCHAR argv[] = {QMIType, ClientId}; + QmiThreadSendQMI(ComposeQCTLMsg(QMICTL_RELEASE_CLIENT_ID_REQ, CtlReleaseClientIdReq, argv), NULL); + return 0; +} + +static int QmiWwanInit(PROFILE_T *profile) { + unsigned i; + int ret; + PQCQMIMSG pResponse; + + if (profile->proxy[0] && !strcmp(profile->proxy, LIBQMI_PROXY)) { + ret = libqmi_proxy_open(profile->qmichannel); + if (ret) + return ret; + } + + if (!profile->proxy[0]) { + for (i = 0; i < 10; i++) { + ret = QmiThreadSendQMITimeout(ComposeQCTLMsg(QMICTL_SYNC_REQ, NULL, NULL), NULL, 1 * 1000, __func__); + if (!ret) + break; + sleep(1); + } + if (ret) + return ret; + } + + QmiThreadSendQMI(ComposeQCTLMsg(QMICTL_GET_VERSION_REQ, CtlGetVersionReq, NULL), &pResponse); + if (profile->qmap_mode) { + if (pResponse) { + if (pResponse->CTLMsg.QMICTLMsgHdrRsp.QMUXResult == 0 && 
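+            /* both QMUX result and error must be zero; the WDA minor version
+               reported in the loop below sets profile->qmap_version
+               (minor version > 16) */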
pResponse->CTLMsg.QMICTLMsgHdrRsp.QMUXError == 0) { + uint8_t NumElements = 0; + + for (NumElements = 0; NumElements < pResponse->CTLMsg.GetVersionRsp.NumElements; NumElements++) { +#if 0 + dbg_time("QMUXType = %02x Version = %d.%d", + pResponse->CTLMsg.GetVersionRsp.TypeVersion[NumElements].QMUXType, + pResponse->CTLMsg.GetVersionRsp.TypeVersion[NumElements].MajorVersion, + pResponse->CTLMsg.GetVersionRsp.TypeVersion[NumElements].MinorVersion); +#endif + if (pResponse->CTLMsg.GetVersionRsp.TypeVersion[NumElements].QMUXType == QMUX_TYPE_WDS_ADMIN) + profile->qmap_version = (pResponse->CTLMsg.GetVersionRsp.TypeVersion[NumElements].MinorVersion > 16); + } + } + } + } + if (pResponse) free(pResponse); + qmiclientId[QMUX_TYPE_WDS] = QmiWwanGetClientID(QMUX_TYPE_WDS); + if (profile->enable_ipv6) + qmiclientId[QMUX_TYPE_WDS_IPV6] = QmiWwanGetClientID(QMUX_TYPE_WDS); + qmiclientId[QMUX_TYPE_DMS] = QmiWwanGetClientID(QMUX_TYPE_DMS); + qmiclientId[QMUX_TYPE_NAS] = QmiWwanGetClientID(QMUX_TYPE_NAS); + qmiclientId[QMUX_TYPE_UIM] = QmiWwanGetClientID(QMUX_TYPE_UIM); + qmiclientId[QMUX_TYPE_WDS_ADMIN] = QmiWwanGetClientID(QMUX_TYPE_WDS_ADMIN); +#ifdef CONFIG_COEX_WWAN_STATE + qmiclientId[QMUX_TYPE_COEX] = QmiWwanGetClientID(QMUX_TYPE_COEX); +#endif +#ifdef CONFIG_ENABLE_QOS + qmiclientId[QMUX_TYPE_QOS] = QmiWwanGetClientID(QMUX_TYPE_QOS); +#endif + profile->wda_client = qmiclientId[QMUX_TYPE_WDS_ADMIN]; + + return 0; +} + +static int QmiWwanDeInit(void) { + unsigned int i; + for (i = 0; i < sizeof(qmiclientId)/sizeof(qmiclientId[0]); i++) + { + if (qmiclientId[i] != 0) + { + QmiWwanReleaseClientID(i, qmiclientId[i]); + qmiclientId[i] = 0; + } + } + + return 0; +} + +static ssize_t qmi_proxy_read (int fd, void *buf, size_t size) { + ssize_t nreads; + PQCQMI_HDR pHdr = (PQCQMI_HDR)buf; + + nreads = read(fd, pHdr, sizeof(QCQMI_HDR)); + if (nreads == sizeof(QCQMI_HDR) && le16_to_cpu(pHdr->Length) < size) { + nreads += read(fd, pHdr+1, le16_to_cpu(pHdr->Length) + 1 - sizeof(QCQMI_HDR)); + } + + return nreads; +} + +#ifdef QUECTEL_QMI_MERGE +static int QmiWwanMergeQmiRsp(void *buf, ssize_t *src_size) { + static QMI_MSG_PACKET s_QMIPacket; + QMI_MSG_HEADER *header = NULL; + ssize_t size = *src_size; + + if((uint16_t)size < sizeof(QMI_MSG_HEADER)) + return -1; + + header = (QMI_MSG_HEADER *)buf; + if(le16_to_cpu(header->idenity) != MERGE_PACKET_IDENTITY || le16_to_cpu(header->version) != MERGE_PACKET_VERSION || le16_to_cpu(header->cur_len) > le16_to_cpu(header->total_len)) + return -1; + + if(le16_to_cpu(header->cur_len) == le16_to_cpu(header->total_len)) { + *src_size = le16_to_cpu(header->total_len); + memcpy(buf, buf + sizeof(QMI_MSG_HEADER), *src_size); + s_QMIPacket.len = 0; + return 0; + } + + memcpy(s_QMIPacket.buf + s_QMIPacket.len, buf + sizeof(QMI_MSG_HEADER), le16_to_cpu(header->cur_len)); + s_QMIPacket.len += le16_to_cpu(header->cur_len); + + if (le16_to_cpu(header->cur_len) < MERGE_PACKET_MAX_PAYLOAD_SIZE || s_QMIPacket.len >= le16_to_cpu(header->total_len)) { + memcpy(buf, s_QMIPacket.buf, s_QMIPacket.len); + *src_size = s_QMIPacket.len; + s_QMIPacket.len = 0; + return 0; + } + + return -1; +} +#endif + +static void * QmiWwanThread(void *pData) { + PROFILE_T *profile = (PROFILE_T *)pData; + const char *cdc_wdm = (const char *)profile->qmichannel; + int wait_for_request_quit = 0; + char num = cdc_wdm[strlen(cdc_wdm)-1]; + + if (profile->proxy[0]) { + if (!strncmp(profile->proxy, QUECTEL_QMI_PROXY, strlen(QUECTEL_QMI_PROXY))) { + snprintf(profile->proxy, sizeof(profile->proxy), "%s%c", 
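+            /* 'num' is the trailing digit of the cdc-wdm node name, so each
+               proxy instance pairs with its own control device */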
QUECTEL_QMI_PROXY, num); + } + } + else if (!strncmp(cdc_wdm, "/dev/mhi_IPCR", strlen("/dev/mhi_IPCR"))) { + snprintf(profile->proxy, sizeof(profile->proxy), "%s%c", QUECTEL_QRTR_PROXY, num); + } + else if (profile->qmap_mode > 1) { + snprintf(profile->proxy, sizeof(profile->proxy), "%s%c", QUECTEL_QMI_PROXY, num); + } + + if (profile->proxy[0]) + cdc_wdm_fd = cm_open_proxy(profile->proxy); + else + cdc_wdm_fd = cm_open_dev(cdc_wdm); + + if (cdc_wdm_fd == -1) { + dbg_time("%s Failed to open %s, errno: %d (%s)", __func__, cdc_wdm, errno, strerror(errno)); + qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_DISCONNECTED); + pthread_exit(NULL); + return NULL; + } + + dbg_time("cdc_wdm_fd = %d", cdc_wdm_fd); + + qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_CONNECTED); + while (1) { + struct pollfd pollfds[] = {{qmidevice_control_fd[1], POLLIN, 0}, {cdc_wdm_fd, POLLIN, 0}}; + int ne, ret, nevents = sizeof(pollfds)/sizeof(pollfds[0]); + + do { + ret = poll(pollfds, nevents, wait_for_request_quit ? 1000 : -1); + } while ((ret < 0) && (errno == EINTR)); + + if (ret == 0 && wait_for_request_quit) { + QmiThreadRecvQMI(NULL); + continue; + } + + if (ret <= 0) { + dbg_time("%s poll=%d, errno: %d (%s)", __func__, ret, errno, strerror(errno)); + break; + } + + for (ne = 0; ne < nevents; ne++) { + int fd = pollfds[ne].fd; + short revents = pollfds[ne].revents; + + //dbg_time("{%d, %x, %x}", pollfds[ne].fd, pollfds[ne].events, pollfds[ne].revents); + + if (revents & (POLLERR | POLLHUP | POLLNVAL)) { + dbg_time("%s poll err/hup/inval", __func__); + dbg_time("poll fd = %d, events = 0x%04x", fd, revents); + if (fd == cdc_wdm_fd) { + } else { + } + if (revents & (POLLHUP | POLLNVAL)) //EC20 bug, Can get POLLERR + goto __QmiWwanThread_quit; + } + + if ((revents & POLLIN) == 0) + continue; + + if (fd == qmidevice_control_fd[1]) { + int triger_event; + if (read(fd, &triger_event, sizeof(triger_event)) == sizeof(triger_event)) { + //DBG("triger_event = 0x%x", triger_event); + switch (triger_event) { + case RIL_REQUEST_QUIT: + goto __QmiWwanThread_quit; + break; + case SIG_EVENT_STOP: + wait_for_request_quit = 1; + break; + default: + break; + } + } + } + + if (fd == cdc_wdm_fd) { + ssize_t nreads; + PQCQMIMSG pResponse = (PQCQMIMSG)cm_recv_buf; + + if (!profile->proxy[0]) + nreads = read(fd, cm_recv_buf, sizeof(cm_recv_buf)); + else + nreads = qmi_proxy_read(fd, cm_recv_buf, sizeof(cm_recv_buf)); + //dbg_time("%s read=%d errno: %d (%s)", __func__, (int)nreads, errno, strerror(errno)); + if (nreads <= 0) { + dbg_time("%s read=%d errno: %d (%s)", __func__, (int)nreads, errno, strerror(errno)); + break; + } +#ifdef QUECTEL_QMI_MERGE + if((profile->qmap_mode == 0 || profile->qmap_mode == 1) && QmiWwanMergeQmiRsp(cm_recv_buf, &nreads)) + continue; +#endif + if (nreads != (le16_to_cpu(pResponse->QMIHdr.Length) + 1)) { + dbg_time("%s nreads=%d, pQCQMI->QMIHdr.Length = %d", __func__, (int)nreads, le16_to_cpu(pResponse->QMIHdr.Length)); + continue; + } + + QmiThreadRecvQMI(pResponse); + } + } + } + +__QmiWwanThread_quit: + if (cdc_wdm_fd != -1) { close(cdc_wdm_fd); cdc_wdm_fd = -1; } + qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_DISCONNECTED); + QmiThreadRecvQMI(NULL); //main thread may pending on QmiThreadSendQMI() + dbg_time("%s exit", __func__); + pthread_exit(NULL); + return NULL; +} + +const struct qmi_device_ops qmiwwan_qmidev_ops = { + .init = QmiWwanInit, + .deinit = QmiWwanDeInit, + .send = QmiWwanSendQMI, + .read = QmiWwanThread, +}; + +uint8_t qmi_over_mbim_get_client_id(uint8_t QMIType) { + return 
QmiWwanGetClientID(QMIType);
+}
+
+uint8_t qmi_over_mbim_release_client_id(uint8_t QMIType, uint8_t ClientId) {
+    return QmiWwanReleaseClientID(QMIType, ClientId);
+}
+#endif
+
diff --git a/package/wwan/driver/quectel_cm_5G/src/ReleaseNote.txt b/package/wwan/driver/quectel_cm_5G/src/ReleaseNote.txt
new file mode 100644
index 000000000..5da5cb766
--- /dev/null
+++ b/package/wwan/driver/quectel_cm_5G/src/ReleaseNote.txt
@@ -0,0 +1,325 @@
+Release Notes
+
+[V1.6.4]
+Date: 9/7/2022
+enhancement:
+    1. set cflags as -Wall -Wextra -Werror -O1, and fix compile errors
+    2. some code refactoring
+    3. add quectel-qrtr-proxy
+fix:
+    1. netmask error when using ifconfig on a little-endian CPU
+
+[V1.6.2]
+Date: 11/18/2021
+enhancement:
+    1. support 'LTE && WiFi Coexistence Solution via QMI'.
+       To use this feature, enable CONFIG_COEX_WWAN_STATE in QMIThread.h
+
+[V1.6.1]
+Date: 7/20/2021
+enhancement:
+    1. add requestGetCellInfoList requestRadioPower
+    2. add QMI OVER MBIM
+    3. support qrtr and rmnet
+    4. support RG500U PCIE
+    5. add qos service && get qos flow data_rate_max func
+fix:
+    1. mbim: increase mbim open timeout to 3 seconds. some modems take a long time for the open cmd.
+    2. support MsChapV2
+    3. mbim: invalid memory access when only one DNS is returned
+    4. some bug fixes for setting up a data call via AT commands
+
+[V1.6.0.26]
+Date: 4/22/2021
+enhancement:
+    1. add lots of log files to show how to use this tool
+    2. support pcie mhi multiple call
+    3. at command: support EC200U/EC200T/EC200S/RG801H/RG500U/
+fix:
+    1. mbim-proxy: fix errors on big-endian CPU, ignore mbim open/close cmd from quectel-CM
+
+[V1.6.0.25]
+Date: 4/8/2021
+enhancement:
+fix:
+    1. fix compile error when using gcc 9.3.0
+    2. fix yocto 'QA Issue: No GNU_HASH in the ELF binary'
+
+[V1.6.0.24]
+Date: 3/9/2021
+enhancement:
+    1. '-p [quectel-][qmi|mbim]-proxy', can connect to quectel/libqmi/libmbim's proxy, even with only one data call
+    2. set variable s_9x07 as 1 (from 0), most modems are based on MDM90x7 and later QCOM chips.
+fix:
+    1. define CHAR as signed char
+    2. modify Makefile to generate more compile warnings, and fix them
+
+[V1.6.0.23]
+Date: 2/26/2021
+enhancement:
+    1. support 'AT+QNETDEVCTL' (not release)
+fix:
+    1. modify help/usage
+    2. fix some memory access errors in mbim-cm.c
+
+[V1.6.0.22]
+Date: 2/4/2021
+enhancement:
+    1. support connecting to libqmi's qmi-proxy
+    2. only allow '0/1/2/none/pap/chap' for auth of '-s'
+    3. '-m iface-idx' bind QMAP data call to wwan0_<iface-idx>
+fix:
+
+[V1.6.0.21]
+Date: 1/28/2021
+enhancement:
+    1. print 5G signal
+fix:
+    1. fix compile errors: -Werror=format-truncation=
+
+[V1.6.0.20]
+Date: 12/29/2020
+enhancement:
+    1. Code refactoring
+    2. support 'AT+QNETDEVCTL' (not release)
+fix:
+
+[V1.6.0.19]
+Date: 12/4/2020
+enhancement:
+    1. if 'udhcpc's default.script' is missing, directly set ip/dns/route with the 'ip' command
+fix:
+
+[V1.6.0.18]
+Date: 12/4/2020
+enhancement:
+    1. Code refactoring
+fix:
+
+[V1.6.0.17]
+Date: 8/25/2020
+enhancement:
+    1. support MBIM multi-call
+    2. support unisoc RG500U mbim
+    3. QUECTEL_QMI_MERGE: some SoCs cannot read more than 64 bytes of (QMI) data via USB Endpoint 0
+fix:
+
+[V1.6.0.15]
+Date: 7/24/2020
+enhancement:
+fix:
+    1. QMAP multi-call, AT+CFUN=4 then AT+CFUN=1, only one call can obtain IP by DHCP
+
+[V1.6.0.14]
+Date: 6/10/2020
+enhancement:
+    1. support X55's GobiNet LOOPBACK
+fix:
+    1. very old uClibc does not support htole32 and pthread_condattr_setclock
+    2. pthread_cond_wait tv_nsec >= 1000000000U is wrong
+    3. do not close socket in udhcpc.c ifc_get_addr()
+
+[V1.6.0.13]
+Date: 6/9/2020
+enhancement:
+    1. add some examples for OpenWrt, macro 'QL_OPENWER_NETWORK_SETUP'
+fix:
+
+[V1.6.0.12]
+Date: 5/29/2020
+enhancement:
+fix:
+    1. some EM12's usb-net-qmi/mbim interface is at 8 (not 4)
+
+[V1.6.0.11]
+Date: 5/28/2020
+enhancement:
+fix:
+    1. fix mbim debug on Big Endian CPU
+
+[V1.6.0.10]
+Date: 5/25/2020
+enhancement:
+fix:
+    1. set QMAP .ul_data_aggregation_max_datagrams to 11 (from 16)
+
+[V1.6.0.9]
+Date: 5/22/2020
+enhancement:
+fix:
+    1. dial fails when registered to 5G-SA
+
+[V1.6.0.8]
+Date: 4/30/2020
+enhancement:
+    1. support '-b' to select bridge mode
+fix:
+
+[V1.6.0.7]
+Date: 4/29/2020
+enhancement:
+    1. support QMAP multi-call for qmi_wwan_q and pcie_mhi's rmnet driver
+fix:
+
+[V1.6.0.6]
+Date: 4/20/2020
+enhancement:
+    1. support '-k pdn_idx' to hang up the call started by '-n pdn_idx'
+fix:
+    1. fix set dl_minimum_padding as 0, modems do not support this feature
+
+[V1.6.0.5]
+Date: 4/10/2020
+enhancement:
+    1. support X55's QMAPV5 for PCIE
+fix:
+
+[V1.6.0.3]
+Date: 4/8/2020
+enhancement:
+    1. support multi-modems all using multi-data-calls
+fix:
+
+[V1.6.0.2]
+Date: 4/7/2020
+enhancement:
+    1. support X55's QMAPV5 for USB
+fix:
+
+[V1.6.0.1]
+Date: 4/1/2020
+enhancement:
+    1. support QMAP UL AGG (multi data call)
+fix:
+    1. some EM12's usb-net-qmi/mbim interface is at 8 (not 4)
+
+[V1.5.9]
+Date: 3/4/2020
+enhancement:
+    1. support pcie mhi multi-APN data call
+    2. support QMAP UL AGG (single data call)
+fix:
+    1. set mbim parameters 4-byte aligned, otherwise the mbim data call fails
+
+[V1.5.8]
+Date: 2/18/2020
+enhancement:
+    1. support '-l 14' X55's loopback function
+fix:
+
+[V1.5.7]
+Date: 2/6/2020
+enhancement:
+    1. support '-u usbmon_log_file' to catch usbmon log
+fix:
+
+[V1.5.6]
+Date: 1/20/2020
+enhancement:
+    1. show driver name and version
+    2. support PCSCF
+    3. support bridge in mbim
+fix:
+
+[V1.5.5]
+Date: 12/31/2019
+enhancement:
+fix:
+    1. fix some memory access bugs in mbim-cm.c
+
+[WCDMA&LTE_QConnectManager_Linux&Android_V1.5.4]
+Date: 12/17/2019
+enhancement:
+    1. Add copyright
+    2. auto detect pcie mhi /dev/mhi*
+fix:
+
+[WCDMA&LTE_QConnectManager_Linux&Android_V1.5.3]
+Date: 2019/12/11
+enhancement:
+1. support show SignalInfo, controlled by macro CONFIG_SIGNALINFO
+2. support show 5G_NSA/5G_NA
+3. support Microsoft Extend MBIM message
+fix:
+1. quectel-qmi-proxy bugs on Big-Endian CPU
+
+[WCDMA&LTE_QConnectManager_Linux&Android_V1.5.2]
+Date: 12/2/2019
+enhancement:
+    1. support requestGetSignalInfo()
+fix:
+
+[WCDMA&LTE_QConnectManager_Linux&Android_V1.4.1]
+Date: 10/23/2019
+enhancement:
+    1. support QMI_CTL_REVOKE_CLIENT_ID_IND (Quectel-defined QMI)
+    2. add copyright
+fix:
+    1. remove SIGUSR
+
+[WCDMA&LTE_QConnectManager_Linux&Android_V1.3.10]
+Date: 10/14/2019
+enhancement:
+    1. increase retry interval
+fix:
+
+[WCDMA&LTE_QConnectManager_Linux&Android_V1.2.1]
+Date: 2019/02/26
+enhancement:
+1. Implement help message.
+
+root@ubuntu:# ./quectel-CM -h
+[02-26_10:39:21:353] Usage: ./quectel-CM [options]
+[02-26_10:39:21:353] -s [apn [user password auth]] Set apn/user/password/auth get from your network provider
+[02-26_10:39:21:353] -p pincode Verify sim card pin if sim card is locked
+[02-26_10:39:21:353] -f logfilename Save log message of this program to file
+[02-26_10:39:21:353] -i interface Specify network interface(default auto-detect)
+[02-26_10:39:21:353] -4 IPv4 protocol
+[02-26_10:39:21:353] -6 IPv6 protocol
+[02-26_10:39:21:353] -m muxID Specify muxid when set multi-pdn data connection.
+[02-26_10:39:21:353] -n channelID Specify channelID when set multi-pdn data connection(default 1).
+[02-26_10:39:21:353] [Examples]
+[02-26_10:39:21:353] Example 1: ./quectel-CM
+[02-26_10:39:21:353] Example 2: ./quectel-CM -s 3gnet
+[02-26_10:39:21:353] Example 3: ./quectel-CM -s 3gnet carl 1234 0 -p 1234 -f gobinet_log.txt
+root@ubuntu:#
+2. Support bridge mode when setting up multi-pdn data connections.
+3. Host device can access the network in bridge mode.
+
+[WCDMA&LTE_QConnectManager_Linux&Android_V1.1.46]
+Date: 2019/02/18
+enhancement:
+1. support IPV6-only data call. quectel-CM now supports three dialing methods: IPV4 only, IPV6 only, IPV4V6.
+   ./quectel-CM -4(or no argument)   only IPV4
+                -6                   only IPV6
+                -4 -6                IPV4 && IPV6
+
+[WCDMA&LTE_QConnectManager_Linux&Android_V1.1.45]
+Date: 2018/09/13
+enhancement:
+1. support EG12 PCIE interface
+
+[WCDMA&LTE_QConnectManager_Linux&Android_V1.1.44]
+Date: 2018/09/10
+enhancement:
+1. support setting up an IPV4&IPV6 data call.
+
+[WCDMA&LTE_QConnectManager_Linux&Android_V1.1.43]
+[WCDMA&LTE_QConnectManager_Linux&Android_V1.1.42]
+Date: 2018/08/29
+enhancement:
+1. support QMI_WWAN's QMAP function and bridge mode; please contact Quectel FAE to get the qmi_wwan.c patch.
+   when QMI_WWAN's QMAP IP Mux function is enabled, 'quectel-qmi-proxy -d /dev/cdc-wdmX' must be run before quectel-CM
+
+[WCDMA&LTE_QConnectManager_Linux&Android_V1.1.41]
+Date: 2018/05/24
+enhancement:
+1. fix a cdma data call error
+
+[WCDMA&LTE_QConnectManager_Linux&Android_V1.1.40]
+Date: 2018/05/12
+enhancement:
+1. support GobiNet's QMAP function and bridge mode.
+   'Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.3.5' and later versions are required to use QMAP and bridge mode.
+   for details, please refer to the GobiNet Driver
+
diff --git a/package/wwan/driver/quectel_cm_5G/src/at_tok.c b/package/wwan/driver/quectel_cm_5G/src/at_tok.c
new file mode 100644
index 000000000..6736cc839
--- /dev/null
+++ b/package/wwan/driver/quectel_cm_5G/src/at_tok.c
@@ -0,0 +1,283 @@
+/* //device/system/reference-ril/at_tok.c
+**
+** Copyright 2006, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#include "at_tok.h"
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <stdarg.h>
+
+/**
+ * Starts tokenizing an AT response string
+ * returns -1 if this is not a valid response string, 0 on success.
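+ *
+ * Usage sketch (input value hypothetical):
+ *   char buf[] = "+CSQ: 24,99";
+ *   char *p = buf;
+ *   int rssi;
+ *   if (at_tok_start(&p) == 0 && at_tok_nextint(&p, &rssi) == 0) {
+ *       // rssi == 24, and p has advanced past the consumed token
+ *   }
+ *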
+ * updates *p_cur with current position + */ +int at_tok_start(char **p_cur) +{ + if (*p_cur == NULL) { + return -1; + } + + // skip prefix + // consume "^[^:]:" + + *p_cur = strchr(*p_cur, ':'); + + if (*p_cur == NULL) { + return -1; + } + + (*p_cur)++; + + return 0; +} + +static void skipWhiteSpace(char **p_cur) +{ + if (*p_cur == NULL) return; + + while (**p_cur != '\0' && isspace(**p_cur)) { + (*p_cur)++; + } +} + +static void skipNextComma(char **p_cur) +{ + if (*p_cur == NULL) return; + + while (**p_cur != '\0' && **p_cur != ',') { + (*p_cur)++; + } + + if (**p_cur == ',') { + (*p_cur)++; + } +} + +static char * nextTok(char **p_cur) +{ + char *ret = NULL; + + skipWhiteSpace(p_cur); + + if (*p_cur == NULL) { + ret = NULL; + } else if (**p_cur == '"') { + (*p_cur)++; + ret = strsep(p_cur, "\""); + skipNextComma(p_cur); + } else { + ret = strsep(p_cur, ","); + } + + return ret; +} + + +/** + * Parses the next integer in the AT response line and places it in *p_out + * returns 0 on success and -1 on fail + * updates *p_cur + * "base" is the same as the base param in strtol + */ + +static int at_tok_nextint_base(char **p_cur, int *p_out, int base, int uns) +{ + char *ret; + + if (*p_cur == NULL) { + return -1; + } + + ret = nextTok(p_cur); + + if (ret == NULL) { + return -1; + } else { + long l; + char *end; + + if (uns) + l = strtoul(ret, &end, base); + else + l = strtol(ret, &end, base); + + *p_out = (int)l; + + if (end == ret) { + return -1; + } + } + + return 0; +} + +/** + * Parses the next base 10 integer in the AT response line + * and places it in *p_out + * returns 0 on success and -1 on fail + * updates *p_cur + */ +int at_tok_nextint(char **p_cur, int *p_out) +{ + return at_tok_nextint_base(p_cur, p_out, 10, 0); +} + +/** + * Parses the next base 16 integer in the AT response line + * and places it in *p_out + * returns 0 on success and -1 on fail + * updates *p_cur + */ +int at_tok_nexthexint(char **p_cur, int *p_out) +{ + return at_tok_nextint_base(p_cur, p_out, 16, 1); +} + +int at_tok_nextbool(char **p_cur, char *p_out) +{ + int ret; + int result; + + ret = at_tok_nextint(p_cur, &result); + + if (ret < 0) { + return -1; + } + + // booleans should be 0 or 1 + if (!(result == 0 || result == 1)) { + return -1; + } + + if (p_out != NULL) { + *p_out = (char)result; + } + + return ret; +} + +int at_tok_nextstr(char **p_cur, char **p_out) +{ + if (*p_cur == NULL) { + return -1; + } + + *p_out = nextTok(p_cur); + + return 0; +} + +/** returns 1 on "has more tokens" and 0 if no */ +int at_tok_hasmore(char **p_cur) +{ + return ! (*p_cur == NULL || **p_cur == '\0'); +} + +int at_tok_count(const char *in_line) +{ + int commas = 0; + const char *p; + + if (!in_line) + return 0; + + for (p = in_line; *p != '\0' ; p++) { + if (*p == ',') commas++; + } + + return commas; +} + +//fmt: d ~ int, x ~ hexint, b ~ bool, s ~ str +int at_tok_scanf(const char *in_line, const char *fmt, ...) 
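+/*
+ * Usage sketch (response text hypothetical). Returns the number of fields
+ * converted; %s results point into a static buffer owned by at_tok_scanf,
+ * so copy them out before the next call if they must persist:
+ *
+ *   int cid = 0, state = 0;
+ *   int n = at_tok_scanf("+CGACT: 1,1", "%d%d", &cid, &state);
+ *   // n == 2 on success, with cid == 1 and state == 1
+ */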
+{ + int n = 0; + int err; + va_list ap; + const char *p = fmt; + void *d; + void *dump; + static char s_line[1024]; + char *line = s_line; + + if (!in_line) + return 0; + + strncpy(s_line, in_line, sizeof(s_line) - 1); + + va_start(ap, fmt); + + err = at_tok_start(&line); + if (err < 0) goto error; + + for (; *p; p++) { + if (*p == ',' || *p == ' ') + continue; + + if (*p != '%') { + goto error; + } + p++; + + d = va_arg(ap, void *); + if (!d) + d = &dump; + + if (!at_tok_hasmore(&line)) + break; + + if (*line == '-' && *(line + 1) == ',') { + line += 2; + n++; + if (*p == 'd') + *(int *)d = -1; + continue; + } + + switch(*p) { + case 'd': + err = at_tok_nextint(&line, (int *)d); + if (err < 0) goto error; + break; + case 'x': + err = at_tok_nexthexint(&line, (int *)d); + if (err < 0) goto error; + break; + case 'b': + err = at_tok_nextbool(&line, (char *)d); + if (err < 0) goto error; + break; + case 's': + err = at_tok_nextstr(&line, (char **)d); //if strdup(line), here return free memory to caller + if (err < 0) goto error; + break; + default: + goto error; + break; + } + + n++; + } + + va_end(ap); + +error: + //free(line); + return n; +} diff --git a/package/wwan/driver/quectel_cm_5G/src/at_tok.h b/package/wwan/driver/quectel_cm_5G/src/at_tok.h new file mode 100644 index 000000000..2fcb68346 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/at_tok.h @@ -0,0 +1,33 @@ +/* //device/system/reference-ril/at_tok.h +** +** Copyright 2006, The Android Open Source Project +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +#ifndef AT_TOK_H +#define AT_TOK_H 1 + +int at_tok_start(char **p_cur); +int at_tok_nextint(char **p_cur, int *p_out); +int at_tok_nexthexint(char **p_cur, int *p_out); + +int at_tok_nextbool(char **p_cur, char *p_out); +int at_tok_nextstr(char **p_cur, char **out); + +int at_tok_hasmore(char **p_cur); +int at_tok_count(const char *in_line); +int at_tok_scanf(const char *line, const char *fmt, ...); + +#endif /*AT_TOK_H */ + diff --git a/package/wwan/driver/quectel_cm_5G/src/atc.c b/package/wwan/driver/quectel_cm_5G/src/atc.c new file mode 100644 index 000000000..3f61c359c --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/atc.c @@ -0,0 +1,1054 @@ +/****************************************************************************** + @file atc.c + @brief at command. + + DESCRIPTION + Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules. + + INITIALIZATION AND SEQUENCING REQUIREMENTS + None. + + --------------------------------------------------------------------------- + Copyright (c) 2016 - 2020 Quectel Wireless Solution, Co., Ltd. All Rights Reserved. + Quectel Wireless Solution Proprietary and Confidential. 
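+
+   Outline of the AT dial flow implemented below (arguments hypothetical;
+   the exact sequence differs between ASR-style and other modules):
+
+       AT+QICSGP=1,1,"apn","user","pass",0   provision APN/auth on PDP 1
+       AT+QNETDEVCTL=1,1,1                   bring the data call up
+       AT+QNETDEVSTATUS=1                    poll until addresses are assigned
+       AT+CGPADDR=1                          read back the IP address
+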
+ --------------------------------------------------------------------------- +******************************************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern int asprintf(char **s, const char *fmt, ...); + +#include "QMIThread.h" + +#include "atchannel.h" +#include "at_tok.h" + +static int asr_style_atc = 0; +static int s_pdp; +#define safe_free(__x) do { if (__x) { free((void *)__x); __x = NULL;}} while(0) +#define safe_at_response_free(__x) { if (__x) { at_response_free(__x); __x = NULL;}} + +#define at_response_error(err, p_response) \ + (err \ + || p_response == NULL \ + || p_response->finalResponse == NULL \ + || p_response->success == 0) + +static int atc_init(PROFILE_T *profile) { + int err; + char *cmd; + ATResponse *p_response = NULL; + + if (profile->proxy[0]) { + s_pdp = profile->pdp; + err = at_send_command_singleline("AT+QNETDEVSTATUS=?", "+QNETDEVSTATUS:", &p_response); + if (at_response_error(err, p_response)) + asr_style_atc = 1; //EC200T/EC100Y do not support this AT, but RG801/RG500U support + safe_at_response_free(p_response); + + return err; + } + + err = at_handshake(); + if (err) { + dbg_time("handshake fail, TODO ... "); + goto exit; + } + + s_pdp = profile->pdp; + at_send_command_singleline("AT+QCFG=\"usbnet\"", "+QCFG:", NULL); + at_send_command_multiline("AT+QNETDEVCTL=?", "+QNETDEVCTL:", NULL); + at_send_command("AT+CGREG=2", NULL); //GPRS Network Registration Status + at_send_command("AT+CEREG=2", NULL); //EPS Network Registration Status + at_send_command("AT+C5GREG=2", NULL); //5GS Network Registration Status + + err = at_send_command_singleline("AT+QNETDEVSTATUS=?", "+QNETDEVSTATUS:", &p_response); + if (at_response_error(err, p_response)) + asr_style_atc = 1; //EC200T/EC100Y do not support this AT, but RG801/RG500U support + safe_at_response_free(p_response); + + err = at_send_command_singleline("AT+QCFG=\"NAT\"", "+QCFG:", &p_response); + if (!at_response_error(err, p_response)) { + int old_nat, new_nat = asr_style_atc ? 1 : 0; + + err = at_tok_scanf(p_response->p_intermediates->line, "%s%d", NULL, &old_nat); + if (err == 2 && old_nat != new_nat) { + safe_at_response_free(p_response); + asprintf(&cmd, "AT+QCFG=\"NAT\",%d", new_nat); + err = at_send_command(cmd, &p_response); + safe_free(cmd); + if (!at_response_error(err, p_response)) { + err = at_send_command("at+cfun=1,1",NULL); + if (!err) + g_donot_exit_when_modem_hangup = 1; + //reboot to take effect + } + safe_at_response_free(p_response); + } + err = 0; + } + safe_at_response_free(p_response); + +exit: + return err; +} + +static int atc_deinit(void) { + return 0; +} + +/** + * Called by atchannel when an unsolicited line appears + * This is called on atchannel's reader thread. 
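+ * Example URC lines dispatched below (payloads hypothetical):
+ *   "+QNETDEVSTATUS: ..." -> RIL_UNSOL_DATA_CALL_LIST_CHANGED
+ *   "+CEREG: 1"           -> RIL_UNSOL_RESPONSE_VOICE_NETWORK_STATE_CHANGED
+ * Note: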
AT commands may + * not be issued here + */ +static void onUnsolicited (const char *s, const char *sms_pdu) +{ + (void)sms_pdu; + + if (strStartsWith(s, "+QNETDEVSTATUS:")) { + qmidevice_send_event_to_main(RIL_UNSOL_DATA_CALL_LIST_CHANGED); + } + else if (strStartsWith(s, "+CGREG:") + || strStartsWith(s, "+CEREG:") + || strStartsWith(s, "+C5GREG:")) { + qmidevice_send_event_to_main(RIL_UNSOL_RESPONSE_VOICE_NETWORK_STATE_CHANGED); + } +} + +static void onTimeout(void) { + dbg_time("%s", __func__); + //TODO +} + +static void onClose(void) { + dbg_time("%s", __func__); +} + +static void * atc_read_thread(void *param) { + PROFILE_T *profile = (PROFILE_T *)param; + const char *cdc_wdm = (const char *)profile->qmichannel; + int wait_for_request_quit = 0; + int atc_fd; + + atc_fd = cm_open_dev(cdc_wdm); + if (atc_fd <= 0) { + dbg_time("fail to open (%s), errno: %d (%s)", cdc_wdm, errno, strerror(errno)); + goto __quit; + } + + dbg_time("atc_fd = %d", atc_fd); + + if (at_open(atc_fd, onUnsolicited, 0)) + goto __quit; + + at_set_on_timeout(onTimeout); + at_set_on_reader_closed(onClose); + qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_CONNECTED); + + while (atc_fd > 0) { + struct pollfd pollfds[] = {{atc_fd, POLLIN, 0}, {qmidevice_control_fd[1], POLLIN, 0}}; + int ne, ret, nevents = 2; + + ret = poll(pollfds, nevents, wait_for_request_quit ? 1000 : -1); + + if (ret == 0 && wait_for_request_quit) { + break; + } + + if (ret < 0) { + dbg_time("%s poll=%d, errno: %d (%s)", __func__, ret, errno, strerror(errno)); + break; + } + + for (ne = 0; ne < nevents; ne++) { + int fd = pollfds[ne].fd; + short revents = pollfds[ne].revents; + + if (revents & (POLLERR | POLLHUP | POLLNVAL)) { + dbg_time("%s poll err/hup/inval", __func__); + dbg_time("epoll fd = %d, events = 0x%04x", fd, revents); + if (revents & (POLLERR | POLLHUP | POLLNVAL)) + goto __quit; + } + + if ((revents & POLLIN) == 0) + continue; + + if (atc_fd == fd) { + usleep(10*1000); //let atchannel.c read at response. 
+ } + else if (fd == qmidevice_control_fd[1]) { + int triger_event; + if (read(fd, &triger_event, sizeof(triger_event)) == sizeof(triger_event)) { + //dbg_time("triger_event = 0x%x", triger_event); + switch (triger_event) { + case RIL_REQUEST_QUIT: + goto __quit; + break; + case SIG_EVENT_STOP: + wait_for_request_quit = 1; + break; + default: + break; + } + } + } + } + } + +__quit: + at_close(); + qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_DISCONNECTED); + dbg_time("%s exit", __func__); + + return NULL; +} + +const struct qmi_device_ops atc_dev_ops = { + .init = atc_init, + .deinit = atc_deinit, + .read = atc_read_thread, +}; + +static int requestBaseBandVersion(PROFILE_T *profile) { + int retVal = -1; + int err; + ATResponse *p_response = NULL; + + (void)profile; + + err = at_send_command_multiline("AT+CGMR", "\0", &p_response); + if (at_response_error(err, p_response)) + goto exit; + + if (p_response->p_intermediates && p_response->p_intermediates->line) { + strncpy(profile->BaseBandVersion, p_response->p_intermediates->line, sizeof(profile->BaseBandVersion) - 1); + retVal = 0; + } + +exit: + safe_at_response_free(p_response); + return retVal; +} + +static int requestGetSIMStatus(SIM_Status *pSIMStatus) +{ + int err; + ATResponse *p_response = NULL; + char *cpinLine; + char *cpinResult; + int ret = SIM_NOT_READY; + + err = at_send_command_singleline("AT+CPIN?", "+CPIN:", &p_response); + if (at_response_error(err, p_response)) + goto done; + + switch (at_get_cme_error(p_response)) + { + case CME_SUCCESS: + break; + + case CME_SIM_NOT_INSERTED: + case CME_OPERATION_NOT_ALLOWED: + case CME_FAILURE: + ret = SIM_ABSENT; + goto done; + + default: + ret = SIM_NOT_READY; + goto done; + } + + cpinLine = p_response->p_intermediates->line; + err = at_tok_start (&cpinLine); + + if (err < 0) + { + ret = SIM_NOT_READY; + goto done; + } + + err = at_tok_nextstr(&cpinLine, &cpinResult); + + if (err < 0) + { + ret = SIM_NOT_READY; + goto done; + } + + if (0 == strcmp (cpinResult, "SIM PIN")) + { + ret = SIM_PIN; + goto done; + } + else if (0 == strcmp (cpinResult, "SIM PUK")) + { + ret = SIM_PUK; + goto done; + } + else if (0 == strcmp (cpinResult, "PH-NET PIN")) + { + return SIM_NETWORK_PERSONALIZATION; + } + else if (0 != strcmp (cpinResult, "READY")) + { + /* we're treating unsupported lock types as "sim absent" */ + ret = SIM_ABSENT; + goto done; + } + + ret = SIM_READY; + +done: + safe_at_response_free(p_response); + *pSIMStatus = ret; + return err; +} + +static int requestEnterSimPin(const char *pPinCode) { + int retVal = -1; + int err; + ATResponse *p_response = NULL; + char *cmd = NULL; + + asprintf(&cmd, "AT+CPIN=%s", pPinCode); + err = at_send_command(cmd, NULL); + safe_free(cmd); + + if (!at_response_error(err, p_response)) { + retVal = 0; + } + + safe_at_response_free(p_response); + return retVal; +} + +static int requestSetProfile(PROFILE_T *profile) { + int err; + ATResponse *p_response = NULL; + char *cmd = NULL; + const char *new_apn = profile->apn ? profile->apn : ""; + const char *new_user = profile->user ? profile->user : ""; + const char *new_password = profile->password ? 
profile->password : ""; + const char *ipStr[] = {"NULL", "IPV4", "IPV6", "IPV4V6"}; + + dbg_time("%s[%d] %s/%s/%s/%d/%s", __func__, + profile->pdp, profile->apn, profile->user, profile->password, + profile->auth,ipStr[profile->iptype]); + + if ( !strcmp(profile->old_apn, new_apn) && !strcmp(profile->old_user, new_user) + && !strcmp(profile->old_password, new_password) + && profile->old_iptype == profile->iptype + && profile->old_auth == profile->auth) + { + dbg_time("no need to set skip the rest"); + return 0; + } + + asprintf(&cmd, "AT+QICSGP=%d,%d,\"%s\",\"%s\",\"%s\",%d", + profile->pdp, profile->iptype, new_apn, new_user, new_password, profile->auth); + err = at_send_command(cmd, &p_response); + safe_free(cmd); + if (at_response_error(err, p_response)) { + safe_at_response_free(p_response); + asprintf(&cmd, "AT+CGDCONT=%d,\"%s\",\"%s\"", profile->pdp, ipStr[profile->iptype], new_apn); + err = at_send_command(cmd, &p_response); + safe_free(cmd); + } + + safe_at_response_free(p_response); + return 1; +} + +static int requestGetProfile(PROFILE_T *profile) { + int retVal = -1; + int err; + ATResponse *p_response = NULL; + char *cmd = NULL; + int pdp; + int old_iptype = 1; // 1 ~ IPV4, 2 ~ IPV6, 3 ~ IPV4V6 + char *old_apn = "", *old_user = "", *old_password = ""; + int old_auth = 0; + const char *ipStr[] = {"NULL", "IPV4", "IPV6", "IPV4V6"}; + + if (profile->enable_ipv4 && profile->enable_ipv6) + profile->iptype = 3; + else if (profile->enable_ipv6) + profile->iptype = 2; + else + profile->iptype = 1; + +_re_check: + asprintf(&cmd, "AT+QICSGP=%d", profile->pdp); + err = at_send_command_singleline(cmd, "+QICSGP:", &p_response); + safe_free(cmd); + if (err == AT_ERROR_INVALID_RESPONSE && p_response == NULL) { + //bug of RG801H + safe_at_response_free(p_response); + asprintf(&cmd, "AT+QICSGP=%d,%d,\"\",\"\",\"\",0", profile->pdp, profile->iptype); + err = at_send_command(cmd, &p_response); + safe_free(cmd); + if (!at_response_error(err, p_response)) { + safe_at_response_free(p_response); + goto _re_check; + } + } + + if (!at_response_error(err, p_response)) { + err = at_tok_scanf(p_response->p_intermediates->line, + "%d%s%s%s%d", &old_iptype, &old_apn, &old_user, &old_password, &old_auth); + + if (err != 4 || pdp != profile->pdp) + goto _error; + } + else { + ATLine *atLine = NULL; + char *cgdcont_iptype = NULL; + + safe_at_response_free(p_response); + err = at_send_command_multiline("AT+CGDCONT?", "+CGDCONT:", &p_response); + if (at_response_error(err, p_response)) + goto _error; + + atLine = p_response->p_intermediates; + while (atLine) { + err = at_tok_scanf(atLine->line, "%d%s%s", &pdp, &cgdcont_iptype, &old_apn); + if (err == 3 && pdp == profile->pdp) { + if (!strcasecmp(cgdcont_iptype, ipStr[3])) + old_iptype = 3; + else if (!strcasecmp(cgdcont_iptype, ipStr[2])) + old_iptype = 2; + else + old_iptype = 1; + break; + } + old_apn = NULL; + atLine = atLine->p_next; + } + } + + retVal = 0; + +_error: + if (!old_apn) old_apn = ""; + if (!old_user) old_user = ""; + if (!old_password) old_password = ""; + + strncpy(profile->old_apn, old_apn, sizeof(profile->old_apn)); + strncpy(profile->old_user, old_user, sizeof(profile->old_user)); + strncpy(profile->old_password, old_password, sizeof(profile->old_password)); + profile->old_auth = old_auth; + profile->old_iptype = old_iptype; + + dbg_time("%s[%d] %s/%s/%s/%d/%s", __func__, + profile->pdp, profile->old_apn, profile->old_user, profile->old_password, + profile->old_auth, ipStr[profile->old_iptype]); + + safe_at_response_free(p_response); + + 
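+    /* Shape of the +QICSGP read-back consumed above, e.g. (values
+       hypothetical):
+           +QICSGP: 1,"cmnet","","",0
+       i.e. old_iptype=1 (IPv4), old_apn="cmnet", empty user/password,
+       old_auth=0 */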
return retVal; +} + +static int requestRegistrationState(UCHAR *pPSAttachedState) { + int retVal = -1; + int err; + ATResponse *p_response = NULL; + ATLine *p_cur; + int i; + int cops_act = -1; + int state = 0, lac = 0, cid = 0, act = 0; + int commas; + char *line; + + *pPSAttachedState = 0; + + err = at_send_command_multiline( + "AT+COPS=3,0;+COPS?;+COPS=3,1;+COPS?;+COPS=3,2;+COPS?", + "+COPS:", &p_response); + if (at_response_error(err, p_response)) + goto error; + +/* +AT< +COPS: 0,0,"CHINA MOBILE",13 +AT< +COPS: 0,1,"CMCC",13 +AT< +COPS: 0,2,"46000",13 +AT< OK +*/ + retVal = 0; + for (i = 0, p_cur = p_response->p_intermediates; p_cur != NULL; p_cur = p_cur->p_next, i++) { + err = at_tok_scanf(p_cur->line, "%d%d%s%d", NULL, NULL, NULL, &cops_act); + if (err != 4) goto error; + + break; + } + + safe_at_response_free(p_response); + switch (cops_act) { + case 2: //UTRAN + case 4: //UTRAN W/HSDPA + case 5: //UTRAN W/HSUPA + case 6: //UTRAN W/HSDPA and HSUPA + //AT+CGREG GPRS Network Registration Status + err = at_send_command_singleline("AT+CGREG?", "+CGREG:", &p_response); + break; + + case 7: //E-UTRAN + case 13: //E-UTRAN-NR dual connectivity + //AT+CEREG EPS Network Registration Status + err = at_send_command_singleline("AT+CEREG?", "+CEREG:", &p_response); + break; + + case 10: //E-UTRAN connected to a 5GCN + case 11: //NR connected to a 5GCN + case 12: //NG-RAN + //AT+C5GREG 5GS Network Registration Status + err = at_send_command_singleline("AT+C5GREG?", "+C5GREG:", &p_response); + break; + + default: + goto error; + break; + } + + if (at_response_error(err, p_response)) + goto error; + if (!p_response->p_intermediates || !p_response->p_intermediates->line) goto error; + + line = p_response->p_intermediates->line; + commas = at_tok_count(line); + switch (commas) { + case 0: /* +CREG: */ + err = at_tok_nextint(&line, &state); + if (err < 0) goto error; + break; + + case 1: /* +CREG: , */ + err = at_tok_scanf(line, "%d%d", NULL, &state); + if (err != 2) goto error; + break; + + case 2: /* +CREG: , , */ + err = at_tok_scanf(line, "%d%x%x", NULL, &state, &lac, &cid); + if (err != 3) goto error; + break; + + case 3: /* +CREG: , , , */ + err = at_tok_scanf(line, "%d%d%x%x", NULL, &state, &lac, &cid); + if (err != 4) goto error; + break; + + case 4: //, , , , */ + case 5: + case 6: + case 7: + err = at_tok_scanf(line, "%d%d%x%x%d", NULL, &state, &lac, &cid, &act); + if (err != 5) goto error; + break; + + default: + goto error; + } + + //dbg_time("state=%d", state); + + if (state == 1 || state == 5) { //Registered, home network / roaming + *pPSAttachedState = 1; + } + +error: + safe_at_response_free(p_response); + return retVal; +} + +static int requestSetupDataCall(PROFILE_T *profile, int curIpFamily) { + int err; + ATResponse *p_response = NULL; + char *cmd = NULL; + ATLine *p_cur = NULL; + int pdp = profile->pdp; + int state = 0; + + (void)curIpFamily; + + if (strStartsWith(profile->BaseBandVersion, "RG801H") || strStartsWith(profile->BaseBandVersion, "EC200H")) { + //RG801H will miss USB_CDC_NOTIFY_NETWORK_CONNECTION + asprintf(&cmd, "ifconfig %s up", profile->usbnet_adapter); + if (system(cmd)) {}; + safe_free(cmd); + } + + if (asr_style_atc) { + err = at_send_command_multiline("AT+CGACT?", "+CGACT:", &p_response); + if (at_response_error(err, p_response)) + goto _error; + + for (p_cur = p_response->p_intermediates; p_cur != NULL; p_cur = p_cur->p_next) { + int cid = 0; + state = 0; + + err = at_tok_scanf(p_cur->line, "%d%d", &cid, &state); + if (cid == pdp) + break; + else if(state) + 
state = 0; + } + safe_at_response_free(p_response); + + if (state == 0) { + asprintf(&cmd, "AT+CGACT=1,%d", pdp); + err = at_send_command(cmd, &p_response); + safe_free(cmd); + if (at_response_error(err, p_response)) + goto _error; + } + } + + if(asr_style_atc) + asprintf(&cmd, "AT+QNETDEVCTL=1,%d,%d", pdp, 1); + else + asprintf(&cmd, "AT+QNETDEVCTL=%d,1,%d", pdp, 1); + err = at_send_command(cmd, &p_response); + safe_free(cmd); + + if (at_response_error(err, p_response)) + goto _error; + + if (!asr_style_atc) { //TODO some modems do not sync return setup call resule + int t = 0; + + while (t++ < 15) { + asprintf(&cmd, "AT+QNETDEVSTATUS=%d", pdp); + err = at_send_command_singleline(cmd, "+QNETDEVSTATUS", &p_response); + safe_free(cmd); + if (err) goto _error; + + if (!at_response_error(err, p_response)) { + break; + } + safe_at_response_free(p_response); + sleep(1); + } + } + + //some modem do not report URC + qmidevice_send_event_to_main(RIL_UNSOL_DATA_CALL_LIST_CHANGED); + +_error: + safe_at_response_free(p_response); + //dbg_time("%s err=%d", __func__, err); + return err; +} + +static int at_netdevstatus(int pdp, unsigned int *pV4Addr) { + int err; + ATResponse *p_response = NULL; + char *cmd = NULL; + char *ipv4_address = NULL; + char *ipv4_gate = NULL; + char *ipv4_DHCP = NULL; + char *ipv4_pDNS = NULL; + char *ipv4_sDNS = NULL; + char *ipv6_address = NULL; + char *ipv6_gate = NULL; + char *ipv6_DHCP = NULL; + char *ipv6_pDNS = NULL; + char *ipv6_sDNS = NULL; + + *pV4Addr = 0; + + asprintf(&cmd, "AT+QNETDEVSTATUS=%d", pdp); + err = at_send_command_singleline(cmd, "+QNETDEVSTATUS", &p_response); + safe_free(cmd); + if (at_response_error(err, p_response)) goto _error; + if (!p_response->p_intermediates || !p_response->p_intermediates->line) goto _error; + + err = at_tok_scanf(p_response->p_intermediates->line, "%s%s%s%s%s%s%s%s%s%s", + &ipv4_address, &ipv4_gate, &ipv4_DHCP, &ipv4_pDNS, &ipv4_sDNS, + &ipv6_address, &ipv6_gate, &ipv6_DHCP, &ipv6_pDNS, &ipv6_sDNS); + if (err > 0) { +#if 0 + dbg_time("%s,%s,%s,%s,%s,%s,%s,%s,%s,%s", + ipv4_address, ipv4_gate, ipv4_DHCP, ipv4_pDNS, ipv4_sDNS, + ipv6_address, ipv6_gate, ipv6_DHCP, ipv6_pDNS, ipv6_sDNS); +#endif + + if (ipv4_address && ipv4_address[0]) { + int addr[4] = {0, 0, 0, 0}; + + if (strstr(ipv4_address, ".")) { + sscanf(ipv4_address, "%d.%d.%d.%d", &addr[0], &addr[1], &addr[2], &addr[3]); + } + else { + sscanf(ipv4_address, "%02X%02X%02X%02X", &addr[3], &addr[2], &addr[1], &addr[0]); + } + *pV4Addr = (addr[0]) | (addr[1]<<8) | (addr[2]<<16) | (addr[3]<<24); + } + } + +_error: + safe_at_response_free(p_response); + return 0; +} + +static int requestQueryDataCall(UCHAR *pConnectionStatus, int curIpFamily) { + int err; + ATResponse *p_response = NULL; + ATLine *p_cur = NULL; + int state = 0; + int bind = 0; + int cid; + int pdp = s_pdp; + unsigned int v4Addr = 0; + + (void)curIpFamily; + + *pConnectionStatus = QWDS_PKT_DATA_DISCONNECTED; + + if (!asr_style_atc) { + err = at_netdevstatus(pdp, &v4Addr); + if (!err && v4Addr) { + *pConnectionStatus = QWDS_PKT_DATA_CONNECTED; + //if (profile->ipv4.Address == v4Addr) {} //TODO + } + return err; + } + + err = at_send_command_multiline("AT+QNETDEVCTL?", "+QNETDEVCTL:", &p_response); + if (at_response_error(err, p_response)) + goto _error; + + for (p_cur = p_response->p_intermediates; p_cur != NULL; p_cur = p_cur->p_next) + { + //+QNETDECTL:,,, + err = at_tok_scanf(p_cur->line, "%d%d%d%d", &bind, &cid, NULL, &state); + if (err != 4 || cid != pdp) + continue; + if (bind != 1) + bind = 0; + } + 
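+    /* the call counts as up only if some +QNETDEVCTL line reports our PDP
+       with bind==1 and state==1; AT+CGACT? is cross-checked below */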
safe_at_response_free(p_response); + + if (bind == 0 || state == 0) + goto _error; + + err = at_send_command_multiline("AT+CGACT?", "+CGACT:", &p_response); + if (at_response_error(err, p_response)) + goto _error; + + for (p_cur = p_response->p_intermediates; p_cur != NULL; p_cur = p_cur->p_next) + { + state = 0; + err = at_tok_scanf(p_cur->line, "%d%d", &cid, &state); + if (cid == pdp) + break; + else if(state) + state = 0; + } + safe_at_response_free(p_response); + + if (bind && state) + *pConnectionStatus = QWDS_PKT_DATA_CONNECTED; + +_error: + safe_at_response_free(p_response); + //dbg_time("%s err=%d, call_state=%d", __func__, err, *pConnectionStatus); + return 0; +} + +static int requestDeactivateDefaultPDP(PROFILE_T *profile, int curIpFamily) { + char *cmd = NULL; + int pdp = profile->pdp; + + (void)curIpFamily; + + if (asr_style_atc) + asprintf(&cmd, "AT+QNETDEVCTL=0,%d,%d", pdp, 0); + else + asprintf(&cmd, "AT+QNETDEVCTL=%d,0,%d", pdp, 0); + at_send_command(cmd, NULL); + safe_free(cmd); + + //dbg_time("%s err=%d", __func__, err); + return 0; +} + +static int requestGetIPAddress(PROFILE_T *profile, int curIpFamily) { + int err; + ATResponse *p_response = NULL; + char *cmd = NULL; + ATLine *p_cur = NULL; + int pdp = profile->pdp; + unsigned int v4Addr = 0; + + (void)curIpFamily; + + if (!asr_style_atc) { + err = at_netdevstatus(pdp, &v4Addr); + goto _error; + } + + asprintf(&cmd, "AT+CGPADDR=%d", profile->pdp); + err = at_send_command_singleline(cmd, "+CGPADDR:", &p_response); + safe_free(cmd); + if (at_response_error(err, p_response)) + goto _error; + + //+CGPADDR: 1,"10.201.80.91","2409:8930:4B3:41C7:F9B8:3D9B:A2F7:CA96" + for (p_cur = p_response->p_intermediates; p_cur != NULL; p_cur = p_cur->p_next) + { + char *ipv4 = NULL; + char *ipv6 = NULL; + + err = at_tok_scanf(p_cur->line, "%d%s%s", &pdp, &ipv4, &ipv6); + if (err < 2 || pdp != profile->pdp) + continue; + + if (ipv4) { + int addr[4] = {0, 0, 0, 0}; + + sscanf(ipv4, "%d.%d.%d.%d", &addr[0], &addr[1], &addr[2], &addr[3]); + v4Addr = (addr[0]) | (addr[1]<<8) | (addr[2]<<16) | (addr[3]<<24); + break; + } + } + +_error: + if (v4Addr && profile->ipv4.Address != v4Addr) { + unsigned char *v4 = (unsigned char *)&v4Addr; + + profile->ipv4.Address = v4Addr; + dbg_time("%s %d.%d.%d.%d", __func__, v4[0], v4[1], v4[2], v4[3]); + } + + //dbg_time("%s err=%d", __func__, err); + return v4Addr ? 
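+        /* 0 when an IPv4 address was obtained, -1 otherwise */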
0 : -1; +} + +static int requestGetSignalInfo(void) { + int retVal = -1; + int err; + ATResponse *p_response = NULL; + int i; + ATLine *p_cur = NULL; + char *rat = NULL; + int cops_act = 0; + int is_nr5g_nsa = 0, nr5g_sa = 0; + int verbose = 0; + + err = at_send_command_singleline("at+cops?", "+COPS:", &p_response); + if (at_response_error(err, p_response)) goto _error; + if (!p_response->p_intermediates || !p_response->p_intermediates->line) goto _error; + + retVal = 0; + err = at_tok_scanf(p_response->p_intermediates->line, "%d%d%s%d", NULL, NULL, NULL, &cops_act); + if (err != 4) goto _error; + + nr5g_sa = (cops_act == 11); + + safe_at_response_free(p_response); + err = at_send_command_multiline("at+qeng=\"servingcell\"", "+QENG:", &p_response); + if (at_response_error(err, p_response)) + goto _error; + + for (i = 0, p_cur = p_response->p_intermediates; p_cur != NULL; p_cur = p_cur->p_next, i++) { + char *type, *state; + + err = at_tok_scanf(p_cur->line, "%s%s", &type, &state); + if (err != 2 || strcmp(type, "servingcell")) + continue; + + if (!strcmp(state, "SEARCH") || !strcmp(state, "LIMSRV")) + continue; + + if (!strcmp(state, "NOCONN") || !strcmp(state, "CONNECT")) { + err = at_tok_scanf(p_cur->line, "%s%s%s", &type, &state, &rat); + if (err != 3) + continue; + } + else { + rat = state; + } + + if (!strcmp(rat, "NR5G-SA")) + { + //+QENG: "servingcell",,"NR5G-SA",,,,,,,,,,,,,, + //+QENG: "servingcell","NOCONN","NR5G-SA","TDD", 454,12,0,21,4ED,636576,78,3,-85,-11,32,0,5184 + struct qeng_servingcell_nr5g_sa { + char *cell_type, *state, *rat, *is_tdd; + int MCC, MNC, cellID/*hex*/; + int PCID, TAC/*hex*/, ARFCN; + int band, NR_DL_bandwidth; + int RSRP, RSRQ, RSSI, SINR; + }; + struct qeng_servingcell_nr5g_sa nr5g_sa; + + memset(&nr5g_sa, 0, sizeof(nr5g_sa)); + err = at_tok_scanf(p_cur->line, "%s,%s,%s,%s,%d,%d,%x,%d,%x,%d,%d,%d,%d,%d,%d,%d", + &nr5g_sa.cell_type, &nr5g_sa.state, &nr5g_sa.rat, &nr5g_sa.is_tdd, + &nr5g_sa.MCC, &nr5g_sa.MNC, &nr5g_sa.cellID, &nr5g_sa.PCID, &nr5g_sa.TAC, + &nr5g_sa.ARFCN, &nr5g_sa.band, &nr5g_sa.NR_DL_bandwidth, + &nr5g_sa.RSRP, &nr5g_sa.RSRQ, &nr5g_sa.RSSI, &nr5g_sa.SINR); + + if (err >= 13 && verbose) { + dbg_time("%s,%s,%s,%s,%d,%d,%x,%d,%x,%d,%d,%d,%d,%d,%d,%d", + nr5g_sa.cell_type, nr5g_sa.state, nr5g_sa.rat, nr5g_sa.is_tdd, + nr5g_sa.MCC, nr5g_sa.MNC, nr5g_sa.cellID, nr5g_sa.PCID, nr5g_sa.TAC, + nr5g_sa.ARFCN, nr5g_sa.band, nr5g_sa.NR_DL_bandwidth, + nr5g_sa.RSRP, nr5g_sa.RSRQ, nr5g_sa.RSSI, nr5g_sa.SINR); + } + } + else if (!strcmp(rat, "NR5G-NSA")) + { + //+QENG: "NR5G-NSA",,,,,< SINR>,,, + struct qeng_servingcell_nr5g_nsa { + char *mcc, *mnc; + int pcid, rsrp, sinr, rsrq; + }; + struct qeng_servingcell_nr5g_nsa nr5g_nsa; + + memset(&nr5g_nsa, 0, sizeof(nr5g_nsa)); + err = at_tok_scanf(p_cur->line, "%s%s%s%s%d%d%d%dd", + NULL, NULL, &nr5g_nsa.mcc, &nr5g_nsa.mnc, &nr5g_nsa.pcid, &nr5g_nsa.rsrp, &nr5g_nsa.sinr, &nr5g_nsa.rsrq); + if (err == 8 && verbose) + { + dbg_time("mcc=%s, mnc=%s, pcid=%d, rsrp=%d, sinr=%d, rsrq=%d", + nr5g_nsa.mcc, nr5g_nsa.mnc, nr5g_nsa.pcid, nr5g_nsa.rsrp, nr5g_nsa.sinr, nr5g_nsa.rsrq); + } + + is_nr5g_nsa = 1; + } + else if (!strcmp(rat, "LTE")) + { + //+QENG: "LTE",,,,,,,,,,,,,,,,, + struct qeng_servingcell_lte { + char *is_tdd, *mcc, *mnc; + int cellID/*hex*/, pcid, earfcn, freq_band_ind, ul_bandwidth, dl_bandwidth; + int tac/*hex*/, rsrp, rsrq, rssi, sinr, cqi,tx_power,srxlev; + }; + struct qeng_servingcell_lte lte; + + memset(<e, 0, sizeof(lte)); + if (!strcmp(rat, state)) + err = at_tok_scanf(p_cur->line, 
"%s%s%s%s%s%x%d%d%d%d%d%x%d%d%d%d%d%d%d", + NULL, NULL, <e.is_tdd, <e.mcc, <e.mnc, + <e.cellID, <e.pcid, <e.earfcn, <e.freq_band_ind, <e.ul_bandwidth, <e.dl_bandwidth, + <e.tac, <e.rsrp, <e.rsrq, <e.rssi, <e.sinr, <e.cqi, <e.tx_power, <e.srxlev); + else + err = at_tok_scanf(p_cur->line, "%s%s%s%s%s%s%x%d%d%d%d%d%x%d%d%d%d%d%d%d", + NULL, NULL, NULL, <e.is_tdd, <e.mcc, <e.mnc, + <e.cellID, <e.pcid, <e.earfcn, <e.freq_band_ind, <e.ul_bandwidth, <e.dl_bandwidth, + <e.tac, <e.rsrp, <e.rsrq, <e.rssi, <e.sinr, <e.cqi, <e.tx_power, <e.srxlev); + + if (err >= 18 && verbose) + { + dbg_time("is_tdd=%s, mcc=%s, mnc=%s", lte.is_tdd, lte.mcc, lte.mnc); + dbg_time("cellID=%x, pcid=%d, earfcn=%d", lte.cellID, lte.pcid, lte.earfcn); + dbg_time("freq_band_ind=%d, ul_bandwidth=%d, dl_bandwidth=%d", lte.freq_band_ind, lte.ul_bandwidth, lte.dl_bandwidth); + dbg_time("tac=%x, rsrp=%d, rsrq=%d, rssi=%d, sinr=%d", lte.tac, lte.rsrp, lte.rsrq, lte.rssi, lte.sinr); + dbg_time("cqi=%d, tx_power=%d, earfcn=%d", lte.cqi, lte.tx_power, lte.srxlev); + } + } + } + + if (is_nr5g_nsa) { + int endc_avl, plmn_info_list_r15_avl, endc_rstr, nr5g_basic; + + is_nr5g_nsa = 0; + safe_at_response_free(p_response); + err = at_send_command_multiline("at+qendc", "+QENDC:", &p_response); + if (at_response_error(err, p_response)) goto _error; + if (!p_response->p_intermediates || !p_response->p_intermediates->line) goto _error; + + err = at_tok_scanf(p_response->p_intermediates->line, "%d%d%d%d", + &endc_avl, &plmn_info_list_r15_avl, &endc_rstr, &nr5g_basic); + if (err == 4 && nr5g_basic) { + is_nr5g_nsa = 1; + } + } + + if (verbose) + dbg_time("cops_act=%d, nr5g_nsa=%d, nr5g_sa=%d", cops_act, is_nr5g_nsa, nr5g_sa); + +_error: + safe_at_response_free(p_response); + return retVal; +} + +static int requestGetICCID(void) { + int retVal = -1; + int err; + ATResponse *p_response = NULL; + char *iccid; + + err = at_send_command_singleline("AT+QCCID", "+QCCID:", &p_response); + if (at_response_error(err, p_response)) goto _error; + if (!p_response->p_intermediates || !p_response->p_intermediates->line) goto _error; + + err = at_tok_scanf(p_response->p_intermediates->line, "%s", &iccid); + if (err != 1) goto _error; + + if (iccid && iccid[0]) { + dbg_time("%s %s", __func__, iccid); + retVal = 0; + } + +_error: + safe_at_response_free(p_response); + return retVal; +} + +static int requestGetIMSI(void) { + int retVal = -1; + int err; + ATResponse *p_response = NULL; + char *imsi; + + err = at_send_command_numeric("AT+CIMI", &p_response); + if (at_response_error(err, p_response)) goto _error; + if (!p_response->p_intermediates || !p_response->p_intermediates->line) goto _error; + + imsi = p_response->p_intermediates->line; + if (imsi) { + dbg_time("%s %s", __func__, imsi); + retVal = 0; + } + +_error: + safe_at_response_free(p_response); + return retVal; +} + +const struct request_ops atc_request_ops = { + .requestBaseBandVersion = requestBaseBandVersion, + .requestGetSIMStatus = requestGetSIMStatus, + .requestEnterSimPin = requestEnterSimPin, + .requestSetProfile = requestSetProfile, + .requestGetProfile = requestGetProfile, + .requestRegistrationState = requestRegistrationState, + .requestSetupDataCall = requestSetupDataCall, + .requestQueryDataCall = requestQueryDataCall, + .requestDeactivateDefaultPDP = requestDeactivateDefaultPDP, + .requestGetIPAddress = requestGetIPAddress, + .requestGetSignalInfo = requestGetSignalInfo, + .requestGetICCID = requestGetICCID, + .requestGetIMSI = requestGetIMSI, +}; + diff --git 
a/package/wwan/driver/quectel_cm_5G/src/atchannel.c b/package/wwan/driver/quectel_cm_5G/src/atchannel.c new file mode 100644 index 000000000..90aa1c4c9 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/atchannel.c @@ -0,0 +1,1037 @@ +/* //device/system/reference-ril/atchannel.c +** +** Copyright 2006, The Android Open Source Project +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +#include "atchannel.h" +#include "at_tok.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "QMIThread.h" +#define LOGE dbg_time +#define LOGD dbg_time + +#define NUM_ELEMS(x) (sizeof(x)/sizeof(x[0])) + +#define MAX_AT_RESPONSE sizeof(cm_recv_buf) +#define HANDSHAKE_RETRY_COUNT 8 +#define HANDSHAKE_TIMEOUT_MSEC 1000 + +static pthread_t s_tid_reader; +static int s_fd = -1; /* fd of the AT channel */ +static ATUnsolHandler s_unsolHandler; +static int s_atc_proxy = 0; /* fd of the AT channel */ + +/* for input buffering */ + +static char *s_ATBuffer = (char *)cm_recv_buf; +static char *s_ATBufferCur = (char *)cm_recv_buf; + +static int s_readCount = 0; + +#if AT_DEBUG +void AT_DUMP(const char* prefix, const char* buff, int len) +{ + if (len < 0) + len = strlen(buff); + LOGD("%.*s", len, buff); +} +#endif + +/* + * for current pending command + * these are protected by s_commandmutex + */ +static ATCommandType s_type; +static const char *s_responsePrefix = NULL; +static const char *s_smsPDU = NULL; +static const char *s_raw_data = NULL; +static size_t s_raw_len; +static ATResponse *sp_response = NULL; + +static void (*s_onTimeout)(void) = NULL; +static void (*s_onReaderClosed)(void) = NULL; +static int s_readerClosed; + +static void onReaderClosed(); +static int writeCtrlZ (const char *s); +static int writeline (const char *s); +static int writeraw (const char *s, size_t len); + +static void sleepMsec(long long msec) +{ + struct timespec ts; + int err; + + ts.tv_sec = (msec / 1000); + ts.tv_nsec = (msec % 1000) * 1000 * 1000; + + do { + err = nanosleep (&ts, &ts); + } while (err < 0 && errno == EINTR); +} + +/** returns 1 if line starts with prefix, 0 if it does not */ +int strStartsWith(const char *line, const char *prefix) +{ + for ( ; *line != '\0' && *prefix != '\0' ; line++, prefix++) { + if (*line != *prefix) { + return 0; + } + } + + return *prefix == '\0'; +} + +/** add an intermediate response to sp_response*/ +static void addIntermediate(const char *line) +{ + ATLine *p_new; + + p_new = (ATLine *) malloc(sizeof(ATLine)); + + p_new->line = strdup(line); + + /* note: this adds to the head of the list, so the list + will be in reverse order of lines received. 
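+       e.g. intermediates arriving as "A", "B", "C" are stored here as
+       C -> B -> A;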
the order is flipped + again before passing on to the command issuer */ + p_new->p_next = sp_response->p_intermediates; + sp_response->p_intermediates = p_new; +} + + +/** + * returns 1 if line is a final response indicating error + * See 27.007 annex B + * WARNING: NO CARRIER and others are sometimes unsolicited + */ +static const char * s_finalResponsesError[] = { + "ERROR", + "+CMS ERROR:", + "+CME ERROR:", + "NO CARRIER", /* sometimes! */ + "NO ANSWER", + "NO DIALTONE", + "COMMAND NOT SUPPORT", +}; +static int isFinalResponseError(const char *line) +{ + size_t i; + + for (i = 0 ; i < NUM_ELEMS(s_finalResponsesError) ; i++) { + if (strStartsWith(line, s_finalResponsesError[i])) { + return 1; + } + } + + return 0; +} + +/** + * returns 1 if line is a final response indicating success + * See 27.007 annex B + * WARNING: NO CARRIER and others are sometimes unsolicited + */ +static const char * s_finalResponsesSuccess[] = { + "OK", + "+QIND: \"FOTA\",\"END\",0", + "CONNECT" /* some stacks start up data on another channel */ +}; + +static int isFinalResponseSuccess(const char *line) +{ + size_t i; + + for (i = 0 ; i < NUM_ELEMS(s_finalResponsesSuccess) ; i++) { + if (strStartsWith(line, s_finalResponsesSuccess[i])) { + return 1; + } + } + + return 0; +} + +#if 0 +/** + * returns 1 if line is a final response, either error or success + * See 27.007 annex B + * WARNING: NO CARRIER and others are sometimes unsolicited + */ +static int isFinalResponse(const char *line) +{ + return isFinalResponseSuccess(line) || isFinalResponseError(line); +} +#endif + +/** + * returns 1 if line is the first line in (what will be) a two-line + * SMS unsolicited response + */ +static const char * s_smsUnsoliciteds[] = { + "+CMT:", + "+CDS:", + "+CBM:", + "+CMTI:" +}; +static int isSMSUnsolicited(const char *line) +{ + size_t i; + + for (i = 0 ; i < NUM_ELEMS(s_smsUnsoliciteds) ; i++) { + if (strStartsWith(line, s_smsUnsoliciteds[i])) { + return 1; + } + } + + return 0; +} + + +/** assumes s_commandmutex is held */ +static void handleFinalResponse(const char *line) +{ + sp_response->finalResponse = strdup(line); + + pthread_cond_signal(&cm_command_cond); +} + +static void handleUnsolicited(const char *line) +{ + if (s_unsolHandler != NULL) { + s_unsolHandler(line, NULL); + } +} + +static void processLine(const char *line) +{ + pthread_mutex_lock(&cm_command_mutex); + + if (sp_response == NULL) { + /* no command pending */ + handleUnsolicited(line); + } else if (s_raw_data != NULL && 0 == strcmp(line, "CONNECT")) { + usleep(500*1000); //for EC20 + writeraw(s_raw_data, s_raw_len); + s_raw_data = NULL; + } else if (isFinalResponseSuccess(line)) { + if(s_atc_proxy) + handleUnsolicited(line); + sp_response->success = 1; + handleFinalResponse(line); + } else if (isFinalResponseError(line)) { + if(s_atc_proxy) + handleUnsolicited(line); + sp_response->success = 0; + handleFinalResponse(line); + } else if (s_smsPDU != NULL && 0 == strcmp(line, "> ")) { + // See eg. 
TS 27.005 4.3
+        // Commands like AT+CMGS have a "> " prompt
+        writeCtrlZ(s_smsPDU);
+        s_smsPDU = NULL;
+    } else switch (s_type) {
+        case NO_RESULT:
+            handleUnsolicited(line);
+            break;
+        case NUMERIC:
+            if (sp_response->p_intermediates == NULL
+                && isdigit(line[0])
+            ) {
+                addIntermediate(line);
+            } else {
+                /* either we already have an intermediate response or
+                   the line doesn't begin with a digit */
+                handleUnsolicited(line);
+            }
+            break;
+        case SINGLELINE:
+            if (sp_response->p_intermediates == NULL
+                && strStartsWith (line, s_responsePrefix)
+            ) {
+                addIntermediate(line);
+            } else {
+                /* we already have an intermediate response */
+                handleUnsolicited(line);
+            }
+            break;
+        case MULTILINE:
+            if (strStartsWith (line, s_responsePrefix)) {
+                addIntermediate(line);
+            } else {
+                handleUnsolicited(line);
+            }
+            break;
+
+        default: /* this should never be reached */
+            LOGE("Unsupported AT command type %d\n", s_type);
+            handleUnsolicited(line);
+            break;
+    }
+
+    pthread_mutex_unlock(&cm_command_mutex);
+}
+
+
+/**
+ * Returns a pointer to the end of the next line
+ * special-cases the "> " SMS prompt
+ *
+ * returns NULL if there is no complete line
+ */
+static char * findNextEOL(char *cur)
+{
+    if (cur[0] == '>' && cur[1] == ' ' && cur[2] == '\0') {
+        /* SMS prompt character...not \r terminated */
+        return cur+2;
+    }
+
+    // Find next newline
+    while (*cur != '\0' && *cur != '\r' && *cur != '\n') cur++;
+
+    return *cur == '\0' ? NULL : cur;
+}
+
+
+/**
+ * Reads a line from the AT channel, returns NULL on timeout.
+ * Assumes it has exclusive read access to the FD
+ *
+ * This line is valid only until the next call to readline
+ *
+ * This function exists because as of writing, android libc does not
+ * have buffered stdio.
+ */
+
+static const char *readline()
+{
+    ssize_t count;
+
+    char *p_read = NULL;
+    char *p_eol = NULL;
+    char *ret;
+
+    /* this is a little odd. I use *s_ATBufferCur == 0 to
+     * mean "buffer consumed completely". If it points to a character, then
+     * the buffer continues until a \0
+     */
+    if (*s_ATBufferCur == '\0') {
+        /* empty buffer */
+        s_ATBufferCur = s_ATBuffer;
+        *s_ATBufferCur = '\0';
+        p_read = s_ATBuffer;
+    } else {   /* *s_ATBufferCur != '\0' */
+        /* there's data in the buffer from the last read */
+
+        // skip over leading newlines
+        while (*s_ATBufferCur == '\r' || *s_ATBufferCur == '\n')
+            s_ATBufferCur++;
+
+        p_eol = findNextEOL(s_ATBufferCur);
+
+        if (p_eol == NULL) {
+            /* a partial line. move it up and prepare to read more */
+            size_t len;
+
+            len = strlen(s_ATBufferCur);
+
+            memmove(s_ATBuffer, s_ATBufferCur, len + 1);
+            p_read = s_ATBuffer + len;
+            s_ATBufferCur = s_ATBuffer;
+        }
+        /* Otherwise (p_eol != NULL) there is a complete line */
+        /* that will be returned by the while () loop below */
+    }
+
+    while (p_eol == NULL) {
+        if (0 == MAX_AT_RESPONSE - (p_read - s_ATBuffer)) {
+            LOGE("ERROR: Input line exceeded buffer\n");
+            /* ditch buffer and start over again */
+            s_ATBufferCur = s_ATBuffer;
+            *s_ATBufferCur = '\0';
+            p_read = s_ATBuffer;
+        }
+
+        do {
+            while (s_fd > 0) {
+                struct pollfd pollfds[1] = {{s_fd, POLLIN, 0}};
+                int ret;
+
+                do {
+                    ret = poll(pollfds, 1, -1);
+                } while ((ret < 0) && (errno == EINTR));
+
+                if (pollfds[0].revents & (POLLERR | POLLHUP | POLLNVAL)) {
+                    break;
+                } else if (pollfds[0].revents & (POLLIN)) {
+                    break;
+                }
+            }
+
+            count = (s_fd == -1) ?
0 : read(s_fd, p_read, + MAX_AT_RESPONSE - (p_read - s_ATBuffer)); + } while (count < 0 && errno == EINTR); + + if (count > 0) { + AT_DUMP( "<< ", p_read, count ); + s_readCount += count; + + p_read[count] = '\0'; + + // skip over leading newlines + while (*s_ATBufferCur == '\r' || *s_ATBufferCur == '\n') + s_ATBufferCur++; + + p_eol = findNextEOL(s_ATBufferCur); + p_read += count; + } else if (count <= 0) { + /* read error encountered or EOF reached */ + if(count == 0) { + LOGD("atchannel: EOF reached"); + } else { + LOGD("atchannel: read error %s", strerror(errno)); + } + return NULL; + } + } + + /* a full line in the buffer. Place a \0 over the \r and return */ + + ret = s_ATBufferCur; + *p_eol = '\0'; + s_ATBufferCur = p_eol + 1; /* this will always be <= p_read, */ + /* and there will be a \0 at *p_read */ + + LOGD("AT< %s", ret); + return ret; +} + + +static void onReaderClosed() +{ + LOGE("%s", __func__); + if (s_onReaderClosed != NULL && s_readerClosed == 0) { + + pthread_mutex_lock(&cm_command_mutex); + + s_readerClosed = 1; + + pthread_cond_signal(&cm_command_cond); + + pthread_mutex_unlock(&cm_command_mutex); + + s_onReaderClosed(); + } +} + + +static void *readerLoop(void *arg) +{ + (void)arg; + + for (;;) { + const char * line; + + line = readline(); + + if (line == NULL) { + break; + } + + if(isSMSUnsolicited(line)) { + char *line1; + const char *line2; + + // The scope of string returned by 'readline()' is valid only + // till next call to 'readline()' hence making a copy of line + // before calling readline again. + line1 = strdup(line); + line2 = readline(); + + if (line2 == NULL) { + break; + } + + if (s_unsolHandler != NULL) { + s_unsolHandler (line1, line2); + } + free(line1); + } else { + processLine(line); + } + } + + onReaderClosed(); + + return NULL; +} + +/** + * Sends string s to the radio with a \r appended. + * Returns AT_ERROR_* on error, 0 on success + * + * This function exists because as of writing, android libc does not + * have buffered stdio. 
+ */ +static int writeline (const char *s) +{ + size_t cur = 0; + size_t len = strlen(s); + ssize_t written; + static char at_command[64]; + + if (s_fd < 0 || s_readerClosed > 0) { + return AT_ERROR_CHANNEL_CLOSED; + } + + LOGD("AT> %s", s); + + AT_DUMP( ">> ", s, strlen(s) ); + +#if 1 //send '\r' maybe fail via USB controller: Intel Corporation 7 Series/C210 Series Chipset Family USB xHCI Host Controller (rev 04) + if (len < (sizeof(at_command) - 1)) { + strcpy(at_command, s); + at_command[len++] = '\r'; + s = (const char *)at_command; + } +#endif + + /* the main string */ + while (cur < len) { + do { + written = write (s_fd, s + cur, len - cur); + } while (written < 0 && errno == EINTR); + + if (written < 0) { + return AT_ERROR_GENERIC; + } + + cur += written; + } + +#if 1 //Quectel send '\r' maybe fail via USB controller: Intel Corporation 7 Series/C210 Series Chipset Family USB xHCI Host Controller (rev 04) + if (s == (const char *)at_command) { + return 0; + } +#endif + + /* the \r */ + + do { + written = write (s_fd, "\r" , 1); + } while ((written < 0 && errno == EINTR) || (written == 0)); + + if (written < 0) { + return AT_ERROR_GENERIC; + } + + return 0; +} +static int writeCtrlZ (const char *s) +{ + size_t cur = 0; + size_t len = strlen(s); + ssize_t written; + + if (s_fd < 0 || s_readerClosed > 0) { + return AT_ERROR_CHANNEL_CLOSED; + } + + LOGD("AT> %s^Z", s); + + AT_DUMP( ">* ", s, strlen(s) ); + + /* the main string */ + while (cur < len) { + do { + written = write (s_fd, s + cur, len - cur); + } while (written < 0 && errno == EINTR); + + if (written < 0) { + return AT_ERROR_GENERIC; + } + + cur += written; + } + + /* the ^Z */ + + do { + written = write (s_fd, "\032" , 1); + } while ((written < 0 && errno == EINTR) || (written == 0)); + + if (written < 0) { + return AT_ERROR_GENERIC; + } + + return 0; +} + +static int writeraw (const char *s, size_t len) { + size_t cur = 0; + ssize_t written; + + if (s_fd < 0 || s_readerClosed > 0) { + return AT_ERROR_CHANNEL_CLOSED; + } + + /* the main string */ + while (cur < len) { + struct pollfd pollfds[1] = {{s_fd, POLLOUT, 0}}; + int ret; + + ret = poll(pollfds, 1, -1); + if (ret <= 0) + break; + + do { + written = write (s_fd, s + cur, len - cur); + } while (written < 0 && errno == EINTR); + + if (written < 0) { + return AT_ERROR_GENERIC; + } + + cur += written; + } + + if (written < 0) { + return AT_ERROR_GENERIC; + } + + return cur; +} + +static void clearPendingCommand() +{ + if (sp_response != NULL) { + at_response_free(sp_response); + } + + sp_response = NULL; + s_responsePrefix = NULL; + s_smsPDU = NULL; +} + + +/** + * Starts AT handler on stream "fd' + * returns 0 on success, -1 on error + */ +int at_open(int fd, ATUnsolHandler h, int proxy) +{ + int ret; + pthread_attr_t attr; + + s_fd = fd; + s_unsolHandler = h; + s_readerClosed = 0; + s_atc_proxy = proxy; + + s_responsePrefix = NULL; + s_smsPDU = NULL; + sp_response = NULL; + + pthread_attr_init (&attr); + pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); + + ret = pthread_create(&s_tid_reader, &attr, readerLoop, NULL); + + if (ret < 0) { + LOGE("readerLoop create fail!"); + perror ("pthread_create\n"); + return -1; + } + + return 0; +} + +/* FIXME is it ok to call this from the reader and the command thread? 
*/ +void at_close() +{ + dbg_time("at_close"); + if (s_fd >= 0) { + close(s_fd); + } + s_fd = -1; + + pthread_mutex_lock(&cm_command_mutex); + + s_readerClosed = 1; + + pthread_cond_signal(&cm_command_cond); + + pthread_mutex_unlock(&cm_command_mutex); + + /* the reader thread should eventually die */ +} + +static ATResponse * at_response_new() +{ + return (ATResponse *) calloc(1, sizeof(ATResponse)); +} + +void at_response_free(ATResponse *p_response) +{ + ATLine *p_line; + + if (p_response == NULL) return; + + p_line = p_response->p_intermediates; + + while (p_line != NULL) { + ATLine *p_toFree; + + p_toFree = p_line; + p_line = p_line->p_next; + + free(p_toFree->line); + free(p_toFree); + } + + free (p_response->finalResponse); + free (p_response); +} + +/** + * The line reader places the intermediate responses in reverse order + * here we flip them back + */ +static void reverseIntermediates(ATResponse *p_response) +{ + ATLine *pcur,*pnext; + + pcur = p_response->p_intermediates; + p_response->p_intermediates = NULL; + + while (pcur != NULL) { + pnext = pcur->p_next; + pcur->p_next = p_response->p_intermediates; + p_response->p_intermediates = pcur; + pcur = pnext; + } +} + +/** + * Internal send_command implementation + * Doesn't lock or call the timeout callback + * + * timeoutMsec == 0 means infinite timeout + */ +static int at_send_command_full_nolock (const char *command, ATCommandType type, + const char *responsePrefix, const char *smspdu, + long long timeoutMsec, ATResponse **pp_outResponse) +{ + int err = 0; + + if (!timeoutMsec) + timeoutMsec = 15000; + + if(sp_response != NULL) { + err = AT_ERROR_COMMAND_PENDING; + goto error; + } + + if (command != NULL) + err = writeline (command); + + if (err < 0) { + printf("%s errno: %d (%s)\n", __func__, errno, strerror(errno)); + goto error; + } + + s_type = type; + s_responsePrefix = responsePrefix; + s_smsPDU = smspdu; + sp_response = at_response_new(); + + while (sp_response->finalResponse == NULL && s_readerClosed == 0) { + err = pthread_cond_timeout_np(&cm_command_cond, &cm_command_mutex, timeoutMsec); + + if (err == ETIMEDOUT) { + err = AT_ERROR_TIMEOUT; + goto error; + } + } + + if (pp_outResponse == NULL) { + at_response_free(sp_response); + } else { + /* line reader stores intermediate responses in reverse order */ + reverseIntermediates(sp_response); + *pp_outResponse = sp_response; + } + + sp_response = NULL; + + if(s_readerClosed > 0) { + err = AT_ERROR_CHANNEL_CLOSED; + goto error; + } + + err = 0; +error: + clearPendingCommand(); + + return err; +} + +/** + * Internal send_command implementation + * + * timeoutMsec == 0 means infinite timeout + */ +static int at_send_command_full (const char *command, ATCommandType type, + const char *responsePrefix, const char *smspdu, + long long timeoutMsec, ATResponse **pp_outResponse) +{ + int err; + + if (0 != pthread_equal(s_tid_reader, pthread_self())) { + /* cannot be called from reader thread */ + return AT_ERROR_INVALID_THREAD; + } + + pthread_mutex_lock(&cm_command_mutex); + + err = at_send_command_full_nolock(command, type, + responsePrefix, smspdu, + timeoutMsec, pp_outResponse); + + pthread_mutex_unlock(&cm_command_mutex); + + if (err == AT_ERROR_TIMEOUT && s_onTimeout != NULL) { + s_onTimeout(); + } + + return err; +} + + +/** + * Issue a single normal AT command with no intermediate response expected + * + * "command" should not include \r + * pp_outResponse can be NULL + * + * if non-NULL, the resulting ATResponse * must be eventually freed with + * at_response_free + */ 
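+/* Illustrative usage sketch (not part of the original patch, hence the
+ * "#if 0"): issue a query, walk the intermediate lines, then honour the
+ * ownership contract described above by freeing the response. Assumes
+ * the channel was already opened with at_open(). */
+#if 0
+static void example_dump_revision(void)
+{
+    ATResponse *p_response = NULL;
+    ATLine *p_cur;
+
+    /* an empty prefix makes MULTILINE collect every intermediate line */
+    if (at_send_command_multiline("AT+CGMR", "", &p_response) != 0)
+        return;
+    for (p_cur = p_response->p_intermediates; p_cur; p_cur = p_cur->p_next)
+        LOGD("revision: %s", p_cur->line);
+    at_response_free(p_response);   /* caller owns the response */
+}
+#endif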
+int at_send_command (const char *command, ATResponse **pp_outResponse) +{ + int err; + + err = at_send_command_full (command, NO_RESULT, NULL, + NULL, 0, pp_outResponse); + + return err; +} + + +int at_send_command_singleline (const char *command, + const char *responsePrefix, + ATResponse **pp_outResponse) +{ + int err; + + err = at_send_command_full (command, SINGLELINE, responsePrefix, + NULL, 0, pp_outResponse); + + if (err == 0 && pp_outResponse != NULL + && (*pp_outResponse)->success > 0 + && (*pp_outResponse)->p_intermediates == NULL + ) { + /* successful command must have an intermediate response */ + at_response_free(*pp_outResponse); + *pp_outResponse = NULL; + return AT_ERROR_INVALID_RESPONSE; + } + + return err; +} + + +int at_send_command_numeric (const char *command, + ATResponse **pp_outResponse) +{ + int err; + + err = at_send_command_full (command, NUMERIC, NULL, + NULL, 0, pp_outResponse); + + if (err == 0 && pp_outResponse != NULL + && (*pp_outResponse)->success > 0 + && (*pp_outResponse)->p_intermediates == NULL + ) { + /* successful command must have an intermediate response */ + at_response_free(*pp_outResponse); + *pp_outResponse = NULL; + return AT_ERROR_INVALID_RESPONSE; + } + + return err; +} + + +int at_send_command_sms (const char *command, + const char *pdu, + const char *responsePrefix, + ATResponse **pp_outResponse) +{ + int err; + + err = at_send_command_full (command, SINGLELINE, responsePrefix, + pdu, 0, pp_outResponse); + + if (err == 0 && pp_outResponse != NULL + && (*pp_outResponse)->success > 0 + && (*pp_outResponse)->p_intermediates == NULL + ) { + /* successful command must have an intermediate response */ + at_response_free(*pp_outResponse); + *pp_outResponse = NULL; + return AT_ERROR_INVALID_RESPONSE; + } + + return err; +} + +int at_send_command_multiline (const char *command, + const char *responsePrefix, + ATResponse **pp_outResponse) +{ + int err; + + err = at_send_command_full (command, MULTILINE, responsePrefix, + NULL, 0, pp_outResponse); + + return err; +} + +int at_send_command_raw (const char *command, + const char *raw_data, unsigned int raw_len, + const char *responsePrefix, + ATResponse **pp_outResponse) +{ + int err; + + s_raw_data = raw_data; + s_raw_len = raw_len; + err = at_send_command_full (command, SINGLELINE, responsePrefix, + NULL, 0, pp_outResponse); + + return err; +} + +/** + * Periodically issue an AT command and wait for a response. 
+ * Used to ensure channel has start up and is active + */ + +int at_handshake() +{ + int i; + int err = 0; + + if (0 != pthread_equal(s_tid_reader, pthread_self())) { + /* cannot be called from reader thread */ + return AT_ERROR_INVALID_THREAD; + } + + pthread_mutex_lock(&cm_command_mutex); + + for (i = 0 ; i < HANDSHAKE_RETRY_COUNT ; i++) { + /* some stacks start with verbose off */ + err = at_send_command_full_nolock ("ATE0Q0V1", NO_RESULT, + NULL, NULL, HANDSHAKE_TIMEOUT_MSEC, NULL); + + if (err == 0) { + break; + } + } + + pthread_mutex_unlock(&cm_command_mutex); + + if (err == 0) { + /* pause for a bit to let the input buffer drain any unmatched OK's + (they will appear as extraneous unsolicited responses) */ + + sleepMsec(HANDSHAKE_TIMEOUT_MSEC); + } + + return err; +} + +AT_CME_Error at_get_cme_error(const ATResponse *p_response) +{ + int ret; + int err; + char *p_cur; + + if (p_response == NULL) + return CME_ERROR_NON_CME; + + if (p_response->success > 0) { + return CME_SUCCESS; + } + + if (p_response->finalResponse == NULL + || !strStartsWith(p_response->finalResponse, "+CME ERROR:") + ) { + return CME_ERROR_NON_CME; + } + + p_cur = p_response->finalResponse; + err = at_tok_start(&p_cur); + + if (err < 0) { + return CME_ERROR_NON_CME; + } + + err = at_tok_nextint(&p_cur, &ret); + + if (err < 0) { + return CME_ERROR_NON_CME; + } + + return (AT_CME_Error) ret; +} + +/** This callback is invoked on the command thread */ +void at_set_on_timeout(void (*onTimeout)(void)) +{ + s_onTimeout = onTimeout; +} + +/** + * This callback is invoked on the reader thread (like ATUnsolHandler) + * when the input stream closes before you call at_close + * (not when you call at_close()) + * You should still call at_close() + */ +void at_set_on_reader_closed(void (*onClose)(void)) +{ + s_onReaderClosed = onClose; +} diff --git a/package/wwan/driver/quectel_cm_5G/src/atchannel.h b/package/wwan/driver/quectel_cm_5G/src/atchannel.h new file mode 100644 index 000000000..cce28b1df --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/atchannel.h @@ -0,0 +1,152 @@ +/* //device/system/reference-ril/atchannel.h +** +** Copyright 2006, The Android Open Source Project +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. 
+*/
+
+#ifndef ATCHANNEL_H
+#define ATCHANNEL_H 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* define AT_DEBUG to send AT traffic to /tmp/radio-at.log */
+#define AT_DEBUG 0
+
+#if AT_DEBUG
+extern void AT_DUMP(const char* prefix, const char* buff, int len);
+#else
+#define AT_DUMP(prefix,buff,len) do{}while(0)
+#endif
+
+#define AT_ERROR_GENERIC -1
+#define AT_ERROR_COMMAND_PENDING -2
+#define AT_ERROR_CHANNEL_CLOSED -3
+#define AT_ERROR_TIMEOUT -4
+#define AT_ERROR_INVALID_THREAD -5 /* AT commands may not be issued from
+                                      reader thread (or unsolicited response
+                                      callback) */
+#define AT_ERROR_INVALID_RESPONSE -6 /* eg an at_send_command_singleline that
+                                        did not get back an intermediate
+                                        response */
+
+
+typedef enum {
+    NO_RESULT,   /* no intermediate response expected */
+    NUMERIC,     /* a single intermediate response starting with a 0-9 */
+    SINGLELINE,  /* a single intermediate response starting with a prefix */
+    MULTILINE    /* multiple line intermediate response
+                    starting with a prefix */
+} ATCommandType;
+
+/** a singly-linked list of intermediate responses */
+typedef struct ATLine {
+    struct ATLine *p_next;
+    char *line;
+} ATLine;
+
+/** Free this with at_response_free() */
+typedef struct {
+    int success;              /* true if final response indicates
+                                 success (eg "OK") */
+    char *finalResponse;      /* eg OK, ERROR */
+    ATLine *p_intermediates;  /* any intermediate responses */
+} ATResponse;
+
+/**
+ * a user-provided unsolicited response handler function
+ * this will be called from the reader thread, so do not block
+ * "s" is the line, and "sms_pdu" is either NULL or the PDU response
+ * for multi-line TS 27.005 SMS PDU responses (eg +CMT:)
+ */
+typedef void (*ATUnsolHandler)(const char *s, const char *sms_pdu);
+
+int at_open(int fd, ATUnsolHandler h, int proxy);
+void at_close();
+
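+/* Illustrative only (not part of the original patch): a minimal handler
+ * of the shape at_open() expects. "s" is the unsolicited line; "sms_pdu"
+ * is non-NULL only for the two-line SMS responses listed in atchannel.c.
+ * The handle_* helpers are hypothetical. */
+#if 0
+static void example_unsol_handler(const char *s, const char *sms_pdu)
+{
+    if (strStartsWith(s, "+CMT:"))
+        handle_incoming_sms(s, sms_pdu);   /* hypothetical helper */
+    else if (strStartsWith(s, "+CGEV:"))
+        handle_bearer_event(s);            /* hypothetical helper */
+}
+#endif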
+/* This callback is invoked on the command thread.
+   You should reset or handshake here to avoid getting out of sync */
+void at_set_on_timeout(void (*onTimeout)(void));
+/* This callback is invoked on the reader thread (like ATUnsolHandler)
+   when the input stream closes before you call at_close
+   (not when you call at_close())
+   You should still call at_close()
+   It may also be invoked immediately from the current thread if the read
+   channel is already closed */
+void at_set_on_reader_closed(void (*onClose)(void));
+
+int at_send_command_singleline (const char *command,
+                                const char *responsePrefix,
+                                ATResponse **pp_outResponse);
+
+int at_send_command_numeric (const char *command,
+                             ATResponse **pp_outResponse);
+
+int at_send_command_multiline (const char *command,
+                               const char *responsePrefix,
+                               ATResponse **pp_outResponse);
+
+int at_send_command_raw (const char *command,
+                         const char *raw_data, unsigned int raw_len,
+                         const char *responsePrefix,
+                         ATResponse **pp_outResponse);
+
+int at_handshake();
+
+int at_send_command (const char *command, ATResponse **pp_outResponse);
+
+int at_send_command_sms (const char *command, const char *pdu,
+                         const char *responsePrefix,
+                         ATResponse **pp_outResponse);
+
+void at_response_free(ATResponse *p_response);
+
+int strStartsWith(const char *line, const char *prefix);
+
+typedef enum {
+    CME_ERROR_NON_CME = -1,
+    CME_SUCCESS = 0,
+
+    CME_OPERATION_NOT_ALLOWED = 3,
+    CME_OPERATION_NOT_SUPPORTED = 4,
+    CME_PH_SIM_PIN = 5,
+    CME_PH_FSIM_PIN = 6,
+    CME_PH_FSIM_PUK = 7,
+    CME_SIM_NOT_INSERTED = 10,
+    CME_SIM_PIN_REQUIRED = 11,
+    CME_SIM_PUK_REQUIRED = 12,
+    CME_FAILURE = 13,
+    CME_SIM_BUSY = 14,
+    CME_SIM_WRONG = 15,
+    CME_INCORRECT_PASSWORD = 16,
+    CME_SIM_PIN2_REQUIRED = 17,
+    CME_SIM_PUK2_REQUIRED = 18,
+    CME_MEMORY_FULL = 20,
+    CME_INVALID_INDEX = 21,
+    CME_NOT_FOUND = 22,
+    CME_MEMORY_FAILURE = 23,
+    CME_STRING_TO_LONG = 24,
+    CME_INVALID_CHAR = 25,
+    CME_DIALSTR_TO_LONG = 26,
+    CME_INVALID_DIALCHAR = 27,
+} AT_CME_Error;
+
+AT_CME_Error at_get_cme_error(const ATResponse *p_response);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*ATCHANNEL_H*/
diff --git a/package/wwan/driver/quectel_cm_5G/src/configure.ac b/package/wwan/driver/quectel_cm_5G/src/configure.ac
new file mode 100644
index 000000000..f4c60eac1
--- /dev/null
+++ b/package/wwan/driver/quectel_cm_5G/src/configure.ac
@@ -0,0 +1,48 @@
+# -*- Autoconf -*-
+# Process this file with autoconf to produce a configure script.
+
+AC_PREREQ([2.61])
+AC_INIT([quectel-CM], [1.0], [fae-support@quectel.com])
+AC_CONFIG_HEADERS([config.h])
+
+# Checks for programs.
+AC_PROG_CC
+
+# Checks for libraries.
+
+# Checks for header files.
+
+# Checks for typedefs, structures, and compiler characteristics.
+AC_ARG_WITH(sanitized-headers,
+        AS_HELP_STRING([--with-sanitized-headers=DIR],
+                [Specify the location of the sanitized Linux headers]),
+        [CPPFLAGS="$CPPFLAGS -idirafter $withval"])
+
+AC_ARG_WITH([qrtr],
+        AS_HELP_STRING([--with-qrtr],
+                [build with the qrtr transport]))
+
+if (test "x${with_qrtr}" = "xyes"); then
+        #AC_DEFINE(ENABLE_USEQTRT, 1, [Define if uses qrtr])
+        AC_CHECK_HEADERS([linux/qrtr.h linux/rmnet_data.h])
+fi
+
+AM_CONDITIONAL(USE_QRTR, test "x${with_qrtr}" = "xyes")
+
+AC_ARG_WITH([msm-ipc],
+        AS_HELP_STRING([--with-msm-ipc],
+                [build with the msm-ipc transport]))
+
+if (test "x${with_msm_ipc}" = "xyes"); then
+        #AC_DEFINE(ENABLE_USEQTRT, 1, [Define if uses qrtr])
+        AC_CHECK_HEADERS([linux/msm_ipc.h linux/rmnet_data.h])
+fi
+
+AM_CONDITIONAL(USE_MSM_IPC, test "x${with_msm_ipc}" = "xyes")
+
+# Checks for library functions.
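+
+# Note (illustrative, not part of the original patch): AC_CHECK_HEADERS
+# above only defines HAVE_LINUX_QRTR_H / HAVE_LINUX_MSM_IPC_H in config.h;
+# the C sources are expected to guard the optional kernel headers
+# themselves, along these lines:
+#
+#   #include "config.h"
+#   #ifdef HAVE_LINUX_QRTR_H
+#   #include <linux/qrtr.h>
+#   #endif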
+ +# Does not strictly follow GNU Coding standards +AM_INIT_AUTOMAKE([foreign subdir-objects]) +AC_CONFIG_FILES([Makefile]) +AC_OUTPUT diff --git a/package/wwan/driver/quectel_cm_5G/src/default.script b/package/wwan/driver/quectel_cm_5G/src/default.script new file mode 100644 index 000000000..26b95c1b9 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/default.script @@ -0,0 +1,63 @@ +#!/bin/sh +# Busybox udhcpc dispatcher script. Copyright (C) 2009 by Axel Beckert. +# +# Based on the busybox example scripts and the old udhcp source +# package default.* scripts. + +RESOLV_CONF="/etc/resolv.conf" + +case $1 in + bound|renew) + [ -n "$broadcast" ] && BROADCAST="broadcast $broadcast" + [ -n "$subnet" ] && NETMASK="netmask $subnet" + + /sbin/ifconfig $interface $ip $BROADCAST $NETMASK + + if [ -n "$router" ]; then + echo "$0: Resetting default routes" + while /sbin/route del default gw 0.0.0.0 dev $interface; do :; done + + metric=0 + for i in $router; do + /sbin/route add default gw $i dev $interface metric $metric + metric=$(($metric + 1)) + done + fi + + # Update resolver configuration file + R="" + [ -n "$domain" ] && R="domain $domain +" + for i in $dns; do + echo "$0: Adding DNS $i" + R="${R}nameserver $i +" + done + + if [ -x /sbin/resolvconf ]; then + echo -n "$R" | resolvconf -a "${interface}.udhcpc" + else + echo -n "$R" > "$RESOLV_CONF" + fi + ;; + + deconfig) + if [ -x /sbin/resolvconf ]; then + resolvconf -d "${interface}.udhcpc" + fi + /sbin/ifconfig $interface 0.0.0.0 + ;; + + leasefail) + echo "$0: Lease failed: $message" + ;; + + nak) + echo "$0: Received a NAK: $message" + ;; + + *) + echo "$0: Unknown udhcpc command: $1"; + exit 1; + ;; +esac diff --git a/package/wwan/driver/quectel_cm_5G/src/default.script_ip b/package/wwan/driver/quectel_cm_5G/src/default.script_ip new file mode 100644 index 000000000..24f8e59c2 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/default.script_ip @@ -0,0 +1,61 @@ +#!/bin/sh +# Busybox udhcpc dispatcher script. Copyright (C) 2009 by Axel Beckert. +# +# Based on the busybox example scripts and the old udhcp source +# package default.* scripts. + +RESOLV_CONF="/etc/resolv.conf" +IPCMD=`which ip` + +case $1 in + bound|renew) + $IPCMD address add broadcast $broadcast $ip/$subnet dev $interface + + if [ -n "$router" ]; then + echo "$0: Resetting default routes" + while $IPCMD route del default dev $interface; do :; done + + metric=0 + for i in $router; do + $IPCMD route add default dev $interface via $router metric $metric + metric=$(($metric + 1)) + done + fi + + # Update resolver configuration file + R="" + [ -n "$domain" ] && R="domain $domain +" + for i in $dns; do + echo "$0: Adding DNS $i" + R="${R}nameserver $i +" + done + + if [ -x /sbin/resolvconf ]; then + echo -n "$R" | resolvconf -a "${interface}.udhcpc" + else + echo -n "$R" > "$RESOLV_CONF" + fi + ;; + + deconfig) + if [ -x /sbin/resolvconf ]; then + resolvconf -d "${interface}.udhcpc" + fi + $IPCMD address flush dev $interface + ;; + + leasefail) + echo "$0: Lease failed: $message" + ;; + + nak) + echo "$0: Received a NAK: $message" + ;; + + *) + echo "$0: Unknown udhcpc command: $1"; + exit 1; + ;; +esac diff --git a/package/wwan/driver/quectel_cm_5G/src/device.c b/package/wwan/driver/quectel_cm_5G/src/device.c new file mode 100644 index 000000000..9371f70b2 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/device.c @@ -0,0 +1,746 @@ +/****************************************************************************** + @file device.c + @brief QMI device dirver. 
+
+  DESCRIPTION
+  Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules.
+
+  INITIALIZATION AND SEQUENCING REQUIREMENTS
+  None.
+
+  ---------------------------------------------------------------------------
+  Copyright (c) 2016 - 2020 Quectel Wireless Solution, Co., Ltd. All Rights Reserved.
+  Quectel Wireless Solution Proprietary and Confidential.
+  ---------------------------------------------------------------------------
+******************************************************************************/
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <dirent.h>
+#include <pthread.h>
+#include <limits.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <net/if.h>
+#include <linux/usbdevice_fs.h>
+
+#include "QMIThread.h"
+#include "ethtool-copy.h"
+
+#define USB_CLASS_VENDOR_SPEC 0xff
+#define USB_CLASS_COMM 2
+#define USB_CDC_SUBCLASS_ACM 0x02
+#define USB_CDC_SUBCLASS_ETHERNET 0x06
+#define USB_CDC_SUBCLASS_NCM 0x0d
+#define USB_CDC_SUBCLASS_MBIM 0x0e
+#define USB_CLASS_WIRELESS_CONTROLLER 0xe0
+
+#define CM_MAX_PATHLEN 256
+
+#define CM_INVALID_VAL (~((int)0))
+/* Read the first line of file 'fname', convert it according to 'base',
+ * and return the number (CM_INVALID_VAL on any error). */
+static int file_get_value(const char *fname, int base)
+{
+    FILE *fp = NULL;
+    long num;
+    char buff[32 + 1] = {'\0'};
+    char *endptr = NULL;
+
+    fp = fopen(fname, "r");
+    if (!fp) goto error;
+    if (fgets(buff, sizeof(buff), fp) == NULL)
+        goto error;
+    fclose(fp);
+    fp = NULL;   /* avoid double-close on the error paths below */
+
+    num = (int)strtol(buff, &endptr, base);
+    if (errno == ERANGE && (num == LONG_MAX || num == LONG_MIN))
+        goto error;
+    /* if there is no digit in buff */
+    if (endptr == buff)
+        goto error;
+
+    if (debug_qmi)
+        dbg_time("(%s) = %lx", fname, num);
+    return (int)num;
+
+error:
+    if (fp) fclose(fp);
+    return CM_INVALID_VAL;
+}
+
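+/* Illustrative only (not part of the original patch): file_get_value()
+ * is meant for one-line sysfs attributes, e.g.
+ *
+ *   int vid = file_get_value("/sys/bus/usb/devices/1-1/idVendor", 16);
+ *   if (vid == CM_INVALID_VAL) { ... attribute missing or malformed ... }
+ *
+ * (the "1-1" bus path is a made-up example; base 16 matches hex
+ * attributes such as idVendor, base 10 decimal ones such as busnum). */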
+/*
+ * This function will search the directory 'dirname' and return the first child.
+ * '.' and '..' are ignored by default
+ */
+static int dir_get_child(const char *dirname, char *buff, unsigned bufsize, const char *prefix)
+{
+    struct dirent *entptr = NULL;
+    DIR *dirptr;
+
+    buff[0] = 0;
+
+    dirptr = opendir(dirname);
+    if (!dirptr)
+        return -1;
+
+    while ((entptr = readdir(dirptr))) {
+        if (entptr->d_name[0] == '.')
+            continue;
+        if (prefix && strlen(prefix) && strncmp(entptr->d_name, prefix, strlen(prefix)))
+            continue;
+        snprintf(buff, bufsize, "%.31s", entptr->d_name);
+        break;
+    }
+    closedir(dirptr);
+
+    return 0;
+}
+
+static int conf_get_val(const char *fname, const char *key)
+{
+    char buff[128] = {'\0'};
+    FILE *fp = fopen(fname, "r");
+    if (!fp)
+        return CM_INVALID_VAL;
+
+    while (fgets(buff, sizeof(buff)-1, fp)) {
+        char prefix[128] = {'\0'};
+        char tail[128] = {'\0'};
+        /* To eliminate cppcheck warning: Assume string length is no more than 15 */
+        sscanf(buff, "%15[^=]=%15s", prefix, tail);
+        if (!strncasecmp(prefix, key, strlen(key))) {
+            fclose(fp);
+            return atoi(tail);
+        }
+    }
+
+    fclose(fp);
+    return CM_INVALID_VAL;
+}
+
+static void query_usb_device_info(char *path, struct usb_device_info *p) {
+    size_t offset = strlen(path);
+
+    memset(p, 0, sizeof(*p));
+
+    path[offset] = '\0';
+    strcat(path, "/idVendor");
+    p->idVendor = file_get_value(path, 16);
+
+    if (p->idVendor == CM_INVALID_VAL)
+        return;
+
+    path[offset] = '\0';
+    strcat(path, "/idProduct");
+    p->idProduct = file_get_value(path, 16);
+
+    path[offset] = '\0';
+    strcat(path, "/busnum");
+    p->busnum = file_get_value(path, 10);
+
+    path[offset] = '\0';
+    strcat(path, "/devnum");
+    p->devnum = file_get_value(path, 10);
+
+    path[offset] = '\0';
+    strcat(path, "/bNumInterfaces");
+    p->bNumInterfaces = file_get_value(path, 10);
+
+    path[offset] = '\0';
+}
+
+static void query_usb_interface_info(char *path, struct usb_interface_info *p) {
+    char driver[128];
+    size_t offset = strlen(path);
+    int n;
+
+    memset(p, 0, sizeof(*p));
+
+    path[offset] = '\0';
+    strcat(path, "/bNumEndpoints");
+    p->bNumEndpoints = file_get_value(path, 16);
+
+    path[offset] = '\0';
+    strcat(path, "/bInterfaceClass");
+    p->bInterfaceClass = file_get_value(path, 16);
+
+    path[offset] = '\0';
+    strcat(path, "/bInterfaceSubClass");
+    p->bInterfaceSubClass = file_get_value(path, 16);
+
+    path[offset] = '\0';
+    strcat(path, "/bInterfaceProtocol");
+    p->bInterfaceProtocol = file_get_value(path, 16);
+
+    path[offset] = '\0';
+    strcat(path, "/driver");
+    n = readlink(path, driver, sizeof(driver) - 1);
+    if (n > 0) {
+        driver[n] = 0;
+        if (debug_qmi) dbg_time("driver -> %s", driver);
+        n = strlen(driver);
+        while (n > 0) {
+            if (driver[n] == '/')
+                break;
+            n--;
+        }
+        strncpy(p->driver, &driver[n+1], sizeof(p->driver) - 1);
+    }
+
+    path[offset] = '\0';
+}
+
+static int detect_path_cdc_wdm_or_qcqmi(char *path, char *devname, size_t bufsize)
+{
+    size_t offset = strlen(path);
+    char tmp[32];
+
+    devname[0] = 0;
+
+    if (access(path, R_OK))
+        return -1;
+
+    path[offset] = '\0';
+    strcat(path, "/GobiQMI");
+    if (!access(path, R_OK))
+        goto step_1;
+
+    path[offset] = '\0';
+    strcat(path, "/usbmisc");
+    if (!access(path, R_OK))
+        goto step_1;
+
+    path[offset] = '\0';
+    strcat(path, "/usb");
+    if (!access(path, R_OK))
+        goto step_1;
+
+    return -1;
+
+step_1:
+    /* get device(qcqmiX|cdc-wdmX) */
+    if (debug_qmi) dbg_time("%s", path);
+    dir_get_child(path, tmp, sizeof(tmp), NULL);
+    if (tmp[0] == '\0')
+        return -1;
+
+    /* There is a chance that no device (qcqmiX|cdc-wdmX) is generated. We should warn the user about that!
*/ + snprintf(devname, bufsize, "/dev/%s", tmp); + if (access(devname, R_OK | F_OK) && errno == ENOENT) + { + int major, minor; + + dbg_time("access %s failed, errno: %d (%s)", devname, errno, strerror(errno)); + strcat(path, "/"); + strcat(path, tmp); + strcat(path, "/uevent"); + major = conf_get_val(path, "MAJOR"); + minor = conf_get_val(path, "MINOR"); + + if(major == CM_INVALID_VAL || minor == CM_INVALID_VAL) + dbg_time("get major and minor failed"); + else if (mknod(devname, S_IFCHR|0666, (((major & 0xfff) << 8) | (minor & 0xff) | ((minor & 0xfff00) << 12)))) + dbg_time("please mknod %s c %d %d", devname, major, minor); + } + + return 0; +} + +/* To detect the device info of the modem. + * return: + * FALSE -> fail + * TRUE -> ok + */ +BOOL qmidevice_detect(char *qmichannel, char *usbnet_adapter, unsigned bufsize, PROFILE_T *profile) { + struct dirent* ent = NULL; + DIR *pDir; + const char *rootdir = "/sys/bus/usb/devices"; + struct { + char path[255*2]; + } *pl; + pl = (typeof(pl)) malloc(sizeof(*pl)); + memset(pl, 0x00, sizeof(*pl)); + + pDir = opendir(rootdir); + if (!pDir) { + dbg_time("opendir %s failed: %s", rootdir, strerror(errno)); + goto error; + } + + while ((ent = readdir(pDir)) != NULL) { + char netcard[32+1] = {'\0'}; + char devname[32+5] = {'\0'}; //+strlen("/dev/") + int netIntf; + int driver_type; + + if (ent->d_name[0] == 'u') + continue; + + snprintf(pl->path, sizeof(pl->path), "%s/%s", rootdir, ent->d_name); + query_usb_device_info(pl->path, &profile->usb_dev); + if (profile->usb_dev.idVendor == CM_INVALID_VAL) + continue; + + if (profile->usb_dev.idVendor == 0x2c7c || profile->usb_dev.idVendor == 0x05c6) { + dbg_time("Find %s/%s idVendor=0x%x idProduct=0x%x, bus=0x%03x, dev=0x%03x", + rootdir, ent->d_name, profile->usb_dev.idVendor, profile->usb_dev.idProduct, + profile->usb_dev.busnum, profile->usb_dev.devnum); + } + + /* get network interface */ + /* NOTICE: there is a case that, bNumberInterface=6, but the net interface is 8 */ + /* toolchain-mips_24kc_gcc-5.4.0_musl donot support GLOB_BRACE */ + /* RG500U's MBIM is at inteface 0 */ + for (netIntf = 0; netIntf < (profile->usb_dev.bNumInterfaces + 8); netIntf++) { + snprintf(pl->path, sizeof(pl->path), "%s/%s:1.%d/net", rootdir, ent->d_name, netIntf); + dir_get_child(pl->path, netcard, sizeof(netcard), NULL); + if (netcard[0]) + break; + } + + if (netcard[0] == '\0') { //for centos 2.6.x + const char *n= "usb0"; + const char *c = "qcqmi0"; + + snprintf(pl->path, sizeof(pl->path), "%s/%s:1.4/net:%s", rootdir, ent->d_name, n); + if (!access(pl->path, F_OK)) { + snprintf(pl->path, sizeof(pl->path), "%s/%s:1.4/GobiQMI:%s", rootdir, ent->d_name, c); + if (!access(pl->path, F_OK)) { + snprintf(qmichannel, bufsize, "/dev/%s", c); + snprintf(usbnet_adapter, bufsize, "%s", n); + snprintf(pl->path, sizeof(pl->path), "%s/%s:1.4", rootdir, ent->d_name); + query_usb_interface_info(pl->path, &profile->usb_intf); + break; + } + } + } + + if (netcard[0] == '\0') + continue; + + /* not '-i iface' */ + if (usbnet_adapter[0] && strcmp(usbnet_adapter, netcard)) + continue; + + snprintf(pl->path, sizeof(pl->path), "%s/%s:1.%d", rootdir, ent->d_name, netIntf); + query_usb_interface_info(pl->path, &profile->usb_intf); + driver_type = get_driver_type(profile); + + if (driver_type == SOFTWARE_QMI || driver_type == SOFTWARE_MBIM) { + detect_path_cdc_wdm_or_qcqmi(pl->path, devname, sizeof(devname)); + } + else if (driver_type == SOFTWARE_ECM_RNDIS_NCM) + { + int atIntf = -1; + + if (profile->usb_dev.idVendor == 0x2c7c) { //Quectel + 
switch (profile->usb_dev.idProduct) { //EC200U + case 0x0901: //EC200U + case 0x8101: //RG801H + atIntf = 2; + break; + case 0x0900: //RG500U + atIntf = 4; + break; + case 0x6026: //EC200T + case 0x6005: //EC200A + case 0x6002: //EC200S + case 0x6001: //EC100Y + atIntf = 3; + break; + default: + dbg_time("unknow at interface for USB idProduct:%04x\n", profile->usb_dev.idProduct); + break; + } + } + + if (atIntf != -1) { + snprintf(pl->path, sizeof(pl->path), "%s/%s:1.%d", rootdir, ent->d_name, atIntf); + dir_get_child(pl->path, devname, sizeof(devname), "tty"); + if (devname[0] && !strcmp(devname, "tty")) { + snprintf(pl->path, sizeof(pl->path), "%s/%s:1.%d/tty", rootdir, ent->d_name, atIntf); + dir_get_child(pl->path, devname, sizeof(devname), "tty"); + } + } + } + + if (netcard[0] && devname[0]) { + if (devname[0] == '/') + snprintf(qmichannel, bufsize, "%s", devname); + else + snprintf(qmichannel, bufsize, "/dev/%s", devname); + snprintf(usbnet_adapter, bufsize, "%s", netcard); + dbg_time("Auto find qmichannel = %s", qmichannel); + dbg_time("Auto find usbnet_adapter = %s", usbnet_adapter); + break; + } + } + closedir(pDir); + + if (qmichannel[0] == '\0' || usbnet_adapter[0] == '\0') { + dbg_time("network interface '%s' or qmidev '%s' is not exist", usbnet_adapter, qmichannel); + goto error; + } + free(pl); + return TRUE; +error: + free(pl); + return FALSE; +} + +int mhidevice_detect(char *qmichannel, char *usbnet_adapter, PROFILE_T *profile) { + struct dirent* ent = NULL; + DIR *pDir; + const char *rootdir_mhi[] = {"/sys/bus/mhi_q/devices", "/sys/bus/mhi/devices", NULL}; + int i = 0; + char path[256]; + int find = 0; + + while (rootdir_mhi[i]) { + const char *rootdir = rootdir_mhi[i++]; + + pDir = opendir(rootdir); + if (!pDir) { + if (errno != ENOENT) + dbg_time("opendir %s failed: %s", rootdir, strerror(errno)); + continue; + } + + while ((ent = readdir(pDir)) != NULL) { + char netcard[32] = {'\0'}; + char devname[32] = {'\0'}; + int software_interface = SOFTWARE_QMI; + char *pNode = NULL; + + pNode = strstr(ent->d_name, "_IP_HW0"); //0306_00.01.00_IP_HW0 + if (!pNode) + continue; + + snprintf(path, sizeof(path), "%s/%.32s/net", rootdir, ent->d_name); + dir_get_child(path, netcard, sizeof(netcard), NULL); + if (!netcard[0]) + continue; + + if (usbnet_adapter[0] && strcmp(netcard, usbnet_adapter)) //not '-i x' + continue; + + if (!strcmp(rootdir, "/sys/bus/mhi/devices")) { + snprintf(path, sizeof(path), "%s/%.13s_IPCR", rootdir, ent->d_name); // 13 is sizeof(0306_00.01.00) + if (!access(path, F_OK)) { + /* we also need 'cat /dev/mhi_0306_00.01.00_pipe_14' to enable rmnet as like USB's DTR + or will get error 'requestSetEthMode QMUXResult = 0x1, QMUXError = 0x46' */ + sprintf(usbnet_adapter, "%s", netcard); + sprintf(qmichannel, "qrtr-%d", 3); // 3 is sdx modem's node id + profile->software_interface = SOFTWARE_QRTR; + find = 1; + break; + } + continue; + } + + snprintf(path, sizeof(path), "%s/%.13s_IPCR", rootdir, ent->d_name); + if (access(path, F_OK)) { + snprintf(path, sizeof(path), "%s/%.13s_QMI0", rootdir, ent->d_name); + if (access(path, F_OK)) { + snprintf(path, sizeof(path), "%s/%.13s_MBIM", rootdir, ent->d_name); + if (!access(path, F_OK)) + software_interface = SOFTWARE_MBIM; + } + } + if (access(path, F_OK)) + continue; + + strncat(path, "/mhi_uci_q", sizeof(path)-1); + dir_get_child(path, devname, sizeof(devname), NULL); + if (!devname[0]) + continue; + + sprintf(usbnet_adapter, "%s", netcard); + sprintf(qmichannel, "/dev/%s", devname); + profile->software_interface = 
software_interface; + find = 1; + break; + } + + closedir(pDir); + } + + return find; +} + +int atdevice_detect(char *atchannel, char *usbnet_adapter, PROFILE_T *profile) { + if (!access("/sys/class/net/sipa_dummy0", F_OK)) { + strcpy(usbnet_adapter, "sipa_dummy0"); + snprintf(profile->qmapnet_adapter, sizeof(profile->qmapnet_adapter), "%s%d", "pcie", profile->pdp - 1); + } + else { + dbg_time("atdevice_detect failed"); + goto error; + } + + if (!access("/dev/stty_nr31", F_OK)) { + strcpy(atchannel, "/dev/stty_nr31"); + profile->software_interface = SOFTWARE_ECM_RNDIS_NCM; + } + else { + goto error; + } + + return 1; +error: + return 0; +} + + +int get_driver_type(PROFILE_T *profile) +{ + /* QMI_WWAN */ + if (profile->usb_intf.bInterfaceClass == USB_CLASS_VENDOR_SPEC) { + return SOFTWARE_QMI; + } + else if (profile->usb_intf.bInterfaceClass == USB_CLASS_COMM) { + switch (profile->usb_intf.bInterfaceSubClass) { + case USB_CDC_SUBCLASS_MBIM: + return SOFTWARE_MBIM; + break; + case USB_CDC_SUBCLASS_ETHERNET: + case USB_CDC_SUBCLASS_NCM: + return SOFTWARE_ECM_RNDIS_NCM; + break; + default: + break; + } + } + else if (profile->usb_intf.bInterfaceClass == USB_CLASS_WIRELESS_CONTROLLER) { + if (profile->usb_intf.bInterfaceSubClass == 1 && profile->usb_intf.bInterfaceProtocol == 3) + return SOFTWARE_ECM_RNDIS_NCM; + } + + dbg_time("%s unknow bInterfaceClass=%d, bInterfaceSubClass=%d", __func__, + profile->usb_intf.bInterfaceClass, profile->usb_intf.bInterfaceSubClass); + return DRV_INVALID; +} + +struct usbfs_getdriver +{ + unsigned int interface; + char driver[255 + 1]; +}; + +struct usbfs_ioctl +{ + int ifno; /* interface 0..N ; negative numbers reserved */ + int ioctl_code; /* MUST encode size + direction of data so the + * macros in give correct values */ + void *data; /* param buffer (in, or out) */ +}; + +#define IOCTL_USBFS_DISCONNECT _IO('U', 22) +#define IOCTL_USBFS_CONNECT _IO('U', 23) + +int usbfs_is_kernel_driver_alive(int fd, int ifnum) +{ + struct usbfs_getdriver getdrv; + getdrv.interface = ifnum; + if (ioctl(fd, USBDEVFS_GETDRIVER, &getdrv) < 0) { + dbg_time("%s ioctl USBDEVFS_GETDRIVER failed, kernel driver may be inactive", __func__); + return 0; + } + dbg_time("%s find interface %d has match the driver %s", __func__, ifnum, getdrv.driver); + return 1; +} + +void usbfs_detach_kernel_driver(int fd, int ifnum) +{ + struct usbfs_ioctl operate; + operate.data = NULL; + operate.ifno = ifnum; + operate.ioctl_code = IOCTL_USBFS_DISCONNECT; + if (ioctl(fd, USBDEVFS_IOCTL, &operate) < 0) { + dbg_time("%s detach kernel driver failed", __func__); + } else { + dbg_time("%s detach kernel driver success", __func__); + } +} + +void usbfs_attach_kernel_driver(int fd, int ifnum) +{ + struct usbfs_ioctl operate; + operate.data = NULL; + operate.ifno = ifnum; + operate.ioctl_code = IOCTL_USBFS_CONNECT; + if (ioctl(fd, USBDEVFS_IOCTL, &operate) < 0) { + dbg_time("%s detach kernel driver failed", __func__); + } else { + dbg_time("%s detach kernel driver success", __func__); + } +} + +int reattach_driver(PROFILE_T *profile) +{ + int ifnum = 4; + int fd; + char devpath[128] = {'\0'}; + snprintf(devpath, sizeof(devpath), "/dev/bus/usb/%03d/%03d", profile->usb_dev.busnum, profile->usb_dev.devnum); + fd = open(devpath, O_RDWR | O_NOCTTY); + if (fd < 0) + { + dbg_time("%s fail to open %s", __func__, devpath); + return -1; + } + usbfs_detach_kernel_driver(fd, ifnum); + usbfs_attach_kernel_driver(fd, ifnum); + close(fd); + return 0; +} + +#define SIOCETHTOOL 0x8946 +int ql_get_netcard_driver_info(const char 
*devname) +{ + int fd = -1; + struct ethtool_drvinfo drvinfo; + struct ifreq ifr; /* ifreq suitable for ethtool ioctl */ + + memset(&ifr, 0, sizeof(ifr)); + strcpy(ifr.ifr_name, devname); + + fd = socket(AF_INET, SOCK_DGRAM, 0); + if (fd < 0) { + dbg_time("Cannot get control socket: errno(%d)(%s)", errno, strerror(errno)); + return -1; + } + + drvinfo.cmd = ETHTOOL_GDRVINFO; + ifr.ifr_data = (void *)&drvinfo; + + if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) { + dbg_time("ioctl() error: errno(%d)(%s)", errno, strerror(errno)); + close(fd); + return -1; + } + + dbg_time("netcard driver = %s, driver version = %s", drvinfo.driver, drvinfo.version); + + close(fd); + + return 0; +} + +int ql_get_netcard_carrier_state(const char *devname) +{ + int fd = -1; + struct ethtool_value edata; + struct ifreq ifr; /* ifreq suitable for ethtool ioctl */ + + memset(&ifr, 0, sizeof(ifr)); + strcpy(ifr.ifr_name, devname); + + fd = socket(AF_INET, SOCK_DGRAM, 0); + if (fd < 0) { + dbg_time("Cannot get control socket: errno(%d)(%s)", errno, strerror(errno)); + return -1; + } + + edata.cmd = ETHTOOL_GLINK; + edata.data = 0; + ifr.ifr_data = (void *)&edata; + + if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) { + dbg_time("ioctl('%s') error: errno(%d)(%s)", devname, errno, strerror(errno)); + return -1; + } + + if (!edata.data) + dbg_time("netcard carrier = %d", edata.data); + + close(fd); + + return edata.data; +} + +static void *catch_log(void *arg) +{ + PROFILE_T *profile = (PROFILE_T *)arg; + int nreads = 0; + char tbuff[256+32]; + char filter[32]; + size_t tsize = strlen(get_time()) + 1; + + snprintf(filter, sizeof(filter), ":%d:%03d:", profile->usb_dev.busnum, profile->usb_dev.devnum); + + while(1) { + nreads = read(profile->usbmon_fd, tbuff + tsize, sizeof(tbuff) - tsize - 1); + if (nreads <= 0) { + if (nreads == -1 && errno == EINTR) + continue; + break; + } + + tbuff[tsize+nreads] = '\0'; // printf("%s", buff); + + if (!strstr(tbuff+tsize, filter)) + continue; + + snprintf(tbuff, sizeof(tbuff), "%s", get_time()); + tbuff[tsize-1] = ' '; + + fwrite(tbuff, strlen(tbuff), 1, profile->usbmon_logfile_fp); + } + + return NULL; +} + +int ql_capture_usbmon_log(PROFILE_T *profile, const char *log_path) +{ + char usbmon_path[256]; + pthread_t pt; + pthread_attr_t attr; + + if (access("/sys/module/usbmon", F_OK)) { + dbg_time("usbmon is not load, please execute \"modprobe usbmon\" or \"insmod usbmon.ko\""); + return -1; + } + + if (access("/sys/kernel/debug/usb", F_OK)) { + dbg_time("debugfs is not mount, please execute \"mount -t debugfs none_debugs /sys/kernel/debug\""); + return -1; + } + + snprintf(usbmon_path, sizeof(usbmon_path), "/sys/kernel/debug/usb/usbmon/%du", profile->usb_dev.busnum); + profile->usbmon_fd = open(usbmon_path, O_RDONLY); + if (profile->usbmon_fd < 0) { + dbg_time("open %s error(%d) (%s)", usbmon_path, errno, strerror(errno)); + return -1; + } + + snprintf(usbmon_path, sizeof(usbmon_path), "cat /sys/kernel/debug/usb/devices >> %s", log_path); + if (system(usbmon_path) == -1) {}; + + profile->usbmon_logfile_fp = fopen(log_path, "wb"); + if (!profile->usbmon_logfile_fp) { + dbg_time("open %s error(%d) (%s)", log_path, errno, strerror(errno)); + close(profile->usbmon_fd); + profile->usbmon_fd = -1; + return -1; + } + + pthread_attr_init(&attr); + pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); + + pthread_create(&pt, &attr, catch_log, (void *)profile); + + return 0; +} + +void ql_stop_usbmon_log(PROFILE_T *profile) { + if (profile->usbmon_fd > 0) + close(profile->usbmon_fd); + if 
(profile->usbmon_logfile_fp) + fclose(profile->usbmon_logfile_fp); +} diff --git a/package/wwan/driver/quectel_cm_5G/src/ethtool-copy.h b/package/wwan/driver/quectel_cm_5G/src/ethtool-copy.h new file mode 100644 index 000000000..b5515c2ff --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/ethtool-copy.h @@ -0,0 +1,1100 @@ +/* + * ethtool.h: Defines for Linux ethtool. + * + * Copyright (C) 1998 David S. Miller (davem@redhat.com) + * Copyright 2001 Jeff Garzik + * Portions Copyright 2001 Sun Microsystems (thockin@sun.com) + * Portions Copyright 2002 Intel (eli.kupermann@intel.com, + * christopher.leech@intel.com, + * scott.feldman@intel.com) + * Portions Copyright (C) Sun Microsystems 2008 + */ + +#ifndef _LINUX_ETHTOOL_H +#define _LINUX_ETHTOOL_H + +#include +#include + +/* This should work for both 32 and 64 bit userland. */ +struct ethtool_cmd { + __u32 cmd; + __u32 supported; /* Features this interface supports */ + __u32 advertising; /* Features this interface advertises */ + __u16 speed; /* The forced speed (lower bits) in + * Mbps. Please use + * ethtool_cmd_speed()/_set() to + * access it */ + __u8 duplex; /* Duplex, half or full */ + __u8 port; /* Which connector port */ + __u8 phy_address; /* MDIO PHY address (PRTAD for clause 45). + * May be read-only or read-write + * depending on the driver. + */ + __u8 transceiver; /* Which transceiver to use */ + __u8 autoneg; /* Enable or disable autonegotiation */ + __u8 mdio_support; /* MDIO protocols supported. Read-only. + * Not set by all drivers. + */ + __u32 maxtxpkt; /* Tx pkts before generating tx int */ + __u32 maxrxpkt; /* Rx pkts before generating rx int */ + __u16 speed_hi; /* The forced speed (upper + * bits) in Mbps. Please use + * ethtool_cmd_speed()/_set() to + * access it */ + __u8 eth_tp_mdix; /* twisted pair MDI-X status */ + __u8 eth_tp_mdix_ctrl; /* twisted pair MDI-X control, when set, + * link should be renegotiated if necessary + */ + __u32 lp_advertising; /* Features the link partner advertises */ + __u32 reserved[2]; +}; + +static __inline__ void ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + + ep->speed = (__u16)speed; + ep->speed_hi = (__u16)(speed >> 16); +} + +static __inline__ __u32 ethtool_cmd_speed(const struct ethtool_cmd *ep) +{ + return (ep->speed_hi << 16) | ep->speed; +} + +/* Device supports clause 22 register access to PHY or peripherals + * using the interface defined in . This should not be + * set if there are known to be no such peripherals present or if + * the driver only emulates clause 22 registers for compatibility. + */ +#define ETH_MDIO_SUPPORTS_C22 1 + +/* Device supports clause 45 register access to PHY or peripherals + * using the interface defined in and . + * This should not be set if there are known to be no such peripherals + * present. + */ +#define ETH_MDIO_SUPPORTS_C45 2 + +#define ETHTOOL_FWVERS_LEN 32 +#define ETHTOOL_BUSINFO_LEN 32 +/* these strings are set to whatever the driver author decides... */ +struct ethtool_drvinfo { + __u32 cmd; + char driver[32]; /* driver short name, "tulip", "eepro100" */ + char version[32]; /* driver version string */ + char fw_version[ETHTOOL_FWVERS_LEN]; /* firmware version string */ + char bus_info[ETHTOOL_BUSINFO_LEN]; /* Bus info for this IF. */ + /* For PCI devices, use pci_name(pci_dev). */ + char reserved1[32]; + char reserved2[12]; + /* + * Some struct members below are filled in + * using ops->get_sset_count(). Obtaining + * this info from ethtool_drvinfo is now + * deprecated; Use ETHTOOL_GSSET_INFO + * instead. 
+ */ + __u32 n_priv_flags; /* number of flags valid in ETHTOOL_GPFLAGS */ + __u32 n_stats; /* number of u64's from ETHTOOL_GSTATS */ + __u32 testinfo_len; + __u32 eedump_len; /* Size of data from ETHTOOL_GEEPROM (bytes) */ + __u32 regdump_len; /* Size of data from ETHTOOL_GREGS (bytes) */ +}; + +#define SOPASS_MAX 6 +/* wake-on-lan settings */ +struct ethtool_wolinfo { + __u32 cmd; + __u32 supported; + __u32 wolopts; + __u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */ +}; + +/* for passing single values */ +struct ethtool_value { + __u32 cmd; + __u32 data; +}; + +/* for passing big chunks of data */ +struct ethtool_regs { + __u32 cmd; + __u32 version; /* driver-specific, indicates different chips/revs */ + __u32 len; /* bytes */ + __u8 data[0]; +}; + +/* for passing EEPROM chunks */ +struct ethtool_eeprom { + __u32 cmd; + __u32 magic; + __u32 offset; /* in bytes */ + __u32 len; /* in bytes */ + __u8 data[0]; +}; + +/** + * struct ethtool_eee - Energy Efficient Ethernet information + * @cmd: ETHTOOL_{G,S}EEE + * @supported: Mask of %SUPPORTED_* flags for the speed/duplex combinations + * for which there is EEE support. + * @advertised: Mask of %ADVERTISED_* flags for the speed/duplex combinations + * advertised as eee capable. + * @lp_advertised: Mask of %ADVERTISED_* flags for the speed/duplex + * combinations advertised by the link partner as eee capable. + * @eee_active: Result of the eee auto negotiation. + * @eee_enabled: EEE configured mode (enabled/disabled). + * @tx_lpi_enabled: Whether the interface should assert its tx lpi, given + * that eee was negotiated. + * @tx_lpi_timer: Time in microseconds the interface delays prior to asserting + * its tx lpi (after reaching 'idle' state). Effective only when eee + * was negotiated and tx_lpi_enabled was set. + */ +struct ethtool_eee { + __u32 cmd; + __u32 supported; + __u32 advertised; + __u32 lp_advertised; + __u32 eee_active; + __u32 eee_enabled; + __u32 tx_lpi_enabled; + __u32 tx_lpi_timer; + __u32 reserved[2]; +}; + +/** + * struct ethtool_modinfo - plugin module eeprom information + * @cmd: %ETHTOOL_GMODULEINFO + * @type: Standard the module information conforms to %ETH_MODULE_SFF_xxxx + * @eeprom_len: Length of the eeprom + * + * This structure is used to return the information to + * properly size memory for a subsequent call to %ETHTOOL_GMODULEEEPROM. + * The type code indicates the eeprom data format + */ +struct ethtool_modinfo { + __u32 cmd; + __u32 type; + __u32 eeprom_len; + __u32 reserved[8]; +}; + +/** + * struct ethtool_coalesce - coalescing parameters for IRQs and stats updates + * @cmd: ETHTOOL_{G,S}COALESCE + * @rx_coalesce_usecs: How many usecs to delay an RX interrupt after + * a packet arrives. + * @rx_max_coalesced_frames: Maximum number of packets to receive + * before an RX interrupt. + * @rx_coalesce_usecs_irq: Same as @rx_coalesce_usecs, except that + * this value applies while an IRQ is being serviced by the host. + * @rx_max_coalesced_frames_irq: Same as @rx_max_coalesced_frames, + * except that this value applies while an IRQ is being serviced + * by the host. + * @tx_coalesce_usecs: How many usecs to delay a TX interrupt after + * a packet is sent. + * @tx_max_coalesced_frames: Maximum number of packets to be sent + * before a TX interrupt. + * @tx_coalesce_usecs_irq: Same as @tx_coalesce_usecs, except that + * this value applies while an IRQ is being serviced by the host. 
+ * @tx_max_coalesced_frames_irq: Same as @tx_max_coalesced_frames, + * except that this value applies while an IRQ is being serviced + * by the host. + * @stats_block_coalesce_usecs: How many usecs to delay in-memory + * statistics block updates. Some drivers do not have an + * in-memory statistic block, and in such cases this value is + * ignored. This value must not be zero. + * @use_adaptive_rx_coalesce: Enable adaptive RX coalescing. + * @use_adaptive_tx_coalesce: Enable adaptive TX coalescing. + * @pkt_rate_low: Threshold for low packet rate (packets per second). + * @rx_coalesce_usecs_low: How many usecs to delay an RX interrupt after + * a packet arrives, when the packet rate is below @pkt_rate_low. + * @rx_max_coalesced_frames_low: Maximum number of packets to be received + * before an RX interrupt, when the packet rate is below @pkt_rate_low. + * @tx_coalesce_usecs_low: How many usecs to delay a TX interrupt after + * a packet is sent, when the packet rate is below @pkt_rate_low. + * @tx_max_coalesced_frames_low: Maximum nuumber of packets to be sent before + * a TX interrupt, when the packet rate is below @pkt_rate_low. + * @pkt_rate_high: Threshold for high packet rate (packets per second). + * @rx_coalesce_usecs_high: How many usecs to delay an RX interrupt after + * a packet arrives, when the packet rate is above @pkt_rate_high. + * @rx_max_coalesced_frames_high: Maximum number of packets to be received + * before an RX interrupt, when the packet rate is above @pkt_rate_high. + * @tx_coalesce_usecs_high: How many usecs to delay a TX interrupt after + * a packet is sent, when the packet rate is above @pkt_rate_high. + * @tx_max_coalesced_frames_high: Maximum number of packets to be sent before + * a TX interrupt, when the packet rate is above @pkt_rate_high. + * @rate_sample_interval: How often to do adaptive coalescing packet rate + * sampling, measured in seconds. Must not be zero. + * + * Each pair of (usecs, max_frames) fields specifies this exit + * condition for interrupt coalescing: + * (usecs > 0 && time_since_first_completion >= usecs) || + * (max_frames > 0 && completed_frames >= max_frames) + * It is illegal to set both usecs and max_frames to zero as this + * would cause interrupts to never be generated. To disable + * coalescing, set usecs = 0 and max_frames = 1. + * + * Some implementations ignore the value of max_frames and use the + * condition: + * time_since_first_completion >= usecs + * This is deprecated. Drivers for hardware that does not support + * counting completions should validate that max_frames == !rx_usecs. + * + * Adaptive RX/TX coalescing is an algorithm implemented by some + * drivers to improve latency under low packet rates and improve + * throughput under high packet rates. Some drivers only implement + * one of RX or TX adaptive coalescing. Anything not implemented by + * the driver causes these values to be silently ignored. + * + * When the packet rate is below @pkt_rate_high but above + * @pkt_rate_low (both measured in packets per second) the + * normal {rx,tx}_* coalescing parameters are used. 
+ */
+struct ethtool_coalesce {
+ __u32 cmd;
+ __u32 rx_coalesce_usecs;
+ __u32 rx_max_coalesced_frames;
+ __u32 rx_coalesce_usecs_irq;
+ __u32 rx_max_coalesced_frames_irq;
+ __u32 tx_coalesce_usecs;
+ __u32 tx_max_coalesced_frames;
+ __u32 tx_coalesce_usecs_irq;
+ __u32 tx_max_coalesced_frames_irq;
+ __u32 stats_block_coalesce_usecs;
+ __u32 use_adaptive_rx_coalesce;
+ __u32 use_adaptive_tx_coalesce;
+ __u32 pkt_rate_low;
+ __u32 rx_coalesce_usecs_low;
+ __u32 rx_max_coalesced_frames_low;
+ __u32 tx_coalesce_usecs_low;
+ __u32 tx_max_coalesced_frames_low;
+ __u32 pkt_rate_high;
+ __u32 rx_coalesce_usecs_high;
+ __u32 rx_max_coalesced_frames_high;
+ __u32 tx_coalesce_usecs_high;
+ __u32 tx_max_coalesced_frames_high;
+ __u32 rate_sample_interval;
+};
+
+/* for configuring RX/TX ring parameters */
+struct ethtool_ringparam {
+ __u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */
+
+ /* Read only attributes. These indicate the maximum number
+ * of pending RX/TX ring entries the driver will allow the
+ * user to set.
+ */
+ __u32 rx_max_pending;
+ __u32 rx_mini_max_pending;
+ __u32 rx_jumbo_max_pending;
+ __u32 tx_max_pending;
+
+ /* Values changeable by the user. The valid values are
+ * in the range 1 to the "*_max_pending" counterpart above.
+ */
+ __u32 rx_pending;
+ __u32 rx_mini_pending;
+ __u32 rx_jumbo_pending;
+ __u32 tx_pending;
+};
+
+/**
+ * struct ethtool_channels - configuring the number of network channels
+ * @cmd: ETHTOOL_{G,S}CHANNELS
+ * @max_rx: Read only. Maximum number of receive channels the driver supports.
+ * @max_tx: Read only. Maximum number of transmit channels the driver supports.
+ * @max_other: Read only. Maximum number of other channels the driver supports.
+ * @max_combined: Read only. Maximum number of combined channels the driver
+ * supports. A combined channel is a set of RX, TX or other queues.
+ * @rx_count: Valid values are in the range 1 to the max_rx.
+ * @tx_count: Valid values are in the range 1 to the max_tx.
+ * @other_count: Valid values are in the range 1 to the max_other.
+ * @combined_count: Valid values are in the range 1 to the max_combined.
+ *
+ * This can be used to configure RX, TX and other channels.
+ */
+
+struct ethtool_channels {
+ __u32 cmd;
+ __u32 max_rx;
+ __u32 max_tx;
+ __u32 max_other;
+ __u32 max_combined;
+ __u32 rx_count;
+ __u32 tx_count;
+ __u32 other_count;
+ __u32 combined_count;
+};
+
+/* for configuring link flow control parameters */
+struct ethtool_pauseparam {
+ __u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */
+
+ /* If the link is being auto-negotiated (via ethtool_cmd.autoneg
+ * being true) the user may set 'autoneg' here non-zero to have the
+ * pause parameters be auto-negotiated too. In such a case, the
+ * {rx,tx}_pause values below determine what capabilities are
+ * advertised.
+ *
+ * If 'autoneg' is zero or the link is not being auto-negotiated,
+ * then {rx,tx}_pause force the driver to use/not-use pause
+ * flow control.
+ */
+ __u32 autoneg;
+ __u32 rx_pause;
+ __u32 tx_pause;
+};
+
+#define ETH_GSTRING_LEN 32
+enum ethtool_stringset {
+ ETH_SS_TEST = 0,
+ ETH_SS_STATS,
+ ETH_SS_PRIV_FLAGS,
+ ETH_SS_NTUPLE_FILTERS, /* Do not use, GRXNTUPLE is now deprecated */
+ ETH_SS_FEATURES,
+};
+
+/* for passing string sets for data tagging */
+struct ethtool_gstrings {
+ __u32 cmd; /* ETHTOOL_GSTRINGS */
+ __u32 string_set; /* string set id e.g. 
ETH_SS_TEST, etc. */
+ __u32 len; /* number of strings in the string set */
+ __u8 data[0];
+};
+
+struct ethtool_sset_info {
+ __u32 cmd; /* ETHTOOL_GSSET_INFO */
+ __u32 reserved;
+ __u64 sset_mask; /* input: each bit selects an sset to query */
+ /* output: each bit a returned sset */
+ __u32 data[0]; /* ETH_SS_xxx count, in order, based on bits
+ in sset_mask. One bit implies one
+ __u32, two bits implies two
+ __u32's, etc. */
+};
+
+/**
+ * enum ethtool_test_flags - flags definition of ethtool_test
+ * @ETH_TEST_FL_OFFLINE: if set perform online and offline tests, otherwise
+ * only online tests.
+ * @ETH_TEST_FL_FAILED: Driver sets this flag if the test fails.
+ * @ETH_TEST_FL_EXTERNAL_LB: Application request to perform external loopback
+ * test.
+ * @ETH_TEST_FL_EXTERNAL_LB_DONE: Driver performed the external loopback test
+ */
+
+enum ethtool_test_flags {
+ ETH_TEST_FL_OFFLINE = (1 << 0),
+ ETH_TEST_FL_FAILED = (1 << 1),
+ ETH_TEST_FL_EXTERNAL_LB = (1 << 2),
+ ETH_TEST_FL_EXTERNAL_LB_DONE = (1 << 3),
+};
+
+/* for requesting NIC test and getting results */
+struct ethtool_test {
+ __u32 cmd; /* ETHTOOL_TEST */
+ __u32 flags; /* ETH_TEST_FL_xxx */
+ __u32 reserved;
+ __u32 len; /* result length, in number of u64 elements */
+ __u64 data[0];
+};
+
+/* for dumping NIC-specific statistics */
+struct ethtool_stats {
+ __u32 cmd; /* ETHTOOL_GSTATS */
+ __u32 n_stats; /* number of u64's being returned */
+ __u64 data[0];
+};
+
+struct ethtool_perm_addr {
+ __u32 cmd; /* ETHTOOL_GPERMADDR */
+ __u32 size;
+ __u8 data[0];
+};
+
+/* boolean flags controlling per-interface behavior characteristics.
+ * When reading, the flag indicates whether or not a certain behavior
+ * is enabled/present. When writing, the flag indicates whether
+ * or not the driver should turn on (set) or off (clear) a behavior.
+ *
+ * Some behaviors may be read-only (unconditionally absent or present).
+ * If such is the case, return EINVAL in the set-flags operation if the
+ * flag differs from the read-only value.
+ */
+enum ethtool_flags {
+ ETH_FLAG_TXVLAN = (1 << 7), /* TX VLAN offload enabled */
+ ETH_FLAG_RXVLAN = (1 << 8), /* RX VLAN offload enabled */
+ ETH_FLAG_LRO = (1 << 15), /* LRO is enabled */
+ ETH_FLAG_NTUPLE = (1 << 27), /* N-tuple filters enabled */
+ ETH_FLAG_RXHASH = (1 << 28),
+};
+
+/* The following structures are for supporting RX network flow
+ * classification and RX n-tuple configuration. Note, all multibyte
+ * fields, e.g., ip4src, ip4dst, psrc, pdst, spi, etc. are expected to
+ * be in network byte order.
+ */
+
+/**
+ * struct ethtool_tcpip4_spec - flow specification for TCP/IPv4 etc.
+ * @ip4src: Source host
+ * @ip4dst: Destination host
+ * @psrc: Source port
+ * @pdst: Destination port
+ * @tos: Type-of-service
+ *
+ * This can be used to specify a TCP/IPv4, UDP/IPv4 or SCTP/IPv4 flow.
+ */
+struct ethtool_tcpip4_spec {
+ __be32 ip4src;
+ __be32 ip4dst;
+ __be16 psrc;
+ __be16 pdst;
+ __u8 tos;
+};
+
+/**
+ * struct ethtool_ah_espip4_spec - flow specification for IPsec/IPv4
+ * @ip4src: Source host
+ * @ip4dst: Destination host
+ * @spi: Security parameters index
+ * @tos: Type-of-service
+ *
+ * This can be used to specify an IPsec transport or tunnel over IPv4.
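+ *
+ * For example (illustrative): a classification rule of type %AH_V4_FLOW
+ * that sets @spi in both the value and the mask would steer a single
+ * IPsec security association to a chosen RX ring.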
+ */ +struct ethtool_ah_espip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be32 spi; + __u8 tos; +}; + +#define ETH_RX_NFC_IP4 1 + +/** + * struct ethtool_usrip4_spec - general flow specification for IPv4 + * @ip4src: Source host + * @ip4dst: Destination host + * @l4_4_bytes: First 4 bytes of transport (layer 4) header + * @tos: Type-of-service + * @ip_ver: Value must be %ETH_RX_NFC_IP4; mask must be 0 + * @proto: Transport protocol number; mask must be 0 + */ +struct ethtool_usrip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be32 l4_4_bytes; + __u8 tos; + __u8 ip_ver; + __u8 proto; +}; + +union ethtool_flow_union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_tcpip4_spec udp_ip4_spec; + struct ethtool_tcpip4_spec sctp_ip4_spec; + struct ethtool_ah_espip4_spec ah_ip4_spec; + struct ethtool_ah_espip4_spec esp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + struct ethhdr ether_spec; + __u8 hdata[52]; +}; + +/** + * struct ethtool_flow_ext - additional RX flow fields + * @h_dest: destination MAC address + * @vlan_etype: VLAN EtherType + * @vlan_tci: VLAN tag control information + * @data: user defined data + * + * Note, @vlan_etype, @vlan_tci, and @data are only valid if %FLOW_EXT + * is set in &struct ethtool_rx_flow_spec @flow_type. + * @h_dest is valid if %FLOW_MAC_EXT is set. + */ +struct ethtool_flow_ext { + __u8 padding[2]; + unsigned char h_dest[ETH_ALEN]; + __be16 vlan_etype; + __be16 vlan_tci; + __be32 data[2]; +}; + +/** + * struct ethtool_rx_flow_spec - classification rule for RX flows + * @flow_type: Type of match to perform, e.g. %TCP_V4_FLOW + * @h_u: Flow fields to match (dependent on @flow_type) + * @h_ext: Additional fields to match + * @m_u: Masks for flow field bits to be matched + * @m_ext: Masks for additional field bits to be matched + * Note, all additional fields must be ignored unless @flow_type + * includes the %FLOW_EXT or %FLOW_MAC_EXT flag + * (see &struct ethtool_flow_ext description). + * @ring_cookie: RX ring/queue index to deliver to, or %RX_CLS_FLOW_DISC + * if packets should be discarded + * @location: Location of rule in the table. Locations must be + * numbered such that a flow matching multiple rules will be + * classified according to the first (lowest numbered) rule. + */ +struct ethtool_rx_flow_spec { + __u32 flow_type; + union ethtool_flow_union h_u; + struct ethtool_flow_ext h_ext; + union ethtool_flow_union m_u; + struct ethtool_flow_ext m_ext; + __u64 ring_cookie; + __u32 location; +}; + +/** + * struct ethtool_rxnfc - command to get or set RX flow classification rules + * @cmd: Specific command number - %ETHTOOL_GRXFH, %ETHTOOL_SRXFH, + * %ETHTOOL_GRXRINGS, %ETHTOOL_GRXCLSRLCNT, %ETHTOOL_GRXCLSRULE, + * %ETHTOOL_GRXCLSRLALL, %ETHTOOL_SRXCLSRLDEL or %ETHTOOL_SRXCLSRLINS + * @flow_type: Type of flow to be affected, e.g. %TCP_V4_FLOW + * @data: Command-dependent value + * @fs: Flow classification rule + * @rule_cnt: Number of rules to be affected + * @rule_locs: Array of used rule locations + * + * For %ETHTOOL_GRXFH and %ETHTOOL_SRXFH, @data is a bitmask indicating + * the fields included in the flow hash, e.g. %RXH_IP_SRC. The following + * structure fields must not be used. + * + * For %ETHTOOL_GRXRINGS, @data is set to the number of RX rings/queues + * on return. + * + * For %ETHTOOL_GRXCLSRLCNT, @rule_cnt is set to the number of defined + * rules on return. If @data is non-zero on return then it is the + * size of the rule table, plus the flag %RX_CLS_LOC_SPECIAL if the + * driver supports any special location values. 
If that flag is not + * set in @data then special location values should not be used. + * + * For %ETHTOOL_GRXCLSRULE, @fs.@location specifies the location of an + * existing rule on entry and @fs contains the rule on return. + * + * For %ETHTOOL_GRXCLSRLALL, @rule_cnt specifies the array size of the + * user buffer for @rule_locs on entry. On return, @data is the size + * of the rule table, @rule_cnt is the number of defined rules, and + * @rule_locs contains the locations of the defined rules. Drivers + * must use the second parameter to get_rxnfc() instead of @rule_locs. + * + * For %ETHTOOL_SRXCLSRLINS, @fs specifies the rule to add or update. + * @fs.@location either specifies the location to use or is a special + * location value with %RX_CLS_LOC_SPECIAL flag set. On return, + * @fs.@location is the actual rule location. + * + * For %ETHTOOL_SRXCLSRLDEL, @fs.@location specifies the location of an + * existing rule on entry. + * + * A driver supporting the special location values for + * %ETHTOOL_SRXCLSRLINS may add the rule at any suitable unused + * location, and may remove a rule at a later location (lower + * priority) that matches exactly the same set of flows. The special + * values are: %RX_CLS_LOC_ANY, selecting any location; + * %RX_CLS_LOC_FIRST, selecting the first suitable location (maximum + * priority); and %RX_CLS_LOC_LAST, selecting the last suitable + * location (minimum priority). Additional special values may be + * defined in future and drivers must return -%EINVAL for any + * unrecognised value. + */ +struct ethtool_rxnfc { + __u32 cmd; + __u32 flow_type; + __u64 data; + struct ethtool_rx_flow_spec fs; + __u32 rule_cnt; + __u32 rule_locs[0]; +}; + + +/** + * struct ethtool_rxfh_indir - command to get or set RX flow hash indirection + * @cmd: Specific command number - %ETHTOOL_GRXFHINDIR or %ETHTOOL_SRXFHINDIR + * @size: On entry, the array size of the user buffer, which may be zero. + * On return from %ETHTOOL_GRXFHINDIR, the array size of the hardware + * indirection table. + * @ring_index: RX ring/queue index for each hash value + * + * For %ETHTOOL_GRXFHINDIR, a @size of zero means that only the size + * should be returned. For %ETHTOOL_SRXFHINDIR, a @size of zero means + * the table should be reset to default values. This last feature + * is not supported by the original implementations. + */ +struct ethtool_rxfh_indir { + __u32 cmd; + __u32 size; + __u32 ring_index[0]; +}; + +/** + * struct ethtool_rx_ntuple_flow_spec - specification for RX flow filter + * @flow_type: Type of match to perform, e.g. %TCP_V4_FLOW + * @h_u: Flow field values to match (dependent on @flow_type) + * @m_u: Masks for flow field value bits to be ignored + * @vlan_tag: VLAN tag to match + * @vlan_tag_mask: Mask for VLAN tag bits to be ignored + * @data: Driver-dependent data to match + * @data_mask: Mask for driver-dependent data bits to be ignored + * @action: RX ring/queue index to deliver to (non-negative) or other action + * (negative, e.g. %ETHTOOL_RXNTUPLE_ACTION_DROP) + * + * For flow types %TCP_V4_FLOW, %UDP_V4_FLOW and %SCTP_V4_FLOW, where + * a field value and mask are both zero this is treated as if all mask + * bits are set i.e. the field is ignored. 
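+ *
+ * For example (illustrative): a %TCP_V4_FLOW filter that sets only
+ * @h_u.tcp_ip4_spec.pdst, leaving its mask and every other value/mask
+ * pair at zero, matches all TCP/IPv4 packets sent to that destination
+ * port, since each zero value/mask pair is treated as a wildcard.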
+ */ +struct ethtool_rx_ntuple_flow_spec { + __u32 flow_type; + union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_tcpip4_spec udp_ip4_spec; + struct ethtool_tcpip4_spec sctp_ip4_spec; + struct ethtool_ah_espip4_spec ah_ip4_spec; + struct ethtool_ah_espip4_spec esp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + struct ethhdr ether_spec; + __u8 hdata[72]; + } h_u, m_u; + + __u16 vlan_tag; + __u16 vlan_tag_mask; + __u64 data; + __u64 data_mask; + + __s32 action; +#define ETHTOOL_RXNTUPLE_ACTION_DROP (-1) /* drop packet */ +#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2) /* clear filter */ +}; + +/** + * struct ethtool_rx_ntuple - command to set or clear RX flow filter + * @cmd: Command number - %ETHTOOL_SRXNTUPLE + * @fs: Flow filter specification + */ +struct ethtool_rx_ntuple { + __u32 cmd; + struct ethtool_rx_ntuple_flow_spec fs; +}; + +#define ETHTOOL_FLASH_MAX_FILENAME 128 +enum ethtool_flash_op_type { + ETHTOOL_FLASH_ALL_REGIONS = 0, +}; + +/* for passing firmware flashing related parameters */ +struct ethtool_flash { + __u32 cmd; + __u32 region; + char data[ETHTOOL_FLASH_MAX_FILENAME]; +}; + +/** + * struct ethtool_dump - used for retrieving, setting device dump + * @cmd: Command number - %ETHTOOL_GET_DUMP_FLAG, %ETHTOOL_GET_DUMP_DATA, or + * %ETHTOOL_SET_DUMP + * @version: FW version of the dump, filled in by driver + * @flag: driver dependent flag for dump setting, filled in by driver during + * get and filled in by ethtool for set operation. + * flag must be initialized by macro ETH_FW_DUMP_DISABLE value when + * firmware dump is disabled. + * @len: length of dump data, used as the length of the user buffer on entry to + * %ETHTOOL_GET_DUMP_DATA and this is returned as dump length by driver + * for %ETHTOOL_GET_DUMP_FLAG command + * @data: data collected for get dump data operation + */ + +#define ETH_FW_DUMP_DISABLE 0 + +struct ethtool_dump { + __u32 cmd; + __u32 version; + __u32 flag; + __u32 len; + __u8 data[0]; +}; + +/* for returning and changing feature sets */ + +/** + * struct ethtool_get_features_block - block with state of 32 features + * @available: mask of changeable features + * @requested: mask of features requested to be enabled if possible + * @active: mask of currently enabled features + * @never_changed: mask of features not changeable for any device + */ +struct ethtool_get_features_block { + __u32 available; + __u32 requested; + __u32 active; + __u32 never_changed; +}; + +/** + * struct ethtool_gfeatures - command to get state of device's features + * @cmd: command number = %ETHTOOL_GFEATURES + * @size: in: number of elements in the features[] array; + * out: number of elements in features[] needed to hold all features + * @features: state of features + */ +struct ethtool_gfeatures { + __u32 cmd; + __u32 size; + struct ethtool_get_features_block features[0]; +}; + +/** + * struct ethtool_set_features_block - block with request for 32 features + * @valid: mask of features to be changed + * @requested: values of features to be changed + */ +struct ethtool_set_features_block { + __u32 valid; + __u32 requested; +}; + +/** + * struct ethtool_sfeatures - command to request change in device's features + * @cmd: command number = %ETHTOOL_SFEATURES + * @size: array size of the features[] array + * @features: feature change masks + */ +struct ethtool_sfeatures { + __u32 cmd; + __u32 size; + struct ethtool_set_features_block features[0]; +}; + +/** + * struct ethtool_ts_info - holds a device's timestamping and PHC association + * @cmd: command number = 
%ETHTOOL_GET_TS_INFO + * @so_timestamping: bit mask of the sum of the supported SO_TIMESTAMPING flags + * @phc_index: device index of the associated PHC, or -1 if there is none + * @tx_types: bit mask of the supported hwtstamp_tx_types enumeration values + * @rx_filters: bit mask of the supported hwtstamp_rx_filters enumeration values + * + * The bits in the 'tx_types' and 'rx_filters' fields correspond to + * the 'hwtstamp_tx_types' and 'hwtstamp_rx_filters' enumeration values, + * respectively. For example, if the device supports HWTSTAMP_TX_ON, + * then (1 << HWTSTAMP_TX_ON) in 'tx_types' will be set. + */ +struct ethtool_ts_info { + __u32 cmd; + __u32 so_timestamping; + __s32 phc_index; + __u32 tx_types; + __u32 tx_reserved[3]; + __u32 rx_filters; + __u32 rx_reserved[3]; +}; + +/* + * %ETHTOOL_SFEATURES changes features present in features[].valid to the + * values of corresponding bits in features[].requested. Bits in .requested + * not set in .valid or not changeable are ignored. + * + * Returns %EINVAL when .valid contains undefined or never-changeable bits + * or size is not equal to required number of features words (32-bit blocks). + * Returns >= 0 if request was completed; bits set in the value mean: + * %ETHTOOL_F_UNSUPPORTED - there were bits set in .valid that are not + * changeable (not present in %ETHTOOL_GFEATURES' features[].available) + * those bits were ignored. + * %ETHTOOL_F_WISH - some or all changes requested were recorded but the + * resulting state of bits masked by .valid is not equal to .requested. + * Probably there are other device-specific constraints on some features + * in the set. When %ETHTOOL_F_UNSUPPORTED is set, .valid is considered + * here as though ignored bits were cleared. + * %ETHTOOL_F_COMPAT - some or all changes requested were made by calling + * compatibility functions. Requested offload state cannot be properly + * managed by kernel. + * + * Meaning of bits in the masks are obtained by %ETHTOOL_GSSET_INFO (number of + * bits in the arrays - always multiple of 32) and %ETHTOOL_GSTRINGS commands + * for ETH_SS_FEATURES string set. First entry in the table corresponds to least + * significant bit in features[0] fields. Empty strings mark undefined features. + */ +enum ethtool_sfeatures_retval_bits { + ETHTOOL_F_UNSUPPORTED__BIT, + ETHTOOL_F_WISH__BIT, + ETHTOOL_F_COMPAT__BIT, +}; + +#define ETHTOOL_F_UNSUPPORTED (1 << ETHTOOL_F_UNSUPPORTED__BIT) +#define ETHTOOL_F_WISH (1 << ETHTOOL_F_WISH__BIT) +#define ETHTOOL_F_COMPAT (1 << ETHTOOL_F_COMPAT__BIT) + + +/* CMDs currently supported */ +#define ETHTOOL_GSET 0x00000001 /* Get settings. */ +#define ETHTOOL_SSET 0x00000002 /* Set settings. */ +#define ETHTOOL_GDRVINFO 0x00000003 /* Get driver info. */ +#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers. */ +#define ETHTOOL_GWOL 0x00000005 /* Get wake-on-lan options. */ +#define ETHTOOL_SWOL 0x00000006 /* Set wake-on-lan options. */ +#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ +#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level. */ +#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation. */ +/* Get link status for host, i.e. whether the interface *and* the + * physical port (if there is one) are up (ethtool_value). */ +#define ETHTOOL_GLINK 0x0000000a +#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ +#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data. */ +#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ +#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. 
*/ +#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */ +#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters. */ +#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */ +#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */ +#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ +#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ +#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ +#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ +#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable + * (ethtool_value) */ +#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable + * (ethtool_value). */ +#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test. */ +#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */ +#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ +#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */ +#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */ +#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */ +#define ETHTOOL_GPERMADDR 0x00000020 /* Get permanent hardware address */ +#define ETHTOOL_GUFO 0x00000021 /* Get UFO enable (ethtool_value) */ +#define ETHTOOL_SUFO 0x00000022 /* Set UFO enable (ethtool_value) */ +#define ETHTOOL_GGSO 0x00000023 /* Get GSO enable (ethtool_value) */ +#define ETHTOOL_SGSO 0x00000024 /* Set GSO enable (ethtool_value) */ +#define ETHTOOL_GFLAGS 0x00000025 /* Get flags bitmap(ethtool_value) */ +#define ETHTOOL_SFLAGS 0x00000026 /* Set flags bitmap(ethtool_value) */ +#define ETHTOOL_GPFLAGS 0x00000027 /* Get driver-private flags bitmap */ +#define ETHTOOL_SPFLAGS 0x00000028 /* Set driver-private flags bitmap */ + +#define ETHTOOL_GRXFH 0x00000029 /* Get RX flow hash configuration */ +#define ETHTOOL_SRXFH 0x0000002a /* Set RX flow hash configuration */ +#define ETHTOOL_GGRO 0x0000002b /* Get GRO enable (ethtool_value) */ +#define ETHTOOL_SGRO 0x0000002c /* Set GRO enable (ethtool_value) */ +#define ETHTOOL_GRXRINGS 0x0000002d /* Get RX rings available for LB */ +#define ETHTOOL_GRXCLSRLCNT 0x0000002e /* Get RX class rule count */ +#define ETHTOOL_GRXCLSRULE 0x0000002f /* Get RX classification rule */ +#define ETHTOOL_GRXCLSRLALL 0x00000030 /* Get all RX classification rule */ +#define ETHTOOL_SRXCLSRLDEL 0x00000031 /* Delete RX classification rule */ +#define ETHTOOL_SRXCLSRLINS 0x00000032 /* Insert RX classification rule */ +#define ETHTOOL_FLASHDEV 0x00000033 /* Flash firmware to device */ +#define ETHTOOL_RESET 0x00000034 /* Reset hardware */ +#define ETHTOOL_SRXNTUPLE 0x00000035 /* Add an n-tuple filter to device */ +#define ETHTOOL_GRXNTUPLE 0x00000036 /* deprecated */ +#define ETHTOOL_GSSET_INFO 0x00000037 /* Get string set info */ +#define ETHTOOL_GRXFHINDIR 0x00000038 /* Get RX flow hash indir'n table */ +#define ETHTOOL_SRXFHINDIR 0x00000039 /* Set RX flow hash indir'n table */ + +#define ETHTOOL_GFEATURES 0x0000003a /* Get device offload settings */ +#define ETHTOOL_SFEATURES 0x0000003b /* Change device offload settings */ +#define ETHTOOL_GCHANNELS 0x0000003c /* Get no of channels */ +#define ETHTOOL_SCHANNELS 0x0000003d /* Set no of channels */ +#define ETHTOOL_SET_DUMP 0x0000003e /* Set dump settings */ +#define ETHTOOL_GET_DUMP_FLAG 0x0000003f /* Get dump settings */ +#define ETHTOOL_GET_DUMP_DATA 0x00000040 /* Get dump data */ +#define ETHTOOL_GET_TS_INFO 0x00000041 /* Get time stamping and PHC info */ +#define 
ETHTOOL_GMODULEINFO 0x00000042 /* Get plug-in module information */ +#define ETHTOOL_GMODULEEEPROM 0x00000043 /* Get plug-in module eeprom */ +#define ETHTOOL_GEEE 0x00000044 /* Get EEE settings */ +#define ETHTOOL_SEEE 0x00000045 /* Set EEE settings */ + +/* compatibility with older code */ +#define SPARC_ETH_GSET ETHTOOL_GSET +#define SPARC_ETH_SSET ETHTOOL_SSET + +/* Indicates what features are supported by the interface. */ +#define SUPPORTED_10baseT_Half (1 << 0) +#define SUPPORTED_10baseT_Full (1 << 1) +#define SUPPORTED_100baseT_Half (1 << 2) +#define SUPPORTED_100baseT_Full (1 << 3) +#define SUPPORTED_1000baseT_Half (1 << 4) +#define SUPPORTED_1000baseT_Full (1 << 5) +#define SUPPORTED_Autoneg (1 << 6) +#define SUPPORTED_TP (1 << 7) +#define SUPPORTED_AUI (1 << 8) +#define SUPPORTED_MII (1 << 9) +#define SUPPORTED_FIBRE (1 << 10) +#define SUPPORTED_BNC (1 << 11) +#define SUPPORTED_10000baseT_Full (1 << 12) +#define SUPPORTED_Pause (1 << 13) +#define SUPPORTED_Asym_Pause (1 << 14) +#define SUPPORTED_2500baseX_Full (1 << 15) +#define SUPPORTED_Backplane (1 << 16) +#define SUPPORTED_1000baseKX_Full (1 << 17) +#define SUPPORTED_10000baseKX4_Full (1 << 18) +#define SUPPORTED_10000baseKR_Full (1 << 19) +#define SUPPORTED_10000baseR_FEC (1 << 20) +#define SUPPORTED_20000baseMLD2_Full (1 << 21) +#define SUPPORTED_20000baseKR2_Full (1 << 22) +#define SUPPORTED_40000baseKR4_Full (1 << 23) +#define SUPPORTED_40000baseCR4_Full (1 << 24) +#define SUPPORTED_40000baseSR4_Full (1 << 25) +#define SUPPORTED_40000baseLR4_Full (1 << 26) + +/* Indicates what features are advertised by the interface. */ +#define ADVERTISED_10baseT_Half (1 << 0) +#define ADVERTISED_10baseT_Full (1 << 1) +#define ADVERTISED_100baseT_Half (1 << 2) +#define ADVERTISED_100baseT_Full (1 << 3) +#define ADVERTISED_1000baseT_Half (1 << 4) +#define ADVERTISED_1000baseT_Full (1 << 5) +#define ADVERTISED_Autoneg (1 << 6) +#define ADVERTISED_TP (1 << 7) +#define ADVERTISED_AUI (1 << 8) +#define ADVERTISED_MII (1 << 9) +#define ADVERTISED_FIBRE (1 << 10) +#define ADVERTISED_BNC (1 << 11) +#define ADVERTISED_10000baseT_Full (1 << 12) +#define ADVERTISED_Pause (1 << 13) +#define ADVERTISED_Asym_Pause (1 << 14) +#define ADVERTISED_2500baseX_Full (1 << 15) +#define ADVERTISED_Backplane (1 << 16) +#define ADVERTISED_1000baseKX_Full (1 << 17) +#define ADVERTISED_10000baseKX4_Full (1 << 18) +#define ADVERTISED_10000baseKR_Full (1 << 19) +#define ADVERTISED_10000baseR_FEC (1 << 20) +#define ADVERTISED_20000baseMLD2_Full (1 << 21) +#define ADVERTISED_20000baseKR2_Full (1 << 22) +#define ADVERTISED_40000baseKR4_Full (1 << 23) +#define ADVERTISED_40000baseCR4_Full (1 << 24) +#define ADVERTISED_40000baseSR4_Full (1 << 25) +#define ADVERTISED_40000baseLR4_Full (1 << 26) + +/* The following are all involved in forcing a particular link + * mode for the device for setting things. When getting the + * devices settings, these indicate the current mode and whether + * it was forced up into this mode or autonegotiated. + */ + +/* The forced speed, 10Mb, 100Mb, gigabit, 2.5Gb, 10GbE. */ +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define SPEED_10000 10000 +#define SPEED_UNKNOWN -1 + +/* Duplex, half or full. */ +#define DUPLEX_HALF 0x00 +#define DUPLEX_FULL 0x01 +#define DUPLEX_UNKNOWN 0xff + +/* Which connector port. 
*/ +#define PORT_TP 0x00 +#define PORT_AUI 0x01 +#define PORT_MII 0x02 +#define PORT_FIBRE 0x03 +#define PORT_BNC 0x04 +#define PORT_DA 0x05 +#define PORT_NONE 0xef +#define PORT_OTHER 0xff + +/* Which transceiver to use. */ +#define XCVR_INTERNAL 0x00 +#define XCVR_EXTERNAL 0x01 +#define XCVR_DUMMY1 0x02 +#define XCVR_DUMMY2 0x03 +#define XCVR_DUMMY3 0x04 + +/* Enable or disable autonegotiation. If this is set to enable, + * the forced link modes above are completely ignored. + */ +#define AUTONEG_DISABLE 0x00 +#define AUTONEG_ENABLE 0x01 + +/* MDI or MDI-X status/control - if MDI/MDI_X/AUTO is set then + * the driver is required to renegotiate link + */ +#define ETH_TP_MDI_INVALID 0x00 /* status: unknown; control: unsupported */ +#define ETH_TP_MDI 0x01 /* status: MDI; control: force MDI */ +#define ETH_TP_MDI_X 0x02 /* status: MDI-X; control: force MDI-X */ +#define ETH_TP_MDI_AUTO 0x03 /* control: auto-select */ + +/* Wake-On-Lan options. */ +#define WAKE_PHY (1 << 0) +#define WAKE_UCAST (1 << 1) +#define WAKE_MCAST (1 << 2) +#define WAKE_BCAST (1 << 3) +#define WAKE_ARP (1 << 4) +#define WAKE_MAGIC (1 << 5) +#define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */ + +/* L2-L4 network traffic flow types */ +#define TCP_V4_FLOW 0x01 /* hash or spec (tcp_ip4_spec) */ +#define UDP_V4_FLOW 0x02 /* hash or spec (udp_ip4_spec) */ +#define SCTP_V4_FLOW 0x03 /* hash or spec (sctp_ip4_spec) */ +#define AH_ESP_V4_FLOW 0x04 /* hash only */ +#define TCP_V6_FLOW 0x05 /* hash only */ +#define UDP_V6_FLOW 0x06 /* hash only */ +#define SCTP_V6_FLOW 0x07 /* hash only */ +#define AH_ESP_V6_FLOW 0x08 /* hash only */ +#define AH_V4_FLOW 0x09 /* hash or spec (ah_ip4_spec) */ +#define ESP_V4_FLOW 0x0a /* hash or spec (esp_ip4_spec) */ +#define AH_V6_FLOW 0x0b /* hash only */ +#define ESP_V6_FLOW 0x0c /* hash only */ +#define IP_USER_FLOW 0x0d /* spec only (usr_ip4_spec) */ +#define IPV4_FLOW 0x10 /* hash only */ +#define IPV6_FLOW 0x11 /* hash only */ +#define ETHER_FLOW 0x12 /* spec only (ether_spec) */ +/* Flag to enable additional fields in struct ethtool_rx_flow_spec */ +#define FLOW_EXT 0x80000000 +#define FLOW_MAC_EXT 0x40000000 + +/* L3-L4 network traffic flow hash options */ +#define RXH_L2DA (1 << 1) +#define RXH_VLAN (1 << 2) +#define RXH_L3_PROTO (1 << 3) +#define RXH_IP_SRC (1 << 4) +#define RXH_IP_DST (1 << 5) +#define RXH_L4_B_0_1 (1 << 6) /* src port in case of TCP/UDP/SCTP */ +#define RXH_L4_B_2_3 (1 << 7) /* dst port in case of TCP/UDP/SCTP */ +#define RXH_DISCARD (1 << 31) + +#define RX_CLS_FLOW_DISC 0xffffffffffffffffULL + +/* Special RX classification rule insert location values */ +#define RX_CLS_LOC_SPECIAL 0x80000000 /* flag */ +#define RX_CLS_LOC_ANY 0xffffffff +#define RX_CLS_LOC_FIRST 0xfffffffe +#define RX_CLS_LOC_LAST 0xfffffffd + +/* EEPROM Standards for plug in modules */ +#define ETH_MODULE_SFF_8079 0x1 +#define ETH_MODULE_SFF_8079_LEN 256 +#define ETH_MODULE_SFF_8472 0x2 +#define ETH_MODULE_SFF_8472_LEN 512 + +/* Reset flags */ +/* The reset() operation must clear the flags for the components which + * were actually reset. On successful return, the flags indicate the + * components which were not reset, either because they do not exist + * in the hardware or because they cannot be reset independently. The + * driver must never reset any components that were not requested. + */ +enum ethtool_reset_flags { + /* These flags represent components dedicated to the interface + * the command is addressed to. 
Shift any flag left by + * ETH_RESET_SHARED_SHIFT to reset a shared component of the + * same type. + */ + ETH_RESET_MGMT = 1 << 0, /* Management processor */ + ETH_RESET_IRQ = 1 << 1, /* Interrupt requester */ + ETH_RESET_DMA = 1 << 2, /* DMA engine */ + ETH_RESET_FILTER = 1 << 3, /* Filtering/flow direction */ + ETH_RESET_OFFLOAD = 1 << 4, /* Protocol offload */ + ETH_RESET_MAC = 1 << 5, /* Media access controller */ + ETH_RESET_PHY = 1 << 6, /* Transceiver/PHY */ + ETH_RESET_RAM = 1 << 7, /* RAM shared between + * multiple components */ + + ETH_RESET_DEDICATED = 0x0000ffff, /* All components dedicated to + * this interface */ + ETH_RESET_ALL = 0xffffffff, /* All components used by this + * interface, even if shared */ +}; +#define ETH_RESET_SHARED_SHIFT 16 + +#endif /* _LINUX_ETHTOOL_H */ diff --git a/package/wwan/driver/quectel_cm_5G/src/log/cdc_mbim.txt b/package/wwan/driver/quectel_cm_5G/src/log/cdc_mbim.txt new file mode 100644 index 000000000..8d5d3bae1 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/log/cdc_mbim.txt @@ -0,0 +1,71 @@ +root@ZhuoTK:/# dmesg +[ 788.920000] usb 1-1.3: new high-speed USB device number 4 using ehci-platform +[ 789.160000] cdc_mbim 1-1.3:1.4: cdc-wdm0: USB WDM device +[ 789.170000] cdc_mbim 1-1.3:1.4 wwan0: register 'cdc_mbim' at usb-101c0000.ehci-1.3, CDC MBIM, a2:58:dc:4d:dd:ca + +root@ZhuoTK:/# quectel-CM -s cmnet & +[04-13_05:24:38:767] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-13_05:24:38:769] Find /sys/bus/usb/devices/1-1.3 idVendor=0x2c7c idProduct=0x125, bus=0x001, dev=0x004 +[04-13_05:24:38:771] Auto find qmichannel = /dev/cdc-wdm0 +[04-13_05:24:38:771] Auto find usbnet_adapter = wwan0 +[04-13_05:24:38:771] netcard driver = cdc_mbim, driver version = 22-Aug-2005 +[04-13_05:24:38:771] Modem works in MBIM mode +[04-13_05:24:38:779] cdc_wdm_fd = 7 +[04-13_05:24:38:779] mbim_open_device() +[04-13_05:24:39:624] mbim_device_caps_query() +[04-13_05:24:39:656] DeviceId: 866758045439136 +[04-13_05:24:39:656] FirmwareInfo: EC25EFAR06A11M4G +[04-13_05:24:39:656] HardwareInfo: QUECTEL Mobile Broadband Modul +[04-13_05:24:39:657] mbim_device_services_query() +[04-13_05:24:39:688] mbim_set_radio_state( 1 ) +[04-13_05:24:39:721] HwRadioState: 1, SwRadioState: 1 +[04-13_05:24:39:721] mbim_subscriber_status_query() +[04-13_05:24:39:784] SubscriberId: 460028563800461 +[04-13_05:24:39:784] SimIccId: 89860015120716380461 +[04-13_05:24:39:785] SubscriberReadyState NotInitialized -> Initialized +[04-13_05:24:39:785] mbim_register_state_query() +[04-13_05:24:39:816] RegisterState Unknown -> Home +[04-13_05:24:39:816] mbim_packet_service_query() +[04-13_05:24:39:848] PacketServiceState Unknown -> Attached +[04-13_05:24:39:848] mbim_query_connect(sessionID=0) +[04-13_05:24:39:880] ActivationState Unknown -> Deactivated +[04-13_05:24:39:881] ifconfig wwan0 0.0.0.0 +[04-13_05:24:39:899] ifconfig wwan0 down +[04-13_05:24:39:913] mbim_set_connect(onoff=1, sessionID=0) +[04-13_05:24:39:976] ActivationState Deactivated -> Activated +[04-13_05:24:39:977] mbim_ip_config(sessionID=0) +[04-13_05:24:40:008] < SessionId = 0 +[04-13_05:24:40:008] < IPv4ConfigurationAvailable = 0xf +[04-13_05:24:40:008] < IPv6ConfigurationAvailable = 0x0 +[04-13_05:24:40:008] < IPv4AddressCount = 0x1 +[04-13_05:24:40:008] < IPv4AddressOffset = 0x3c +[04-13_05:24:40:009] < IPv6AddressCount = 0x0 +[04-13_05:24:40:009] < IPv6AddressOffset = 0x0 +[04-13_05:24:40:009] < IPv4 = 10.129.90.29/30 +[04-13_05:24:40:009] < gw = 10.129.90.30 +[04-13_05:24:40:009] < dns1 = 211.138.180.2 
+[04-13_05:24:40:009] < dns2 = 211.138.180.3
+[04-13_05:24:40:009] < ipv4 mtu = 1500
+[04-13_05:24:40:041] ifconfig wwan0 up
+[04-13_05:24:40:063] ip -4 address flush dev wwan0
+[04-13_05:24:40:073] ip -4 address add 10.129.90.29/30 dev wwan0
+[04-13_05:24:40:084] ip -4 route add default via 10.129.90.30 dev wwan0
+
+root@ZhuoTK:/# ifconfig wwan0
+wwan0 Link encap:Ethernet HWaddr A2:58:DC:4D:DD:CA
+ inet addr:10.129.90.29 Bcast:0.0.0.0 Mask:255.255.255.252
+ inet6 addr: fe80::a058:dcff:fe4d:ddca/64 Scope:Link
+ UP BROADCAST RUNNING NOARP MULTICAST MTU:1500 Metric:1
+ RX packets:0 errors:0 dropped:0 overruns:0 frame:0
+ TX packets:5 errors:0 dropped:0 overruns:0 carrier:0
+ collisions:0 txqueuelen:1000
+ RX bytes:0 (0.0 B) TX bytes:380 (380.0 B)
+
+root@ZhuoTK:/# ip ro show
+default via 10.129.90.30 dev wwan0
+10.129.90.28/30 dev wwan0 proto kernel scope link src 10.129.90.29
+192.168.1.0/24 dev br-lan proto kernel scope link src 192.168.1.251
+
+root@ZhuoTK:/# ping www.qq.com
+PING www.qq.com (183.194.238.117): 56 data bytes
+64 bytes from 183.194.238.117: seq=0 ttl=53 time=58.674 ms
\ No newline at end of file
diff --git a/package/wwan/driver/quectel_cm_5G/src/log/cdc_mbim_vlan.txt b/package/wwan/driver/quectel_cm_5G/src/log/cdc_mbim_vlan.txt
new file mode 100644
index 000000000..d6988c9e8
--- /dev/null
+++ b/package/wwan/driver/quectel_cm_5G/src/log/cdc_mbim_vlan.txt
@@ -0,0 +1,168 @@
+root@ZhuoTK:/# dmesg
+[ 788.920000] usb 1-1.3: new high-speed USB device number 4 using ehci-platform
+[ 789.160000] cdc_mbim 1-1.3:1.4: cdc-wdm0: USB WDM device
+[ 789.170000] cdc_mbim 1-1.3:1.4 wwan0: register 'cdc_mbim' at usb-101c0000.ehci-1.3, CDC MBIM, a2:58:dc:4d:dd:ca
+
+root@ZhuoTK:/# ip link add link wwan0 name wwan0.1 type vlan id 1
+root@ZhuoTK:/# ip link add link wwan0 name wwan0.2 type vlan id 2
+root@ZhuoTK:/# ifconfig wwan0.1
+wwan0.1 Link encap:Ethernet HWaddr A2:58:DC:4D:DD:CA
+ BROADCAST NOARP MULTICAST MTU:1500 Metric:1
+ RX packets:0 errors:0 dropped:0 overruns:0 frame:0
+ TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
+ collisions:0 txqueuelen:0
+ RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
+
+root@ZhuoTK:/# ifconfig wwan0.2
+wwan0.2 Link encap:Ethernet HWaddr A2:58:DC:4D:DD:CA
+ BROADCAST NOARP MULTICAST MTU:1500 Metric:1
+ RX packets:0 errors:0 dropped:0 overruns:0 frame:0
+ TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
+ collisions:0 txqueuelen:0
+ RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
+
+root@ZhuoTK:/# quectel-mbim-proxy &
+root@ZhuoTK:/# [04-13_07:04:27:543] mbim_dev_fd=3
+[04-13_07:04:27:543] mbim_send_open_msg()
+[04-13_07:04:28:321] receive MBIM_OPEN_DONE, status=0
+[04-13_07:04:28:321] mbim_server_fd=4
+
+root@ZhuoTK:/# quectel-CM -n 1 -s cmnet &
+[04-13_07:04:34:256] Quectel_QConnectManager_Linux_V1.6.0.25
+[04-13_07:04:34:259] Find /sys/bus/usb/devices/1-1.3 idVendor=0x2c7c idProduct=0x125, bus=0x001, dev=0x004
+[04-13_07:04:34:260] Auto find qmichannel = /dev/cdc-wdm0
+[04-13_07:04:34:260] Auto find usbnet_adapter = wwan0
+[04-13_07:04:34:260] netcard driver = cdc_mbim, driver version = 22-Aug-2005
+[04-13_07:04:34:261] mbim_qmap_mode = 4, vlan_id = 0x01, qmap_netcard = wwan0.1
+[04-13_07:04:34:261] Modem works in MBIM mode
+[04-13_07:04:34:261] handle_client_connect client_fd=5, client_idx=1
+[04-13_07:04:34:262] connect to quectel-mbim-proxy sockfd = 7
+[04-13_07:04:34:262] cdc_wdm_fd = 7
+[04-13_07:04:34:262] mbim_open_device()
+[04-13_07:04:35:106] mbim_device_caps_query()
+[04-13_07:04:35:139] DeviceId: 866758045439136
+[04-13_07:04:35:139] FirmwareInfo: 
EC25EFAR06A11M4G +[04-13_07:04:35:139] HardwareInfo: QUECTEL Mobile Broadband Modul +[04-13_07:04:35:139] mbim_device_services_query() +[04-13_07:04:35:170] mbim_set_radio_state( 1 ) +[04-13_07:04:35:202] HwRadioState: 1, SwRadioState: 1 +[04-13_07:04:35:202] mbim_subscriber_status_query() +[04-13_07:04:35:267] SubscriberId: 460028563800461 +[04-13_07:04:35:267] SimIccId: 89860015120716380461 +[04-13_07:04:35:267] SubscriberReadyState NotInitialized -> Initialized +[04-13_07:04:35:267] mbim_register_state_query() +[04-13_07:04:35:297] RegisterState Unknown -> Home +[04-13_07:04:35:298] mbim_packet_service_query() +[04-13_07:04:35:329] PacketServiceState Unknown -> Attached +[04-13_07:04:35:330] mbim_query_connect(sessionID=1) +[04-13_07:04:35:361] ActivationState Unknown -> Deactivated +[04-13_07:04:35:362] ifconfig wwan0.1 0.0.0.0 +[04-13_07:04:35:373] ifconfig wwan0.1 down +[04-13_07:04:35:383] mbim_set_connect(onoff=1, sessionID=1) +[04-13_07:04:35:426] ActivationState Deactivated -> Activated +[04-13_07:04:35:426] mbim_ip_config(sessionID=1) +[04-13_07:04:35:457] < SessionId = 1 +[04-13_07:04:35:457] < IPv4ConfigurationAvailable = 0xf +[04-13_07:04:35:457] < IPv6ConfigurationAvailable = 0x0 +[04-13_07:04:35:457] < IPv4AddressCount = 0x1 +[04-13_07:04:35:458] < IPv4AddressOffset = 0x3c +[04-13_07:04:35:458] < IPv6AddressCount = 0x0 +[04-13_07:04:35:458] < IPv6AddressOffset = 0x0 +[04-13_07:04:35:458] < IPv4 = 10.129.90.29/30 +[04-13_07:04:35:458] < gw = 10.129.90.30 +[04-13_07:04:35:458] < dns1 = 211.138.180.2 +[04-13_07:04:35:458] < dns2 = 211.138.180.3 +[04-13_07:04:35:458] < ipv4 mtu = 1500 +[04-13_07:04:35:489] ifconfig wwan0 up +[04-13_07:04:35:509] ifconfig wwan0.1 down +[04-13_07:04:35:522] ifconfig wwan0.1 up +[04-13_07:04:35:535] ip -4 address flush dev wwan0.1 +[04-13_07:04:35:545] ip -4 address add 10.129.90.29/30 dev wwan0.1 +[04-13_07:04:35:556] ip -4 route add default via 10.129.90.30 dev wwan0.1 + +root@ZhuoTK:/# quectel-CM -n 2 -s 4gnet & +[04-13_07:04:45:150] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-13_07:04:45:152] Find /sys/bus/usb/devices/1-1.3 idVendor=0x2c7c idProduct=0x125, bus=0x001, dev=0x004 +[04-13_07:04:45:154] Auto find qmichannel = /dev/cdc-wdm0 +[04-13_07:04:45:154] Auto find usbnet_adapter = wwan0 +[04-13_07:04:45:154] netcard driver = cdc_mbim, driver version = 22-Aug-2005 +[04-13_07:04:45:155] mbim_qmap_mode = 4, vlan_id = 0x02, qmap_netcard = wwan0.2 +[04-13_07:04:45:155] Modem works in MBIM mode +[04-13_07:04:45:155] handle_client_connect client_fd=6, client_idx=2 +[04-13_07:04:45:156] connect to quectel-mbim-proxy sockfd = 7 +[04-13_07:04:45:156] cdc_wdm_fd = 7 +[04-13_07:04:45:156] mbim_open_device() +[04-13_07:04:46:025] mbim_device_caps_query() +[04-13_07:04:46:056] DeviceId: 866758045439136 +[04-13_07:04:46:056] FirmwareInfo: EC25EFAR06A11M4G +[04-13_07:04:46:056] HardwareInfo: QUECTEL Mobile Broadband Modul +[04-13_07:04:46:056] mbim_device_services_query() +[04-13_07:04:46:088] mbim_set_radio_state( 1 ) +[04-13_07:04:46:119] HwRadioState: 1, SwRadioState: 1 +[04-13_07:04:46:119] mbim_subscriber_status_query() +[04-13_07:04:46:183] SubscriberId: 460028563800461 +[04-13_07:04:46:184] SimIccId: 89860015120716380461 +[04-13_07:04:46:184] SubscriberReadyState NotInitialized -> Initialized +[04-13_07:04:46:184] mbim_register_state_query() +[04-13_07:04:46:216] RegisterState Unknown -> Home +[04-13_07:04:46:216] mbim_packet_service_query() +[04-13_07:04:46:248] PacketServiceState Unknown -> Attached +[04-13_07:04:46:248] 
mbim_query_connect(sessionID=2) +[04-13_07:04:46:280] ActivationState Unknown -> Deactivated +[04-13_07:04:46:280] ifconfig wwan0.2 0.0.0.0 +[04-13_07:04:46:291] ifconfig wwan0.2 down +[04-13_07:04:46:304] mbim_set_connect(onoff=1, sessionID=2) +[04-13_07:04:46:504] ActivationState Deactivated -> Activated +[04-13_07:04:46:505] mbim_ip_config(sessionID=2) +[04-13_07:04:46:537] < SessionId = 2 +[04-13_07:04:46:537] < IPv4ConfigurationAvailable = 0xf +[04-13_07:04:46:537] < IPv6ConfigurationAvailable = 0x0 +[04-13_07:04:46:538] < IPv4AddressCount = 0x1 +[04-13_07:04:46:538] < IPv4AddressOffset = 0x3c +[04-13_07:04:46:538] < IPv6AddressCount = 0x0 +[04-13_07:04:46:538] < IPv6AddressOffset = 0x0 +[04-13_07:04:46:538] < IPv4 = 10.129.37.205/30 +[04-13_07:04:46:538] < gw = 10.129.37.206 +[04-13_07:04:46:538] < dns1 = 211.138.180.2 +[04-13_07:04:46:538] < dns2 = 211.138.180.3 +[04-13_07:04:46:538] < ipv4 mtu = 1500 +[04-13_07:04:46:569] ifconfig wwan0 up +[04-13_07:04:46:579] ifconfig wwan0.2 up +[04-13_07:04:46:592] ip -4 address flush dev wwan0.2 +[04-13_07:04:46:602] ip -4 address add 10.129.37.205/30 dev wwan0.2 +[04-13_07:04:46:613] ip -4 route add default via 10.129.37.206 dev wwan0.2 + +root@ZhuoTK:/# ifconfig wwan0.1 +wwan0.1 Link encap:Ethernet HWaddr A2:58:DC:4D:DD:CA + inet addr:10.129.90.29 Bcast:0.0.0.0 Mask:255.255.255.252 + inet6 addr: fe80::a058:dcff:fe4d:ddca/64 Scope:Link + UP BROADCAST RUNNING NOARP MULTICAST MTU:1500 Metric:1 + RX packets:4 errors:0 dropped:0 overruns:0 frame:0 + TX packets:13 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:0 + RX bytes:304 (304.0 B) TX bytes:1170 (1.1 KiB) + +root@ZhuoTK:/# ifconfig wwan0.2 +wwan0.2 Link encap:Ethernet HWaddr A2:58:DC:4D:DD:CA + inet addr:10.129.37.205 Bcast:0.0.0.0 Mask:255.255.255.252 + inet6 addr: fe80::a058:dcff:fe4d:ddca/64 Scope:Link + UP BROADCAST RUNNING NOARP MULTICAST MTU:1500 Metric:1 + RX packets:0 errors:0 dropped:0 overruns:0 frame:0 + TX packets:8 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:0 + RX bytes:0 (0.0 B) TX bytes:720 (720.0 B) + +root@ZhuoTK:/# ip ro del 8.8.8.8/32 +RTNETLINK answers: No such process +root@ZhuoTK:/# ip ro add 8.8.8.8/32 dev wwan0.1 +root@ZhuoTK:/# ping 8.8.8.8 +PING 8.8.8.8 (8.8.8.8): 56 data bytes +64 bytes from 8.8.8.8: seq=0 ttl=52 time=98.584 ms + +root@ZhuoTK:/# ip ro del 8.8.8.8/32 +root@ZhuoTK:/# ip ro del 8.8.8.8/32 +RTNETLINK answers: No such process +root@ZhuoTK:/# ip ro add 8.8.8.8/32 dev wwan0.2 +root@ZhuoTK:/# ping 8.8.8.8 +PING 8.8.8.8 (8.8.8.8): 56 data bytes +64 bytes from 8.8.8.8: seq=0 ttl=111 time=101.770 ms \ No newline at end of file diff --git a/package/wwan/driver/quectel_cm_5G/src/log/ecm_ncm_rndis.txt b/package/wwan/driver/quectel_cm_5G/src/log/ecm_ncm_rndis.txt new file mode 100644 index 000000000..2ae7e6b59 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/log/ecm_ncm_rndis.txt @@ -0,0 +1,129 @@ +# ./quectel-CM -s cmnet & +[04-21_17:35:14:362] Quectel_QConnectManager_Linux_V1.6.0.26 +[04-21_17:35:14:363] Find /sys/bus/usb/devices/2-4 idVendor=0x2c7c idProduct=0x8101, bus=0x002, dev=0x016 +[04-21_17:35:14:363] Auto find qmichannel = /dev/ttyUSB0 +[04-21_17:35:14:363] Auto find usbnet_adapter = usb0 +[04-21_17:35:14:363] netcard driver = cdc_ncm, driver version = 22-Aug-2005 +[04-21_17:35:14:363] Modem works in ECM_RNDIS_NCM mode +[04-21_17:35:14:371] atc_fd = 7 +[04-21_17:35:14:371] AT> ATE0Q0V1 +[04-21_17:35:14:372] AT< RDATE0Q0V1 +[04-21_17:35:14:372] AT< COMMAND NOT SUPPORT +[04-21_17:35:15:373] AT> 
AT+QCFG="usbnet" +[04-21_17:35:15:373] AT< +QCFG: "usbnet",5 +[04-21_17:35:15:373] AT< OK +[04-21_17:35:15:373] AT> AT+QNETDEVCTL=? +[04-21_17:35:15:374] AT< +QNETDEVCTL: (1-11),(0,1),(0,1) +[04-21_17:35:15:374] AT< OK +[04-21_17:35:15:374] AT> AT+CGREG=2 +[04-21_17:35:15:376] AT< OK +[04-21_17:35:15:376] AT> AT+CEREG=2 +[04-21_17:35:15:381] AT< OK +[04-21_17:35:15:381] AT> AT+C5GREG=2 +[04-21_17:35:15:384] AT< OK +[04-21_17:35:15:384] AT> AT+QNETDEVSTATUS=? +[04-21_17:35:15:385] AT< +QNETDEVSTATUS: (1-11) +[04-21_17:35:15:385] AT< OK +[04-21_17:35:15:385] AT> AT+QCFG="NAT" +[04-21_17:35:15:385] AT< +QCFG: "nat",0 +[04-21_17:35:15:385] AT< OK +[04-21_17:35:15:385] AT> AT+CGMR +[04-21_17:35:15:386] AT< RG801HEAAAR03A01M8G +[04-21_17:35:15:386] AT< OK +[04-21_17:35:15:386] AT> AT+CPIN? +[04-21_17:35:15:388] AT< +CPIN: READY +[04-21_17:35:15:388] AT< OK +[04-21_17:35:15:389] AT> AT+QCCID +[04-21_17:35:15:393] AT< +QCCID: 89860015120716380461 +[04-21_17:35:15:393] AT< OK +[04-21_17:35:15:393] requestGetICCID 89860015120716380461 +[04-21_17:35:15:393] AT> AT+CIMI +[04-21_17:35:15:409] AT< 460028563800461 +[04-21_17:35:15:409] AT< OK +[04-21_17:35:15:409] requestGetIMSI 460028563800461 +[04-21_17:35:15:409] AT> AT+QICSGP=1 +[04-21_17:35:15:411] AT< +QICSGP: 1,1,"cment","","",0,,0, +[04-21_17:35:15:411] AT< OK +[04-21_17:35:15:411] AT> AT+QICSGP=1 +[04-21_17:35:15:415] AT< +QICSGP: 1,1,"cment","","",0,,0, +[04-21_17:35:15:415] AT< OK +[04-21_17:35:15:415] AT> AT+COPS=3,0;+COPS?;+COPS=3,1;+COPS?;+COPS=3,2;+COPS? +[04-21_17:35:15:421] AT< +COPS: 0,0,"CHINA MOBILE",12 +[04-21_17:35:15:421] AT< +COPS: 0,1,"CMCC",12 +[04-21_17:35:15:421] AT< +COPS: 0,2,"46000",12 +[04-21_17:35:15:421] AT< OK +[04-21_17:35:15:421] AT> AT+C5GREG? +[04-21_17:35:15:424] AT< +C5GREG: 2,1,"46550B","0000000170C23000",11,1,"01" +[04-21_17:35:15:424] AT< OK +[04-21_17:35:15:424] AT> at+cops? +[04-21_17:35:15:427] AT< +COPS: 0,2,"46000",12 +[04-21_17:35:15:427] AT< OK +[04-21_17:35:15:427] AT> at+qeng="servingcell" +[04-21_17:35:15:441] AT< +QENG: "servingcell","NOCONN","NR5G-SA","TDD",460,00,170C23000,901,46550B,504990,41,-,-54,-10,16,-,- +[04-21_17:35:15:441] AT< OK +[04-21_17:35:15:441] AT> AT+QNETDEVSTATUS=1 +[04-21_17:35:15:445] AT< ERROR +[04-21_17:35:15:445] ifconfig usb0 0.0.0.0 +[04-21_17:35:15:446] ifconfig usb0 down +[04-21_17:35:15:448] AT> AT+QNETDEVCTL=1,1,0 +[04-21_17:35:15:454] AT< OK +[04-21_17:35:15:456] AT> AT+QNETDEVSTATUS=1 +[04-21_17:35:15:458] AT< ERROR +[04-21_17:35:15:989] AT< +QNETDEVSTATUS:1,1,"IPV4" +[04-21_17:35:16:459] AT> AT+QNETDEVSTATUS=1 +[04-21_17:35:16:461] AT< +QNETDEVSTATUS: 4F10190A,E0FFFFFF,4110190A,4110190A,02B48AD3,03B48AD3, 85600, 85600 +[04-21_17:35:16:461] AT< OK +[04-21_17:35:16:461] AT> AT+QNETDEVSTATUS=1 +[04-21_17:35:16:462] AT< +QNETDEVSTATUS: 4F10190A,E0FFFFFF,4110190A,4110190A,02B48AD3,03B48AD3, 85600, 85600 +[04-21_17:35:16:462] AT< OK +[04-21_17:35:16:462] requestGetIPAddress 10.25.16.79 +[04-21_17:35:16:462] AT> at+cops? 
+[04-21_17:35:16:463] AT< +COPS: 0,2,"46000",12
+[04-21_17:35:16:463] AT< OK
+[04-21_17:35:16:463] AT> at+qeng="servingcell"
+[04-21_17:35:16:465] AT< +QENG: "servingcell","CONNECT","NR5G-SA","TDD",460,00,170C23000,901,46550B,504990,41,-,-52,-11,15,-,-
+[04-21_17:35:16:465] AT< OK
+[04-21_17:35:16:465] AT> AT+QNETDEVSTATUS=1
+[04-21_17:35:16:466] AT< +QNETDEVSTATUS: 4F10190A,E0FFFFFF,4110190A,4110190A,02B48AD3,03B48AD3, 85600, 85600
+[04-21_17:35:16:466] AT< OK
+[04-21_17:35:16:466] ifconfig usb0 up
+[04-21_17:35:16:470] busybox udhcpc -f -n -q -t 5 -i usb0
+udhcpc: started, v1.30.1
+udhcpc: sending discover
+udhcpc: sending select for 10.25.16.79
+udhcpc: lease of 10.25.16.79 obtained, lease time 518400
+[04-21_17:35:16:602] /etc/udhcpc/default.script: Resetting default routes
+SIOCDELRT: No such process
+SIOCADDRT: Network is unreachable
+[04-21_17:35:16:606] /etc/udhcpc/default.script: Adding DNS 211.138.180.2
+[04-21_17:35:16:606] /etc/udhcpc/default.script: Adding DNS 211.138.180.3
+[04-21_17:35:16:655] AT> at+cops?
+[04-21_17:35:16:656] AT< +COPS: 0,2,"46000",12
+[04-21_17:35:16:656] AT< OK
+[04-21_17:35:16:656] AT> at+qeng="servingcell"
+[04-21_17:35:16:657] AT< +QENG: "servingcell","CONNECT","NR5G-SA","TDD",460,00,170C23000,901,46550B,504990,41,-,-50,-11,17,-,-
+[04-21_17:35:16:658] AT< OK
+err = 16
+[04-21_17:35:16:658] AT> AT+QNETDEVSTATUS=1
+[04-21_17:35:16:659] AT< +QNETDEVSTATUS: 4F10190A,E0FFFFFF,4110190A,4110190A,02B48AD3,03B48AD3, 85600, 85600
+[04-21_17:35:16:659] AT< OK
+
+root@carl-ThinkPad-X1-Carbon-7th:/home/carl/q/quectel-CM# ifconfig usb0
+usb0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
+ inet 10.25.16.79 netmask 255.255.255.224 broadcast 10.25.16.95
+ inet6 fe80::5c98:e9d4:c82d:5f prefixlen 64 scopeid 0x20<link>
+ ether 0c:5b:8f:27:9a:64 txqueuelen 1000 (Ethernet)
+ RX packets 7 bytes 1656 (1.6 KB)
+ RX errors 0 dropped 0 overruns 0 frame 0
+ TX packets 32 bytes 5112 (5.1 KB)
+ TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
+
+root@carl-ThinkPad-X1-Carbon-7th:/home/carl/q/quectel-CM# [04-21_17:35:31:670] AT> at+cops? 
+[04-21_17:35:31:671] AT< +COPS: 0,2,"46000",12 +[04-21_17:35:31:671] AT< OK +[04-21_17:35:31:671] AT> at+qeng="servingcell" +[04-21_17:35:31:673] AT< +QENG: "servingcell","CONNECT","NR5G-SA","TDD",460,00,170C23000,901,46550B,504990,41,-,-48,-10,17,-,- +[04-21_17:35:31:673] AT< OK +[04-21_17:35:31:673] AT> AT+QNETDEVSTATUS=1 +[04-21_17:35:31:674] AT< +QNETDEVSTATUS: 4F10190A,E0FFFFFF,4110190A,4110190A,02B48AD3,03B48AD3, 85600, 85600 +[04-21_17:35:31:674] AT< OK diff --git a/package/wwan/driver/quectel_cm_5G/src/log/gobinet.txt b/package/wwan/driver/quectel_cm_5G/src/log/gobinet.txt new file mode 100644 index 000000000..dd7f50148 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/log/gobinet.txt @@ -0,0 +1,62 @@ +root@ZhuoTK:/# dmesg +[ 230.590000] GobiNet 1-1.3:1.4 usb0: register 'GobiNet' at usb-101c0000.ehci-1.3, GobiNet Ethernet Device, 02:50:f4:00:00:00 +[ 230.600000] creating qcqmi0 + +root@ZhuoTK:/# quectel-CM -s cmnet & +[04-13_03:24:58:213] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-13_03:24:58:216] Find /sys/bus/usb/devices/1-1.3 idVendor=0x2c7c idProduct=0x125, bus=0x001, dev=0x004 +[04-13_03:24:58:218] Auto find qmichannel = /dev/qcqmi0 +[04-13_03:24:58:218] Auto find usbnet_adapter = usb0 +[04-13_03:24:58:218] netcard driver = GobiNet, driver version = V1.6.2.13 +[04-13_03:24:58:219] Modem works in QMI mode +[04-13_03:24:58:260] Get clientWDS = 7 +[04-13_03:24:58:292] Get clientDMS = 8 +[04-13_03:24:58:324] Get clientNAS = 9 +[04-13_03:24:58:355] Get clientUIM = 10 +[04-13_03:24:58:388] Get clientWDA = 11 +[04-13_03:24:58:420] requestBaseBandVersion EC25EFAR06A11M4G +[04-13_03:24:58:548] requestGetSIMStatus SIMStatus: SIM_READY +[04-13_03:24:58:549] requestSetProfile[1] cmnet///0 +[04-13_03:24:58:613] requestGetProfile[1] cmnet///0 +[04-13_03:24:58:645] requestRegistrationState2 MCC: 460, MNC: 0, PS: Attached, DataCap: LTE +[04-13_03:24:58:677] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED +[04-13_03:24:58:677] ifconfig usb0 0.0.0.0 +[04-13_03:24:58:696] ifconfig usb0 down +[04-13_03:24:59:028] requestSetupDataCall WdsConnectionIPv4Handle: 0x87245bd0 +[04-13_03:24:59:189] ifconfig usb0 up +[04-13_03:24:59:214] you are use OpenWrt? +[04-13_03:24:59:215] should not calling udhcpc manually? +[04-13_03:24:59:215] should modify /etc/config/network as below? +[04-13_03:24:59:215] config interface wan +[04-13_03:24:59:215] option ifname usb0 +[04-13_03:24:59:215] option proto dhcp +[04-13_03:24:59:215] should use "/sbin/ifstaus wan" to check usb0 's status? +[04-13_03:24:59:216] busybox udhcpc -f -n -q -t 5 -i usb0 +[04-13_03:24:59:226] udhcpc (v1.23.2) started +[04-13_03:24:59:238] Sending discover... +[04-13_03:24:59:248] Sending select for 10.198.78.154... 
+[04-13_03:24:59:251] Lease of 10.198.78.154 obtained, lease time 7200 +[04-13_03:24:59:257] udhcpc: ifconfig usb0 10.198.78.154 netmask 255.255.255.252 broadcast + +[04-13_03:24:59:266] udhcpc: setting default routers: 10.198.78.153 + +root@ZhuoTK:/# ifconfig usb0 +usb0 Link encap:Ethernet HWaddr 02:50:F4:00:00:00 + inet addr:10.198.78.154 Mask:255.255.255.252 + inet6 addr: fe80::50:f4ff:fe00:0/64 Scope:Link + UP RUNNING NOARP MTU:1500 Metric:1 + RX packets:6 errors:0 dropped:0 overruns:0 frame:0 + TX packets:6 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:1000 + RX bytes:916 (916.0 B) TX bytes:960 (960.0 B) + +root@ZhuoTK:/# ip ro show +default via 10.198.78.153 dev usb0 +10.198.78.152/30 dev usb0 proto kernel scope link src 10.198.78.154 +192.168.1.0/24 dev br-lan proto kernel scope link src 192.168.1.251 + +root@ZhuoTK:/# killall quectel-CM +[04-13_03:25:38:779] requestDeactivateDefaultPDP WdsConnectionIPv4Handle +[04-13_03:25:39:061] ifconfig usb0 0.0.0.0 +[04-13_03:25:39:072] ifconfig usb0 down +[04-13_03:25:39:284] GobiNetThread exit +[04-13_03:25:39:285] qmi_main exit diff --git a/package/wwan/driver/quectel_cm_5G/src/log/gobinet_bridge.txt b/package/wwan/driver/quectel_cm_5G/src/log/gobinet_bridge.txt new file mode 100644 index 000000000..f19214b63 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/log/gobinet_bridge.txt @@ -0,0 +1,60 @@ +root@ZhuoTK:/# insmod GobiNet.ko +[ 80.460000] GobiNet: Quectel_Linux&Android_GobiNet_Driver_V1.6.2.13 +[ 80.460000] usbcore: registered new interface driver GobiNet +[ 97.710000] usb 1-1.3: new high-speed USB device number 3 using ehci-platform +[ 97.930000] usb 1-1.3: GSM modem (1-port) converter now attached to ttyUSB103 +[ 97.950000] GobiNet 1-1.3:1.4 usb0: register 'GobiNet' at usb-101c0000.ehci-1.3, GobiNet Ethernet Device, 02:50:f4:00:00:00 +[ 97.960000] creating qcqmi0 + +root@ZhuoTK:/# brctl addbr br0 +root@ZhuoTK:/# brctl addif br0 eth0.1 +root@ZhuoTK:/# brctl addif br0 usb0 +root@ZhuoTK:/# brctl show +bridge name bridge id STP enabled interfaces +br0 8000.00ca019197b9 no eth0.1 + usb0 + +root@ZhuoTK:/# quectel-CM -s cment -b & +[04-13_05:13:18:213] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-13_05:13:18:216] Find /sys/bus/usb/devices/1-1.3 idVendor=0x2c7c idProduct=0x125, bus=0x001, dev=0x003 +[ 122.270000] net usb0: bridge_mode change to 0x1 +[04-13_05:13:18:218] Auto find qmichannel = /dev/qcqmi0 +[04-13_05:13:18:218] Auto find usbnet_adapter = usb0 +[04-13_05:13:18:218] netcard driver = GobiNet, driver version = V1.6.2.13 +[04-13_05:13:18:224] Modem works in QMI mode +[04-13_05:13:18:251] Get clientWDS = 7 +[04-13_05:13:18:282] Get clientDMS = 8 +[04-13_05:13:18:316] Get clientNAS = 9 +[04-13_05:13:18:347] Get clientUIM = 10 +[04-13_05:13:18:379] Get clientWDA = 11 +[04-13_05:13:18:411] requestBaseBandVersion EC25EFAR06A11M4G +[04-13_05:13:18:539] requestGetSIMStatus SIMStatus: SIM_READY +[04-13_05:13:18:540] requestSetProfile[1] cment///0 +[04-13_05:13:18:603] requestGetProfile[1] cment///0 +[04-13_05:13:18:637] requestRegistrationState2 MCC: 460, MNC: 0, PS: Attached, DataCap: LTE +[04-13_05:13:18:666] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED +[04-13_05:13:18:667] ifconfig usb0 0.0.0.0 +[04-13_05:13:18:687] ifconfig usb0 down +[04-13_05:13:19:083] requestSetupDataCall WdsConnectionIPv4Handle: 0x8724d220 +[04-13_05:13:19:243] ifconfig usb0 up +[04-13_05:13:19:270] echo '0xa218480' > /sys/class/net/usb0/bridge_ipv4 + +root@ZhuoTK:/# ifconfig br0 up +[ 135.530000] usb0 PC Mac Address: 
00:0e:c6:a6:6c:f1 +[ 135.570000] usb0 PC Mac Address: 00:0e:c6:a6:6c:f1 +[ 135.580000] usb0 PC Mac Address: 00:0e:c6:a6:6c:f1 +[ 135.610000] usb0 sip = 10.33.132.128, tip=10.33.132.129, ipv4=10.33.132.128 +[ 135.620000] usb0 sip = 10.33.132.128, tip=10.33.132.129, ipv4=10.33.132.128 +[ 135.910000] usb0 sip = 0.0.0.0, tip=10.33.132.128, ipv4=10.33.132.128 +[ 136.000000] usb0 sip = 10.33.132.128, tip=10.33.132.129, ipv4=10.33.132.128 +[ 136.910000] usb0 sip = 0.0.0.0, tip=10.33.132.128, ipv4=10.33.132.128 +[ 137.910000] usb0 sip = 0.0.0.0, tip=10.33.132.128, ipv4=10.33.132.128 +[ 138.740000] usb0 sip = 10.33.132.128, tip=10.33.132.129, ipv4=10.33.132.128 +[ 138.910000] usb0 sip = 10.33.132.128, tip=10.33.132.128, ipv4=10.33.132.128 +[ 139.000000] usb0 sip = 10.33.132.128, tip=10.33.132.129, ipv4=10.33.132.128 +[ 140.860000] usb0 sip = 10.33.132.128, tip=10.33.132.129, ipv4=10.33.132.128 +[ 143.160000] br0: port 2(usb0) entered forwarding state +[ 143.160000] br0: port 1(eth0.1) entered forwarding state +[ 148.870000] usb0 sip = 10.33.132.128, tip=10.33.132.129, ipv4=10.33.132.128 +[ 149.010000] usb0 sip = 10.33.132.128, tip=10.33.132.129, ipv4=10.33.132.128 +[ 165.630000] usb0 sip = 10.33.132.128, tip=10.33.132.129, ipv4=10.33.132.128 diff --git a/package/wwan/driver/quectel_cm_5G/src/log/gobinet_qmap=1.txt b/package/wwan/driver/quectel_cm_5G/src/log/gobinet_qmap=1.txt new file mode 100644 index 000000000..3d9499c36 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/log/gobinet_qmap=1.txt @@ -0,0 +1,45 @@ +root@ZhuoTK:/# insmod GobiNet.ko qmap_mode=1 +[ 798.480000] GobiNet: Quectel_Linux&Android_GobiNet_Driver_V1.6.2.13 +[ 798.490000] GobiNet 1-1.3:1.4 usb0: register 'GobiNet' at usb-101c0000.ehci-1.3, GobiNet Ethernet Device, 02:50:f4:00:00:00 +[ 798.510000] creating qcqmi0 +[ 798.510000] usbcore: registered new interface driver GobiNet +[ 799.620000] GobiNet::QMIWDASetDataFormat qmap settings qmap_version=5, rx_size=4096, tx_size=4096 +[ 799.630000] GobiNet::QMIWDASetDataFormat qmap settings ul_data_aggregation_max_size=4096, ul_data_aggregation_max_datagrams=16 + +root@ZhuoTK:/# quectel-CM -s cmnet & +[04-13_03:32:31:248] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-13_03:32:31:251] Find /sys/bus/usb/devices/1-1.3 idVendor=0x2c7c idProduct=0x125, bus=0x001, dev=0x005 +[04-13_03:32:31:253] Auto find qmichannel = /dev/qcqmi0 +[04-13_03:32:31:253] Auto find usbnet_adapter = usb0 +[04-13_03:32:31:253] netcard driver = GobiNet, driver version = V1.6.2.13 +[04-13_03:32:31:253] qmap_mode = 1, qmap_version = 5, qmap_size = 4096, muxid = 0x81, qmap_netcard = usb0 +[04-13_03:32:31:254] Modem works in QMI mode +[04-13_03:32:31:289] Get clientWDS = 7 +[04-13_03:32:31:320] Get clientDMS = 8 +[04-13_03:32:31:353] Get clientNAS = 9 +[04-13_03:32:31:385] Get clientUIM = 10 +[04-13_03:32:31:417] requestBaseBandVersion EC25EFAR06A11M4G +[04-13_03:32:31:545] requestGetSIMStatus SIMStatus: SIM_READY +[04-13_03:32:31:545] requestSetProfile[1] cmnet///0 +[04-13_03:32:31:609] requestGetProfile[1] cmnet///0 +[04-13_03:32:31:641] requestRegistrationState2 MCC: 460, MNC: 0, PS: Attached, DataCap: LTE +[04-13_03:32:31:673] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED +[04-13_03:32:31:674] ifconfig usb0 0.0.0.0 +[04-13_03:32:31:698] ifconfig usb0 down +[04-13_03:32:31:770] requestSetupDataCall WdsConnectionIPv4Handle: 0x872481a0 +[ 857.000000] net usb0: link_state 0x0 -> 0x1 +[04-13_03:32:31:902] ifconfig usb0 up +[04-13_03:32:31:928] you are use OpenWrt? 
+[04-13_03:32:31:928] should not calling udhcpc manually? +[04-13_03:32:31:928] should modify /etc/config/network as below? +[04-13_03:32:31:928] config interface wan +[04-13_03:32:31:928] option ifname usb0 +[04-13_03:32:31:929] option proto dhcp +[04-13_03:32:31:929] should use "/sbin/ifstaus wan" to check usb0 's status? +[04-13_03:32:31:929] busybox udhcpc -f -n -q -t 5 -i usb0 +[04-13_03:32:31:939] udhcpc (v1.23.2) started +[04-13_03:32:31:951] Sending discover... +[04-13_03:32:31:956] Sending select for 10.199.102.71... +[04-13_03:32:31:959] Lease of 10.199.102.71 obtained, lease time 7200 +[04-13_03:32:31:964] udhcpc: ifconfig usb0 10.199.102.71 netmask 255.255.255.240 broadcast + +[04-13_03:32:31:974] udhcpc: setting default routers: 10.199.102.72 diff --git a/package/wwan/driver/quectel_cm_5G/src/log/gobinet_qmap=1_bridge.txt b/package/wwan/driver/quectel_cm_5G/src/log/gobinet_qmap=1_bridge.txt new file mode 100644 index 000000000..cf61443da --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/log/gobinet_qmap=1_bridge.txt @@ -0,0 +1,62 @@ +root@ZhuoTK:/# insmod GobiNet.ko qmap_mode=1 +[ 41.540000] GobiNet: Quectel_Linux&Android_GobiNet_Driver_V1.6.2.13 +[ 41.550000] GobiNet 1-1.3:1.4 usb0: register 'GobiNet' at usb-101c0000.ehci-1.3, GobiNet Ethernet Device, 02:50:f4:00:00:00 +[ 41.570000] creating qcqmi0 +[ 41.570000] usbcore: registered new interface driver GobiNet +[ 42.700000] GobiNet::QMIWDASetDataFormat qmap settings qmap_version=5, rx_size=4096, tx_size=4096 +[ 42.710000] GobiNet::QMIWDASetDataFormat qmap settings ul_data_aggregation_max_size=4096, ul_data_aggregation_max_datagrams=16 + +root@ZhuoTK:/# brctl addbr br0 +root@ZhuoTK:/# brctl addif br0 eth0.1 +root@ZhuoTK:/# brctl addif br0 usb0 +root@ZhuoTK:/# brctl show +bridge name bridge id STP enabled interfaces +br0 8000.00ca019197b9 no eth0.1 + usb0 + +root@ZhuoTK:/# quectel-CM -s cmnet -b & +# [04-13_05:12:29:338] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-13_05:12:29:340] Find /sys/bus/usb/devices/1-1.3 idVendor=0x2c7c idProduct=0x125, bus=0x001, dev=0x003 +[ 73.380000] net usb0: bridge_mode change to 0x1 +[04-13_05:12:29:342] Auto find qmichannel = /dev/qcqmi0 +[04-13_05:12:29:342] Auto find usbnet_adapter = usb0 +[04-13_05:12:29:342] netcard driver = GobiNet, driver version = V1.6.2.13 +[04-13_05:12:29:343] qmap_mode = 1, qmap_version = 5, qmap_size = 4096, muxid = 0x81, qmap_netcard = usb0 +[04-13_05:12:29:348] Modem works in QMI mode +[04-13_05:12:29:382] Get clientWDS = 7 +[04-13_05:12:29:414] Get clientDMS = 8 +[04-13_05:12:29:447] Get clientNAS = 9 +[04-13_05:12:29:479] Get clientUIM = 10 +[04-13_05:12:29:512] requestBaseBandVersion EC25EFAR06A11M4G +[04-13_05:12:29:640] requestGetSIMStatus SIMStatus: SIM_READY +[04-13_05:12:29:640] requestSetProfile[1] cmnet///0 +[04-13_05:12:29:704] requestGetProfile[1] cmnet///0 +[04-13_05:12:29:735] requestRegistrationState2 MCC: 460, MNC: 0, PS: Attached, DataCap: LTE +[04-13_05:12:29:767] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED +[04-13_05:12:29:768] ifconfig usb0 0.0.0.0 +[04-13_05:12:29:792] ifconfig usb0 down +[04-13_05:12:29:863] requestSetupDataCall WdsConnectionIPv4Handle: 0x8724d820 +[ 74.030000] net usb0: link_state 0x0 -> 0x1 +[04-13_05:12:29:996] ifconfig usb0 up +[04-13_05:12:30:022] echo '0xa16b769' > /sys/class/net/usb0/bridge_ipv4 + +root@ZhuoTK:/# ifconfig br0 up +[ 82.210000] br0: port 2(usb0) entered forwarding state +[ 82.210000] br0: port 2(usb0) entered forwarding state +[ 82.220000] br0: port 1(eth0.1) entered forwarding state 
+[ 82.220000] br0: port 1(eth0.1) entered forwarding state +[ 88.830000] rt305x-esw 10110000.esw: link changed 0x01 +[ 89.010000] usb0 PC Mac Address: 00:0e:c6:a6:6c:f1 +[ 89.040000] usb0 PC Mac Address: 00:0e:c6:a6:6c:f1 +[ 89.050000] usb0 PC Mac Address: 00:0e:c6:a6:6c:f1 +[ 89.120000] usb0 sip = 10.22.183.105, tip=10.22.183.106, ipv4=10.22.183.105 +[ 89.350000] usb0 sip = 0.0.0.0, tip=10.22.183.105, ipv4=10.22.183.105 +[ 89.400000] usb0 sip = 10.22.183.105, tip=10.22.183.106, ipv4=10.22.183.105 +[ 89.520000] usb0 sip = 10.22.183.105, tip=10.22.183.106, ipv4=10.22.183.105 +[ 90.350000] usb0 sip = 0.0.0.0, tip=10.22.183.105, ipv4=10.22.183.105 +[ 91.350000] usb0 sip = 0.0.0.0, tip=10.22.183.105, ipv4=10.22.183.105 +[ 92.350000] usb0 sip = 10.22.183.105, tip=10.22.183.105, ipv4=10.22.183.105 +[ 92.430000] usb0 sip = 10.22.183.105, tip=10.22.183.106, ipv4=10.22.183.105 +[ 92.660000] usb0 sip = 10.22.183.105, tip=10.22.183.106, ipv4=10.22.183.105 +[ 97.240000] br0: port 2(usb0) entered forwarding state +[ 97.240000] br0: port 1(eth0.1) entered forwarding state diff --git a/package/wwan/driver/quectel_cm_5G/src/log/gobinet_qmap=4.txt b/package/wwan/driver/quectel_cm_5G/src/log/gobinet_qmap=4.txt new file mode 100644 index 000000000..e601c9863 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/log/gobinet_qmap=4.txt @@ -0,0 +1,146 @@ +root@ZhuoTK:/# insmod GobiNet.ko qmap_mode=4 +[ 970.380000] GobiNet: Quectel_Linux&Android_GobiNet_Driver_V1.6.2.13 +[ 970.380000] usbcore: registered new interface driver GobiNet +[ 989.620000] usb 1-1.3: new high-speed USB device number 6 using ehci-platform +[ 989.860000] GobiNet 1-1.3:1.4 usb0: register 'GobiNet' at usb-101c0000.ehci-1.3, GobiNet Ethernet Device, 02:50:f4:00:00:00 +[ 989.870000] creating qcqmi0 +[ 989.880000] GobiNet::qmap_register_device usb0.1 +[ 989.880000] GobiNet::qmap_register_device usb0.2 +[ 989.890000] GobiNet::qmap_register_device usb0.3 +[ 989.890000] GobiNet::qmap_register_device usb0.4 +[ 994.820000] GobiNet::QMIWDASetDataFormat qmap settings qmap_version=5, rx_size=4096, tx_size=4096 +[ 994.830000] GobiNet::QMIWDASetDataFormat qmap settings ul_data_aggregation_max_size=4096, ul_data_aggregation_max_datagrams=16 + +root@ZhuoTK:/# quectel-CM -n 1 -s cmnet & +[04-13_03:35:31:878] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-13_03:35:31:881] Find /sys/bus/usb/devices/1-1.3 idVendor=0x2c7c idProduct=0x125, bus=0x001, dev=0x006 +[04-13_03:35:31:882] Auto find qmichannel = /dev/qcqmi0 +[04-13_03:35:31:882] Auto find usbnet_adapter = usb0 +[04-13_03:35:31:883] netcard driver = GobiNet, driver version = V1.6.2.13 +[04-13_03:35:31:883] qmap_mode = 4, qmap_version = 5, qmap_size = 4096, muxid = 0x81, qmap_netcard = usb0.1 +[04-13_03:35:31:883] Modem works in QMI mode +[04-13_03:35:31:896] Get clientWDS = 7 +[04-13_03:35:31:927] Get clientDMS = 8 +[04-13_03:35:31:959] Get clientNAS = 9 +[04-13_03:35:31:992] Get clientUIM = 10 +[04-13_03:35:32:024] requestBaseBandVersion EC25EFAR06A11M4G +[04-13_03:35:32:152] requestGetSIMStatus SIMStatus: SIM_READY +[04-13_03:35:32:152] requestSetProfile[1] cmnet///0 +[04-13_03:35:32:216] requestGetProfile[1] cmnet///0 +[04-13_03:35:32:248] requestRegistrationState2 MCC: 460, MNC: 0, PS: Attached, DataCap: LTE +[04-13_03:35:32:279] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED +[04-13_03:35:32:280] ifconfig usb0 down +[04-13_03:35:32:290] ifconfig usb0.1 0.0.0.0 +[04-13_03:35:32:301] ifconfig usb0.1 down +[04-13_03:35:32:344] requestSetupDataCall WdsConnectionIPv4Handle: 0x8723eef0 +[ 
1037.570000] net usb0: link_state 0x0 -> 0x1 +[04-13_03:35:32:477] ifconfig usb0 up +[04-13_03:35:32:496] ifconfig usb0.1 up +[04-13_03:35:32:508] you are use OpenWrt? +[04-13_03:35:32:509] should not calling udhcpc manually? +[04-13_03:35:32:509] should modify /etc/config/network as below? +[04-13_03:35:32:509] config interface wan +[04-13_03:35:32:509] option ifname usb0.1 +[04-13_03:35:32:509] option proto dhcp +[04-13_03:35:32:509] should use "/sbin/ifstaus wan" to check usb0.1 's status? +[04-13_03:35:32:510] busybox udhcpc -f -n -q -t 5 -i usb0.1 +[04-13_03:35:32:520] udhcpc (v1.23.2) started +[04-13_03:35:32:532] Sending discover... +[04-13_03:35:32:540] Sending select for 10.187.142.20... +[04-13_03:35:32:545] Lease of 10.187.142.20 obtained, lease time 7200 +[04-13_03:35:32:550] udhcpc: ifconfig usb0.1 10.187.142.20 netmask 255.255.255.248 broadcast + +[04-13_03:35:32:560] udhcpc: setting default routers: 10.187.142.21 + +root@ZhuoTK:/# quectel-CM -n 2 -s 4gnet & +[04-13_03:35:38:766] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-13_03:35:38:769] Find /sys/bus/usb/devices/1-1.3 idVendor=0x2c7c idProduct=0x125, bus=0x001, dev=0x006 +[04-13_03:35:38:770] Auto find qmichannel = /dev/qcqmi0 +[04-13_03:35:38:770] Auto find usbnet_adapter = usb0 +[04-13_03:35:38:771] netcard driver = GobiNet, driver version = V1.6.2.13 +[04-13_03:35:38:771] qmap_mode = 4, qmap_version = 5, qmap_size = 4096, muxid = 0x82, qmap_netcard = usb0.2 +[04-13_03:35:38:771] Modem works in QMI mode +[04-13_03:35:38:809] Get clientWDS = 7 +[04-13_03:35:38:841] Get clientDMS = 8 +[04-13_03:35:38:873] Get clientNAS = 9 +[04-13_03:35:38:905] Get clientUIM = 10 +[04-13_03:35:38:937] requestBaseBandVersion EC25EFAR06A11M4G +[04-13_03:35:39:065] requestGetSIMStatus SIMStatus: SIM_READY +[04-13_03:35:39:065] requestSetProfile[2] 4gnet///0 +[04-13_03:35:39:129] requestGetProfile[2] 4gnet///0 +[04-13_03:35:39:161] requestRegistrationState2 MCC: 460, MNC: 0, PS: Attached, DataCap: LTE +[04-13_03:35:39:193] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED +[04-13_03:35:39:193] ifconfig usb0.2 0.0.0.0 +[04-13_03:35:39:206] ifconfig usb0.2 down +[04-13_03:35:39:417] requestSetupDataCall WdsConnectionIPv4Handle: 0x87252eb0 +[ 1044.650000] net usb0: link_state 0x1 -> 0x3 +[04-13_03:35:39:550] ifconfig usb0 up +[04-13_03:35:39:560] ifconfig usb0.2 up +[04-13_03:35:39:573] you are use OpenWrt? +[04-13_03:35:39:573] should not calling udhcpc manually? +[04-13_03:35:39:573] should modify /etc/config/network as below? +[04-13_03:35:39:573] config interface wan +[04-13_03:35:39:573] option ifname usb0.2 +[04-13_03:35:39:573] option proto dhcp +[04-13_03:35:39:573] should use "/sbin/ifstaus wan" to check usb0.2 's status? +[04-13_03:35:39:574] busybox udhcpc -f -n -q -t 5 -i usb0.2 +[04-13_03:35:39:585] udhcpc (v1.23.2) started +[04-13_03:35:39:597] Sending discover... +[04-13_03:35:39:601] Sending select for 10.197.125.183... 
+[04-13_03:35:39:606] Lease of 10.197.125.183 obtained, lease time 7200 +[04-13_03:35:39:611] udhcpc: ifconfig usb0.2 10.197.125.183 netmask 255.255.255.240 broadcast + +[04-13_03:35:39:621] udhcpc: setting default routers: 10.197.125.184 + +root@ZhuoTK:/# ifconfig usb0.1 +usb0.1 Link encap:Ethernet HWaddr 02:50:F4:00:00:00 + inet addr:10.187.142.20 Mask:255.255.255.248 + inet6 addr: fe80::50:f4ff:fe00:0/64 Scope:Link + UP RUNNING NOARP MTU:1500 Metric:1 + RX packets:4 errors:0 dropped:0 overruns:0 frame:0 + TX packets:10 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:1000 + RX bytes:764 (764.0 B) TX bytes:1824 (1.7 KiB) + +root@ZhuoTK:/# ifconfig usb0.2 +usb0.2 Link encap:Ethernet HWaddr 02:50:F4:00:00:00 + inet addr:10.197.125.183 Mask:255.255.255.240 + inet6 addr: fe80::50:f4ff:fe00:0/64 Scope:Link + UP RUNNING NOARP MTU:1500 Metric:1 + RX packets:3 errors:0 dropped:0 overruns:0 frame:0 + TX packets:9 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:1000 + RX bytes:688 (688.0 B) TX bytes:1224 (1.1 KiB) + +root@ZhuoTK:/# ip ro add 8.8.8.8/32 dev usb0.1 +root@ZhuoTK:/# ping 8.8.8.8 +PING 8.8.8.8 (8.8.8.8): 56 data bytes +64 bytes from 8.8.8.8: seq=0 ttl=52 time=74.450 ms + +root@ZhuoTK:/# ip ro del 8.8.8.8/32 +root@ZhuoTK:/# ip ro del 8.8.8.8/32 +RTNETLINK answers: No such process + +root@ZhuoTK:/# ip ro add 8.8.8.8/32 dev usb0.2 +root@ZhuoTK:/# ping 8.8.8.8 +PING 8.8.8.8 (8.8.8.8): 56 data bytes +64 bytes from 8.8.8.8: seq=0 ttl=52 time=257.851 ms + +root@ZhuoTK:/# quectel-CM -k 2 +[04-13_03:39:16:986] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-13_03:39:16:988] /proc/2294/cmdline: quectel-CM -n 2 -s 4gnet +[04-13_03:39:16:988] send SIGINT to process 2294 +[04-13_03:39:16:989] requestDeactivateDefaultPDP WdsConnectionIPv4Handle +[ 1262.310000] net usb0: link_state 0x3 -> 0x1 +[04-13_03:39:17:216] ifconfig usb0.2 0.0.0.0 +[04-13_03:39:17:228] ifconfig usb0.2 down +[04-13_03:39:17:370] GobiNetThread exit +[04-13_03:39:17:371] qmi_main exit + +[2]+ Done quectel-CM -n 2 -s 4gnet + +root@ZhuoTK:/# ifconfig usb0.2 +usb0.2 Link encap:Ethernet HWaddr 02:50:F4:00:00:00 + NOARP MTU:1500 Metric:1 + RX packets:30 errors:0 dropped:0 overruns:0 frame:0 + TX packets:35 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:1000 + RX bytes:2816 (2.7 KiB) TX bytes:3408 (3.3 KiB) diff --git a/package/wwan/driver/quectel_cm_5G/src/log/gobinet_qmap=4_bridge.txt b/package/wwan/driver/quectel_cm_5G/src/log/gobinet_qmap=4_bridge.txt new file mode 100644 index 000000000..16a10b991 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/log/gobinet_qmap=4_bridge.txt @@ -0,0 +1,114 @@ +root@ZhuoTK:/# insmod GobiNet.ko qmap_mode=4 +[ 42.120000] GobiNet: Quectel_Linux&Android_GobiNet_Driver_V1.6.2.13 +[ 42.130000] GobiNet 1-1.3:1.4 usb0: register 'GobiNet' at usb-101c0000.ehci-1.3, GobiNet Ethernet Device, 02:50:f4:00:00:00 +[ 42.140000] creating qcqmi0 +[ 42.150000] GobiNet::qmap_register_device usb0.1 +[ 42.150000] GobiNet::qmap_register_device usb0.2 +[ 42.160000] GobiNet::qmap_register_device usb0.3 +[ 42.160000] GobiNet::qmap_register_device usb0.4 +[ 42.170000] usbcore: registered new interface driver GobiNet +[ 43.270000] GobiNet::QMIWDASetDataFormat qmap settings qmap_version=5, rx_size=4096, tx_size=4096 +[ 43.280000] GobiNet::QMIWDASetDataFormat qmap settings ul_data_aggregation_max_size=4096, ul_data_aggregation_max_datagrams=16 + +root@ZhuoTK:/# brctl addbr br0 +root@ZhuoTK:/# brctl addif br0 eth0.1 +root@ZhuoTK:/# brctl addif br0 usb0.1 +root@ZhuoTK:/# 
brctl show +bridge name bridge id STP enabled interfaces +br0 8000.00ca019197b9 no eth0.1 + usb0.1 + +root@ZhuoTK:/# quectel-CM -n 1 -s cmnet -b & +[04-13_05:12:42:155] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-13_05:12:42:158] Find /sys/bus/usb/devices/1-1.3 idVendor=0x2c7c idProduct=0x125, bus=0x001, dev=0x003 +[ 86.130000] net usb0.1: bridge_mode change to 0x1 +[04-13_05:12:42:159] Auto find qmichannel = /dev/qcqmi0 +[04-13_05:12:42:160] Auto find usbnet_adapter = usb0 +[04-13_05:12:42:160] netcard driver = GobiNet, driver version = V1.6.2.13 +[04-13_05:12:42:160] qmap_mode = 4, qmap_version = 5, qmap_size = 4096, muxid = 0x81, qmap_netcard = usb0.1 +[04-13_05:12:42:166] Modem works in QMI mode +[04-13_05:12:42:181] Get clientWDS = 7 +[04-13_05:12:42:213] Get clientDMS = 8 +[04-13_05:12:42:246] Get clientNAS = 9 +[04-13_05:12:42:278] Get clientUIM = 10 +[04-13_05:12:42:310] requestBaseBandVersion EC25EFAR06A11M4G +[04-13_05:12:42:438] requestGetSIMStatus SIMStatus: SIM_READY +[04-13_05:12:42:439] requestSetProfile[1] cmnet///0 +[04-13_05:12:42:502] requestGetProfile[1] cmnet///0 +[04-13_05:12:42:534] requestRegistrationState2 MCC: 460, MNC: 0, PS: Attached, DataCap: LTE +[04-13_05:12:42:565] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED +[04-13_05:12:42:566] ifconfig usb0 down +[04-13_05:12:42:576] ifconfig usb0.1 0.0.0.0 +[04-13_05:12:42:587] ifconfig usb0.1 down +[04-13_05:12:42:629] requestSetupDataCall WdsConnectionIPv4Handle: 0x8724d740 +[ 86.730000] net usb0: link_state 0x0 -> 0x1 +[04-13_05:12:42:762] ifconfig usb0 up +[04-13_05:12:42:782] ifconfig usb0.1 up +[04-13_05:12:42:794] echo '0xa16b769' > /sys/class/net/usb0.1/bridge_ipv4 + +root@ZhuoTK:/# ifconfig br0 up +[ 98.270000] usb0.1 PC Mac Address: 00:0e:c6:a6:6c:f1 +[ 98.360000] usb0.1 sip = 0.0.0.0, tip=10.22.183.105, ipv4=10.22.183.105 +[ 98.370000] usb0.1 sip = 10.22.183.105, tip=10.22.183.106, ipv4=10.22.183.105 +[ 99.360000] usb0.1 sip = 0.0.0.0, tip=10.22.183.105, ipv4=10.22.183.105 +[ 100.360000] usb0.1 sip = 0.0.0.0, tip=10.22.183.105, ipv4=10.22.183.105 +[ 100.500000] usb0.1 sip = 10.22.183.105, tip=10.22.183.106, ipv4=10.22.183.105 + +root@ZhuoTK:/# quectel-CM -n 2 -s 4gnet & +[04-13_05:13:05:715] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-13_05:13:05:717] Find /sys/bus/usb/devices/1-1.3 idVendor=0x2c7c idProduct=0x125, bus=0x001, dev=0x003 +[04-13_05:13:05:719] Auto find qmichannel = /dev/qcqmi0 +[04-13_05:13:05:719] Auto find usbnet_adapter = usb0 +[04-13_05:13:05:719] netcard driver = GobiNet, driver version = V1.6.2.13 +[04-13_05:13:05:719] qmap_mode = 4, qmap_version = 5, qmap_size = 4096, muxid = 0x82, qmap_netcard = usb0.2 +[04-13_05:13:05:720] Modem works in QMI mode +[04-13_05:13:05:734] Get clientWDS = 7 +[04-13_05:13:05:766] Get clientDMS = 8 +[04-13_05:13:05:798] Get clientNAS = 9 +[04-13_05:13:05:830] Get clientUIM = 10 +[04-13_05:13:05:861] requestBaseBandVersion EC25EFAR06A11M4G +[04-13_05:13:05:990] requestGetSIMStatus SIMStatus: SIM_READY +[04-13_05:13:05:991] requestSetProfile[2] 4gnet///0 +[04-13_05:13:06:054] requestGetProfile[2] 4gnet///0 +[04-13_05:13:06:086] requestRegistrationState2 MCC: 460, MNC: 0, PS: Attached, DataCap: LTE +[04-13_05:13:06:118] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED +[04-13_05:13:06:119] ifconfig usb0.2 0.0.0.0 +[04-13_05:13:06:131] ifconfig usb0.2 down +[04-13_05:13:06:375] requestSetupDataCall WdsConnectionIPv4Handle: 0x872b8c50 +[ 110.470000] net usb0: link_state 0x1 -> 0x3 +[04-13_05:13:06:507] ifconfig usb0 up +[04-13_05:13:06:518] 
ifconfig usb0.2 up +[04-13_05:13:06:539] you are use OpenWrt? +[04-13_05:13:06:540] should not calling udhcpc manually? +[04-13_05:13:06:540] should modify /etc/config/network as below? +[04-13_05:13:06:540] config interface wan +[04-13_05:13:06:540] option ifname usb0.2 +[04-13_05:13:06:540] option proto dhcp +[04-13_05:13:06:540] should use "/sbin/ifstaus wan" to check usb0.2 's status? +[04-13_05:13:06:540] busybox udhcpc -f -n -q -t 5 -i usb0.2 +[04-13_05:13:06:554] udhcpc (v1.23.2) started +[04-13_05:13:06:614] Sending discover... +[04-13_05:13:06:619] Sending select for 10.22.58.141... +[04-13_05:13:06:623] Lease of 10.22.58.141 obtained, lease time 7200 +[04-13_05:13:06:629] udhcpc: ifconfig usb0.2 10.22.58.141 netmask 255.255.255.252 broadcast + +[04-13_05:13:06:638] udhcpc: setting default routers: 10.22.58.142 + +root@ZhuoTK:/# ifconfig usb0.2 +usb0.2 Link encap:Ethernet HWaddr 02:50:F4:00:00:00 + inet addr:10.22.58.141 Mask:255.255.255.252 + inet6 addr: fe80::50:f4ff:fe00:0/64 Scope:Link + UP RUNNING NOARP MTU:1500 Metric:1 + RX packets:2 errors:0 dropped:0 overruns:0 frame:0 + TX packets:7 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:1000 + RX bytes:612 (612.0 B) TX bytes:1064 (1.0 KiB) + +root@ZhuoTK:/# ip ro show +default via 10.22.58.142 dev usb0.2 +10.22.58.140/30 dev usb0.2 proto kernel scope link src 10.22.58.141 +192.168.1.0/24 dev br-lan proto kernel scope link src 192.168.1.251 + +root@ZhuoTK:/# ping 8.8.8.8 +PING 8.8.8.8 (8.8.8.8): 56 data bytes +64 bytes from 8.8.8.8: seq=0 ttl=52 time=69.822 ms + diff --git a/package/wwan/driver/quectel_cm_5G/src/log/pcie_mhi_mbim.txt b/package/wwan/driver/quectel_cm_5G/src/log/pcie_mhi_mbim.txt new file mode 100644 index 000000000..be400ea8e --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/log/pcie_mhi_mbim.txt @@ -0,0 +1,80 @@ +root@OpenWrt:/# lspci +00:00.0 Class 0604: 17cb:1001 +01:00.0 Class ff00: 17cb:0306 + +root@OpenWrt:/# insmod pcie_mhi.ko mhi_mbim_enabled=1 +[ 63.094154] mhi_init Quectel_Linux_PCIE_MHI_Driver_V1.3.0.17 +[ 63.094739] mhi_pci_probe pci_dev->name = 0000:01:00.0, domain=0, bus=1, slot=0, vendor=17CB, device=0306 +[ 63.099373] mhi_q 0000:01:00.0: BAR 0: assigned [mem 0x48000000-0x48000fff 64bit] +[ 63.108476] mhi_q 0000:01:00.0: enabling device (0140 -> 0142) +[ 63.293451] [I][mhi_netdev_enable_iface] Prepare the channels for transfer +[ 63.324757] [I][mhi_netdev_enable_iface] Exited. 
+[ 63.326265] rmnet_vnd_register_device(rmnet_mhi0.1)=0 + +root@OpenWrt:/# quectel-CM -s cment & +[04-13_09:25:23:910] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-13_09:25:23:912] network interface '' or qmidev '' is not exist +[04-13_09:25:23:912] netcard driver = pcie_mhi, driver version = V1.3.0.17 +[04-13_09:25:23:913] mbim_qmap_mode = 1, vlan_id = 0x00, qmap_netcard = rmnet_mhi0.1 +[04-13_09:25:23:913] Modem works in MBIM mode +[04-13_09:25:23:965] cdc_wdm_fd = 7 +[04-13_09:25:23:965] mbim_open_device() +[04-13_09:25:24:549] mbim_device_caps_query() +[04-13_09:25:24:575] DeviceId: 869710030002905 +[04-13_09:25:24:575] FirmwareInfo: RM500QGLABR10A03M4G_01.001.03 +[04-13_09:25:24:575] HardwareInfo: RM500QGL_VH +[04-13_09:25:24:576] mbim_device_services_query() +[04-13_09:25:24:585] mbim_set_radio_state( 1 ) +[04-13_09:25:24:588] HwRadioState: 1, SwRadioState: 1 +[04-13_09:25:24:588] mbim_subscriber_status_query() +[04-13_09:25:24:612] SubscriberId: 460028563800461 +[04-13_09:25:24:612] SimIccId: 89860015120716380461 +[04-13_09:25:24:613] SubscriberReadyState NotInitialized -> Initialized +[04-13_09:25:24:613] mbim_register_state_query() +[04-13_09:25:24:617] RegisterState Unknown -> Home +[04-13_09:25:24:617] mbim_packet_service_query() +[04-13_09:25:24:619] PacketServiceState Unknown -> Attached +[04-13_09:25:24:619] CurrentDataClass = 5G_NSA +[04-13_09:25:24:620] mbim_query_connect(sessionID=0) +[04-13_09:25:24:631] ActivationState Unknown -> Deactivated +[04-13_09:25:24:631] ifconfig rmnet_mhi0 down +[04-13_09:25:24:657] ifconfig rmnet_mhi0.1 0.0.0.0 +ifconfig: SIOCSIFFLAGS: Network is down +[04-13_09:25:24:681] ifconfig rmnet_mhi0.1 down +[04-13_09:25:24:705] mbim_register_state_query() +[04-13_09:25:24:709] mbim_packet_service_query() +[04-13_09:25:24:713] CurrentDataClass = 5G_NSA +[04-13_09:25:24:713] mbim_set_connect(onoff=1, sessionID=0) +[04-13_09:25:25:096] ActivationState Deactivated -> Activated +[04-13_09:25:25:097] mbim_ip_config(sessionID=0) +[04-13_09:25:25:100] < SessionId = 0 +[04-13_09:25:25:100] < IPv4ConfigurationAvailable = 0xf +[04-13_09:25:25:100] < IPv6ConfigurationAvailable = 0x0 +[04-13_09:25:25:101] < IPv4AddressCount = 0x1 +[04-13_09:25:25:101] < IPv4AddressOffset = 0x3c +[04-13_09:25:25:101] < IPv6AddressCount = 0x0 +[04-13_09:25:25:102] < IPv6AddressOffset = 0x0 +[04-13_09:25:25:102] < IPv4 = 10.190.166.229/30 +[04-13_09:25:25:103] < gw = 10.190.166.230 +[04-13_09:25:25:103] < dns1 = 211.138.180.2 +[04-13_09:25:25:103] < dns2 = 211.138.180.3 +[04-13_09:25:25:104] < ipv4 mtu = 1500 +[04-13_09:25:25:112] ifconfig rmnet_mhi0 up +[04-13_09:25:25:141] ifconfig rmnet_mhi0.1 up +[04-13_09:25:25:170] ip -4 address flush dev rmnet_mhi0.1 +[04-13_09:25:25:190] ip -4 address add 10.190.166.229/30 dev rmnet_mhi0.1 +[04-13_09:25:25:213] ip -4 route add default via 10.190.166.230 dev rmnet_mhi0.1 + +root@OpenWrt:/# ifconfig rmnet_mhi0.1 +rmnet_mhi0.1 Link encap:Ethernet HWaddr 02:50:F4:00:00:00 + inet addr:10.190.166.229 Mask:255.255.255.252 + inet6 addr: fe80::50:f4ff:fe00:0/64 Scope:Link + UP RUNNING NOARP MTU:1500 Metric:1 + RX packets:19 errors:0 dropped:0 overruns:0 frame:0 + TX packets:29 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:1000 + RX bytes:2326 (2.2 KiB) TX bytes:2991 (2.9 KiB) + +root@OpenWrt:/# ping 8.8.8.8 +PING 8.8.8.8 (8.8.8.8): 56 data bytes +64 bytes from 8.8.8.8: seq=0 ttl=52 time=278.561 ms diff --git a/package/wwan/driver/quectel_cm_5G/src/log/pcie_mhi_mbim_qmap=4.txt 
b/package/wwan/driver/quectel_cm_5G/src/log/pcie_mhi_mbim_qmap=4.txt new file mode 100644 index 000000000..360bdf66e --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/log/pcie_mhi_mbim_qmap=4.txt @@ -0,0 +1,170 @@ +root@OpenWrt:/# lspci +00:00.0 Class 0604: 17cb:1001 +01:00.0 Class ff00: 17cb:0304 +root@OpenWrt:/# insmod pcie_mhi.ko mhi_mbim_enabled=1 qmap_mode=4 +[ 76.596827] mhi_init Quectel_Linux_PCIE_MHI_Driver_V1.3.0.17 +[ 76.598596] mhi_pci_probe pci_dev->name = 0000:01:00.0, domain=0, bus=1, slot=0, vendor=17CB, device=0304 +[ 76.602863] mhi_q 0000:01:00.0: BAR 0: assigned [mem 0x48000000-0x48000fff 64bit] +[ 76.611323] mhi_q 0000:01:00.0: enabling device (0140 -> 0142) +[ 76.760239] [I][mhi_netdev_enable_iface] Prepare the channels for transfer +[ 76.828699] [I][mhi_netdev_enable_iface] Exited. +[ 76.832727] rmnet_vnd_register_device(rmnet_mhi0.1)=0 +[ 76.836596] rmnet_vnd_register_device(rmnet_mhi0.2)=0 +[ 76.841170] rmnet_vnd_register_device(rmnet_mhi0.3)=0 +[ 76.846373] rmnet_vnd_register_device(rmnet_mhi0.4)=0 + +root@OpenWrt:~# quectel-mbim-proxy -d /dev/mhi_MBIM & +root@OpenWrt:~# [04-14_03:05:36:296] mbim_dev_fd=3 +[04-14_03:05:36:297] mbim_send_open_msg() +[04-14_03:05:36:669] receive MBIM_OPEN_DONE, status=0 +[04-14_03:05:36:670] mbim_server_fd=4 + +root@OpenWrt:~# quectel-CM -n 1 -s cmnet & +[04-14_03:05:45:955] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-14_03:05:45:956] network interface '' or qmidev '' is not exist +[04-14_03:05:45:957] netcard driver = pcie_mhi, driver version = V1.3.0.17 +[04-14_03:05:45:957] mbim_qmap_mode = 4, vlan_id = 0x01, qmap_netcard = rmnet_mhi0.1 +[04-14_03:05:45:958] Modem works in MBIM mode +[04-14_03:05:45:959] connect to quectel-mbim-proxy sockfd = 7 +[04-14_03:05:45:959] handle_client_connect client_fd=5, client_idx=1 +[04-14_03:05:45:959] cdc_wdm_fd = 7 +[04-14_03:05:45:960] mbim_open_device() +[04-14_03:05:45:961] mbim_device_caps_query() +[04-14_03:05:45:967] DeviceId: 860459050041596 +[04-14_03:05:45:968] FirmwareInfo: EM120RGLAPR02A03M4G_01.001.07 + +[04-14_03:05:45:968] HardwareInfo: EM120R_GL +[04-14_03:05:45:968] mbim_device_services_query() +[04-14_03:05:45:972] mbim_set_radio_state( 1 ) +[04-14_03:05:45:976] HwRadioState: 1, SwRadioState: 1 +[04-14_03:05:45:976] mbim_subscriber_status_query() +[04-14_03:05:45:985] SubscriberId: 460028563800461 +[04-14_03:05:45:985] SimIccId: 89860015120716380461 +[04-14_03:05:45:986] SubscriberReadyState NotInitialized -> Initialized +[04-14_03:05:45:986] mbim_register_state_query() +[04-14_03:05:45:991] RegisterState Unknown -> Home +[04-14_03:05:45:991] mbim_packet_service_query() +[04-14_03:05:45:995] PacketServiceState Unknown -> Attached +[04-14_03:05:45:996] mbim_query_connect(sessionID=1) +[04-14_03:05:46:000] ActivationState Unknown -> Deactivated +[04-14_03:05:46:000] ifconfig rmnet_mhi0 down +[04-14_03:05:46:024] ifconfig rmnet_mhi0.1 0.0.0.0 +ifconfig: SIOCSIFFLAGS: Network is down +[04-14_03:05:46:049] ifconfig rmnet_mhi0.1 down +[04-14_03:05:46:072] mbim_set_connect(onoff=1, sessionID=1) +[04-14_03:05:46:099] ActivationState Deactivated -> Activated +[04-14_03:05:46:099] mbim_ip_config(sessionID=1) +[ 222.484298] net rmnet_mhi0: link_state 0x0 -> 0x1 +[04-14_03:05:46:103] < SessionId = 1 +[04-14_03:05:46:104] < IPv4ConfigurationAvailable = 0xf +[04-14_03:05:46:104] < IPv6ConfigurationAvailable = 0x0 +[04-14_03:05:46:104] < IPv4AddressCount = 0x1 +[04-14_03:05:46:105] < IPv4AddressOffset = 0x3c +[ 222.507775] [I][mhi_netdev_open] Opened net dev interface 
+[04-14_03:05:46:105] < IPv6AddressCount = 0x0 +[04-14_03:05:46:105] < IPv6AddressOffset = 0x0 +[04-14_03:05:46:106] < IPv4 = 10.38.21.158/30 +[04-14_03:05:46:106] < gw = 10.38.21.157 +[04-14_03:05:46:106] < dns1 = 211.138.180.2 +[04-14_03:05:46:107] < dns2 = 211.138.180.3 +[04-14_03:05:46:107] < ipv4 mtu = 1500 +[04-14_03:05:46:112] ifconfig rmnet_mhi0 up +[04-14_03:05:46:140] ifconfig rmnet_mhi0.1 up +[04-14_03:05:46:168] ip -4 address flush dev rmnet_mhi0.1 +[04-14_03:05:46:190] ip -4 address add 10.38.21.158/30 dev rmnet_mhi0.1 +[04-14_03:05:46:212] ip -4 route add default via 10.38.21.157 dev rmnet_mhi0.1 +[04-14_03:05:50:730] handle_client_connect client_fd=6, client_idx=2 +[ 227.558631] net rmnet_mhi0: link_state 0x1 -> 0x3 + +root@OpenWrt:~# quectel-CM -n 2 -s 4gnet +[04-14_03:05:50:725] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-14_03:05:50:726] network interface '' or qmidev '' is not exist +[04-14_03:05:50:727] netcard driver = pcie_mhi, driver version = V1.3.0.17 +[04-14_03:05:50:728] mbim_qmap_mode = 4, vlan_id = 0x02, qmap_netcard = rmnet_mhi0.2 +[04-14_03:05:50:729] Modem works in MBIM mode +[04-14_03:05:50:730] connect to quectel-mbim-proxy sockfd = 8 +[04-14_03:05:50:730] cdc_wdm_fd = 8 +[04-14_03:05:50:731] mbim_open_device() +[04-14_03:05:50:732] mbim_device_caps_query() +[04-14_03:05:50:738] DeviceId: 860459050041596 +[04-14_03:05:50:739] FirmwareInfo: EM120RGLAPR02A03M4G_01.001.07 + +[04-14_03:05:50:739] HardwareInfo: EM120R_GL +[04-14_03:05:50:740] mbim_device_services_query() +[04-14_03:05:50:744] mbim_set_radio_state( 1 ) +[04-14_03:05:50:747] HwRadioState: 1, SwRadioState: 1 +[04-14_03:05:50:747] mbim_subscriber_status_query() +[04-14_03:05:50:757] SubscriberId: 460028563800461 +[04-14_03:05:50:758] SimIccId: 89860015120716380461 +[04-14_03:05:50:758] SubscriberReadyState NotInitialized -> Initialized +[04-14_03:05:50:759] mbim_register_state_query() +[04-14_03:05:50:763] RegisterState Unknown -> Home +[04-14_03:05:50:764] mbim_packet_service_query() +[04-14_03:05:50:768] PacketServiceState Unknown -> Attached +[04-14_03:05:50:769] mbim_query_connect(sessionID=2) +[04-14_03:05:50:772] ActivationState Unknown -> Deactivated +[04-14_03:05:50:773] ifconfig rmnet_mhi0.2 0.0.0.0 +[04-14_03:05:50:799] ifconfig rmnet_mhi0.2 down +[04-14_03:05:50:834] mbim_set_connect(onoff=1, sessionID=2) +[04-14_03:05:51:170] ActivationState Deactivated -> Activated +[04-14_03:05:51:171] mbim_ip_config(sessionID=2) +[04-14_03:05:51:174] < SessionId = 2 +[04-14_03:05:51:174] < IPv4ConfigurationAvailable = 0xf +[04-14_03:05:51:175] < IPv6ConfigurationAvailable = 0x0 +[04-14_03:05:51:175] < IPv4AddressCount = 0x1 +[04-14_03:05:51:175] < IPv4AddressOffset = 0x3c +[04-14_03:05:51:176] < IPv6AddressCount = 0x0 +[04-14_03:05:51:176] < IPv6AddressOffset = 0x0 +[04-14_03:05:51:176] < IPv4 = 10.36.109.217/30 +[04-14_03:05:51:177] < gw = 10.36.109.218 +[04-14_03:05:51:177] < dns1 = 211.138.180.2 +[04-14_03:05:51:178] < dns2 = 211.138.180.3 +[04-14_03:05:51:178] < ipv4 mtu = 1500 +[04-14_03:05:51:182] ifconfig rmnet_mhi0 up +[04-14_03:05:51:206] ifconfig rmnet_mhi0.2 up +[04-14_03:05:51:233] ip -4 address flush dev rmnet_mhi0.2 +[04-14_03:05:51:254] ip -4 address add 10.36.109.217/30 dev rmnet_mhi0.2 +[04-14_03:05:51:277] ip -4 route add default via 10.36.109.218 dev rmnet_mhi0.2 + +root@OpenWrt:~# ifconfig rmnet_mhi0.1 +rmnet_mhi0.1 Link encap:Ethernet HWaddr 02:50:F4:00:00:00 + inet addr:10.38.21.158 Mask:255.255.255.252 + inet6 addr: fe80::50:f4ff:fe00:0/64 Scope:Link + UP RUNNING NOARP 
MTU:1500 Metric:1 + RX packets:37 errors:0 dropped:0 overruns:0 frame:0 + TX packets:29 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:1000 + RX bytes:9907 (9.6 KiB) TX bytes:2764 (2.6 KiB) + +root@OpenWrt:~# ifconfig rmnet_mhi0.2 +rmnet_mhi0.2 Link encap:Ethernet HWaddr 02:50:F4:00:00:00 + inet addr:10.36.109.217 Mask:255.255.255.252 + inet6 addr: fe80::50:f4ff:fe00:0/64 Scope:Link + UP RUNNING NOARP MTU:1500 Metric:1 + RX packets:5 errors:0 dropped:0 overruns:0 frame:0 + TX packets:18 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:1000 + RX bytes:344 (344.0 B) TX bytes:1152 (1.1 KiB) + +root@OpenWrt:~# ip ro del 8.8.8.8/32 +RTNETLINK answers: No such process +root@OpenWrt:~# ip ro add 8.8.8.8/32 dev rmnet_mhi0.1 +root@OpenWrt:~# ping 8.8.8.8 +PING 8.8.8.8 (8.8.8.8): 56 data bytes +64 bytes from 8.8.8.8: seq=0 ttl=52 time=73.248 ms + +root@OpenWrt:~# ip ro del 8.8.8.8/32 +root@OpenWrt:~# ip ro del 8.8.8.8/32 +RTNETLINK answers: No such process +root@OpenWrt:~# ip ro add 8.8.8.8/32 dev rmnet_mhi0.2 +root@OpenWrt:~# ping 8.8.8.8 +PING 8.8.8.8 (8.8.8.8): 56 data bytes +64 bytes from 8.8.8.8: seq=0 ttl=52 time=99.637 ms + +root@OpenWrt:~# quectel-CM -k 2 +[04-14_03:06:58:912] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-14_03:06:59:063] /proc/3565/cmdline: quectel-CM -n 2 -s 4gnet +[04-14_03:06:59:064] send SIGINT to process 3565 +[ 295.719442] net rmnet_mhi0: link_state 0x3 -> 0x1 +[04-14_03:06:59:407] proxy_loop poll fd = 6, revents = 0011 +[04-14_03:06:59:408] handle_client_disconnect client_fd=6, client_idx=2 diff --git a/package/wwan/driver/quectel_cm_5G/src/log/pcie_mhi_qmap=1.txt b/package/wwan/driver/quectel_cm_5G/src/log/pcie_mhi_qmap=1.txt new file mode 100644 index 000000000..dba3ba3df --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/log/pcie_mhi_qmap=1.txt @@ -0,0 +1,127 @@ +root@OpenWrt:/# cat /sys/class/net/rmnet_mhi0/qmap_mode +1 +root@OpenWrt:/# cat /sys/module/pcie_mhi/parameters/mhi_mbim_enabled +0 +root@OpenWrt:/# dmesg | grep mhi +[ 18.442226] mhi_init Quectel_Linux_PCIE_MHI_Driver_V1.3.0.17 +[ 18.443032] mhi_pci_probe pci_dev->name = 0000:01:00.0, domain=0, bus=1, slot=0, vendor=17CB, device=0306 +[ 18.447488] mhi_q 0000:01:00.0: BAR 0: assigned [mem 0x48000000-0x48000fff 64bit] +[ 18.456563] mhi_q 0000:01:00.0: enabling device (0140 -> 0142) +[ 18.464184] [I][mhi_init_pci_dev] msi_required = 5, msi_allocated = 5, msi_irq = 197 +[ 18.464215] [I][mhi_power_up] dev_state:RESET +[ 18.464225] [I][mhi_async_power_up] Requested to power on +[ 18.464432] [I][mhi_alloc_coherent] size = 114688, dma_handle = 8d400000 +[ 18.464445] [I][mhi_init_dev_ctxt] mhi_ctxt->ctrl_seg = d1766000 +[ 18.466003] [I][mhi_async_power_up] dev_state:RESET ee:AMSS +[ 18.466080] [I][mhi_pm_st_worker] Transition to state:READY +[ 18.466109] [I][mhi_pm_st_worker] INVALID_EE -> AMSS +[ 18.466135] [I][mhi_ready_state_transition] Waiting to enter READY state +[ 18.466224] [I][mhi_async_power_up] Power on setup success +[ 18.466265] [I][mhi_pci_probe] Return successful +[ 18.577299] [I][mhi_intvec_threaded_handlr] device ee:AMSS dev_state:READY, pm_state:POR +[ 18.577312] [I][mhi_ready_state_transition] Device in READY State +[ 18.577325] [I][mhi_intvec_threaded_handlr] device ee:AMSS dev_state:READY, INVALID_EE +[ 18.577329] [I][mhi_tryset_pm_state] Transition to pm state from:POR to:POR +[ 18.577337] [I][mhi_init_mmio] Initializing MMIO +[ 18.577344] [I][mhi_init_mmio] CHDBOFF:0x300 +[ 18.577361] [I][mhi_init_mmio] ERDBOFF:0x700 +[ 18.577372] [I][mhi_init_mmio] 
Programming all MMIO values. +[ 18.690834] [I][mhi_dump_tre] carl_ev evt_state_change mhistate=2 +[ 18.690854] [I][mhi_process_ctrl_ev_ring] MHI state change event to state:M0 +[ 18.690866] [I][mhi_pm_m0_transition] Entered With State:READY PM_STATE:POR +[ 18.690879] [I][mhi_tryset_pm_state] Transition to pm state from:POR to:M0 +[ 18.694229] [I][mhi_dump_tre] carl_ev evt_ee_state execenv=2 +[ 18.694241] [I][mhi_process_ctrl_ev_ring] MHI EE received event:AMSS +[ 18.694293] [I][mhi_pm_st_worker] Transition to state:MISSION MODE +[ 18.694310] [I][mhi_pm_st_worker] INVALID_EE -> AMSS +[ 18.694319] [I][mhi_pm_mission_mode_transition] Processing Mission Mode Transition +[ 18.694341] [I][mhi_init_timesync] No timesync capability found +[ 18.694350] [I][mhi_pm_mission_mode_transition] Adding new devices +[ 18.696365] [I][mhi_dtr_probe] Enter for DTR control channel +[ 18.696383] [I][__mhi_prepare_channel] Entered: preparing channel:18 +[ 18.703113] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1, type=33 +[ 18.703164] [I][__mhi_prepare_channel] Chan:18 successfully moved to start state +[ 18.703174] [I][__mhi_prepare_channel] Entered: preparing channel:19 +[ 18.710681] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1, type=33 +[ 18.710734] [I][__mhi_prepare_channel] Chan:19 successfully moved to start state +[ 18.710804] [I][mhi_dtr_probe] Exit with ret:0 +[ 18.711774] [I][mhi_netdev_enable_iface] Prepare the channels for transfer +[ 18.711811] [I][__mhi_prepare_channel] Entered: preparing channel:100 +[ 18.732097] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1, type=33 +[ 18.732151] [I][__mhi_prepare_channel] Chan:100 successfully moved to start state +[ 18.732162] [I][__mhi_prepare_channel] Entered: preparing channel:101 +[ 18.744170] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1, type=33 +[ 18.744219] [I][__mhi_prepare_channel] Chan:101 successfully moved to start state +[ 18.749132] [I][mhi_netdev_enable_iface] Exited. +[ 18.750306] rmnet_vnd_register_device(rmnet_mhi0.1)=0 +[ 18.752927] [I][mhi_pm_mission_mode_transition] Exit with ret:0 + +root@OpenWrt:/# busybox microcom /dev/mhi_DUN +at+cpin? ++CPIN: READY + +OK +at+cops? 
++COPS: 0,0,"CHINA MOBILE",13 + +OK +at+csq ++csq: 23,99 + +OK + +root@OpenWrt:/# quectel-CM -s cmnet & +[04-13_09:26:58:077] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-13_09:26:58:078] network interface '' or qmidev '' is not exist +[04-13_09:26:58:079] netcard driver = pcie_mhi, driver version = V1.3.0.17 +[04-13_09:26:58:080] qmap_mode = 1, qmap_version = 9, qmap_size = 16384, muxid = 0x81, qmap_netcard = rmnet_mhi0.1 +[04-13_09:26:58:080] Modem works in QMI mode +[04-13_09:26:58:131] cdc_wdm_fd = 7 +[04-13_09:26:59:132] QmiWwanInit message timeout +[04-13_09:27:00:140] Get clientWDS = 15 +[04-13_09:27:00:144] Get clientDMS = 1 +[04-13_09:27:00:147] Get clientNAS = 4 +[04-13_09:27:00:151] Get clientUIM = 1 +[04-13_09:27:00:155] Get clientWDA = 1 +[04-13_09:27:00:158] requestBaseBandVersion RM500QGLABR10A03M4G +[04-13_09:27:00:161] qmap_settings.rx_urb_size = 16384 +[04-13_09:27:00:162] qmap_settings.ul_data_aggregation_max_datagrams = 11 +[04-13_09:27:00:162] qmap_settings.ul_data_aggregation_max_size = 8192 +[04-13_09:27:00:163] qmap_settings.dl_minimum_padding = 0 +[04-13_09:27:00:176] requestGetSIMStatus SIMStatus: SIM_READY +[04-13_09:27:00:177] requestSetProfile[1] cmnet///0 +[04-13_09:27:00:190] requestGetProfile[1] cmnet///0 +[04-13_09:27:00:193] requestRegistrationState2 MCC: 460, MNC: 0, PS: Attached, DataCap: 5G_NSA +[04-13_09:27:00:197] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED +[04-13_09:27:00:198] ifconfig rmnet_mhi0 down +[04-13_09:27:00:222] ifconfig rmnet_mhi0.1 0.0.0.0 +[04-13_09:27:00:247] ifconfig rmnet_mhi0.1 down +[04-13_09:27:00:281] requestSetupDataCall WdsConnectionIPv4Handle: 0x1228bb20 +[ 245.284909] net rmnet_mhi0: link_state 0x0 -> 0x1 +[04-13_09:27:00:293] ifconfig rmnet_mhi0 up +[ 245.308696] [I][mhi_netdev_open] Opened net dev interface +[04-13_09:27:00:318] ifconfig rmnet_mhi0.1 up +[04-13_09:27:00:353] you are use OpenWrt? +[04-13_09:27:00:354] should not calling udhcpc manually? +[04-13_09:27:00:354] should modify /etc/config/network as below? +[04-13_09:27:00:355] config interface wan +[04-13_09:27:00:355] option ifname rmnet_mhi0.1 +[04-13_09:27:00:355] option proto dhcp +[04-13_09:27:00:356] should use "/sbin/ifstaus wan" to check rmnet_mhi0.1 's status? 
+[04-13_09:27:00:356] busybox udhcpc -f -n -q -t 5 -i rmnet_mhi0.1 +udhcpc: started, v1.28.3 +udhcpc: sending discover +udhcpc: sending select for 10.128.73.23 +udhcpc: lease of 10.128.73.23 obtained, lease time 7200 +[04-13_09:27:00:710] udhcpc: ifconfig rmnet_mhi0.1 10.128.73.23 netmask 255.255.255.240 broadcast + +[04-13_09:27:00:742] udhcpc: setting default routers: 10.128.73.24 + +root@OpenWrt:/# ifconfig rmnet_mhi0.1 +rmnet_mhi0.1 Link encap:Ethernet HWaddr 02:50:F4:00:00:00 + inet addr:10.128.73.23 Mask:255.255.255.240 + inet6 addr: fe80::50:f4ff:fe00:0/64 Scope:Link + UP RUNNING NOARP MTU:1500 Metric:1 + RX packets:2 errors:0 dropped:0 overruns:0 frame:0 + TX packets:2 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:1000 + RX bytes:612 (612.0 B) TX bytes:684 (684.0 B) diff --git a/package/wwan/driver/quectel_cm_5G/src/log/pcie_mhi_qmap=1_bridge.txt b/package/wwan/driver/quectel_cm_5G/src/log/pcie_mhi_qmap=1_bridge.txt new file mode 100644 index 000000000..37db6c45e --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/log/pcie_mhi_qmap=1_bridge.txt @@ -0,0 +1,76 @@ +root@OpenWrt:/# lspci +00:00.0 Class 0604: 17cb:1001 +01:00.0 Class ff00: 17cb:0306 + +root@OpenWrt:~# insmod pcie_mhi.ko +[ 77.949271] mhi_init Quectel_Linux_PCIE_MHI_Driver_V1.3.0.17 +[ 77.950949] mhi_pci_probe pci_dev->name = 0000:01:00.0, domain=0, bus=1, slot=0, vendor=17CB, device=0306 +[ 77.955331] mhi_q 0000:01:00.0: BAR 0: assigned [mem 0x48000000-0x48000fff 64bit] +[ 77.963756] mhi_q 0000:01:00.0: enabling device (0140 -> 0142) +[ 78.048911] [I][mhi_netdev_enable_iface] Prepare the channels for transfer +[ 78.092304] [I][mhi_netdev_enable_iface] Exited. +[ 78.096580] rmnet_vnd_register_device(rmnet_mhi0.1)=0 + +root@OpenWrt:~# brctl addbr br0 +root@OpenWrt:~# brctl addif br0 rmnet_mhi0.1 +root@OpenWrt:~# brctl addif br0 eth1 +[ 250.017213] device eth1 entered promiscuous mode +root@OpenWrt:~# brctl show +bridge name bridge id STP enabled interfaces +br0 8000.00001c353487 no eth1 + rmnet_mhi0.1 + +root@OpenWrt:~# quectel-CM -s cmnet -b & +[04-14_06:43:28:473] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-14_06:43:28:474] network interface '' or qmidev '' is not exist +[04-14_06:43:28:475] netcard driver = pcie_mhi, driver version = V1.3.0.17 +[04-14_06:43:28:476] qmap_mode = 1, qmap_version = 9, qmap_size = 16384, muxid = 0x81, qmap_netcard = rmnet_mhi0.1 +[04-14_06:43:28:477] Modem works in QMI mode +[04-14_06:43:28:531] cdc_wdm_fd = 7 +[04-14_06:43:29:532] QmiWwanInit message timeout +[04-14_06:43:30:540] Get clientWDS = 15 +[04-14_06:43:30:543] Get clientDMS = 1 +[04-14_06:43:30:546] Get clientNAS = 4 +[04-14_06:43:30:550] Get clientUIM = 1 +[04-14_06:43:30:553] Get clientWDA = 1 +[04-14_06:43:30:557] requestBaseBandVersion RM500QGLABR10A03M4G +[04-14_06:43:30:560] qmap_settings.rx_urb_size = 16384 +[04-14_06:43:30:561] qmap_settings.ul_data_aggregation_max_datagrams = 11 +[04-14_06:43:30:561] qmap_settings.ul_data_aggregation_max_size = 8192 +[04-14_06:43:30:561] qmap_settings.dl_minimum_padding = 0 +[04-14_06:43:30:575] requestGetSIMStatus SIMStatus: SIM_READY +[04-14_06:43:30:575] requestSetProfile[1] cmnet///0 +[04-14_06:43:30:588] requestGetProfile[1] cmnet///0 +[04-14_06:43:30:591] requestRegistrationState2 MCC: 460, MNC: 0, PS: Attached, DataCap: 5G_NSA +[04-14_06:43:30:595] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED +[04-14_06:43:30:595] ifconfig rmnet_mhi0 down +[04-14_06:43:30:620] ifconfig rmnet_mhi0.1 0.0.0.0 +ifconfig: SIOCSIFFLAGS: Network is down 
+[04-14_06:43:30:644] ifconfig rmnet_mhi0.1 down +[04-14_06:43:30:679] requestSetupDataCall WdsConnectionIPv4Handle: 0xb41f47d0 +[ 263.869899] net rmnet_mhi0: link_state 0x0 -> 0x1 +[04-14_06:43:30:693] ifconfig rmnet_mhi0 up +[ 263.892647] [I][mhi_netdev_open] Opened net dev interface +[04-14_06:43:30:718] ifconfig rmnet_mhi0.1 up +[04-14_06:43:30:746] echo '0xa59316b' > /sys/class/net/rmnet_mhi0.1/bridge_ipv4 + +root@OpenWrt:~# ifconfig br0 up +[ 268.800026] br0: port 2(eth1) entered forwarding state +[ 268.800336] br0: port 2(eth1) entered forwarding state +[ 268.804251] br0: port 1(rmnet_mhi0.1) entered forwarding state +[ 268.809465] br0: port 1(rmnet_mhi0.1) entered forwarding state +[ 283.845790] br0: port 2(eth1) entered forwarding state + +[ 296.512489] rmnet_mhi0.1 PC Mac Address: 00:0e:c6:a6:6c:f1 +[ 296.515756] rmnet_mhi0.1 PC Mac Address: 00:0e:c6:a6:6c:f1 +[ 296.586584] rmnet_mhi0.1 sip = 10.89.49.107, tip=10.89.49.108, ipv4=10.89.49.107 +[ 296.672356] rmnet_mhi0.1 sip = 10.89.49.107, tip=10.89.49.108, ipv4=10.89.49.107 +[ 296.792061] rmnet_mhi0.1 sip = 10.89.49.107, tip=10.89.49.108, ipv4=10.89.49.107 +[ 296.832822] rmnet_mhi0.1 sip = 10.89.49.107, tip=10.89.49.108, ipv4=10.89.49.107 +[ 296.941073] rmnet_mhi0.1 sip = 0.0.0.0, tip=10.89.49.107, ipv4=10.89.49.107 +[ 297.941310] rmnet_mhi0.1 sip = 0.0.0.0, tip=10.89.49.107, ipv4=10.89.49.107 +[ 298.941528] rmnet_mhi0.1 sip = 0.0.0.0, tip=10.89.49.107, ipv4=10.89.49.107 +[ 299.941704] rmnet_mhi0.1 sip = 10.89.49.107, tip=10.89.49.107, ipv4=10.89.49.107 +[ 300.024484] rmnet_mhi0.1 sip = 10.89.49.107, tip=10.89.49.108, ipv4=10.89.49.107 +[ 300.051995] rmnet_mhi0.1 sip = 10.89.49.107, tip=10.89.49.108, ipv4=10.89.49.107 +[ 303.915933] rmnet_mhi0.1 sip = 10.89.49.107, tip=10.89.49.108, ipv4=10.89.49.107 diff --git a/package/wwan/driver/quectel_cm_5G/src/log/pcie_mhi_qmap=4.txt b/package/wwan/driver/quectel_cm_5G/src/log/pcie_mhi_qmap=4.txt new file mode 100644 index 000000000..19cd7b589 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/log/pcie_mhi_qmap=4.txt @@ -0,0 +1,138 @@ +root@OpenWrt:/# lspci +00:00.0 Class 0604: 17cb:1001 +01:00.0 Class ff00: 17cb:0306 + +root@OpenWrt:/# insmod pcie_mhi.ko qmap_mode=4 +[ 61.988878] mhi_init Quectel_Linux_PCIE_MHI_Driver_V1.3.0.17 +[ 61.989484] mhi_pci_probe pci_dev->name = 0000:01:00.0, domain=0, bus=1, slot=0, vendor=17CB, device=0306 +[ 61.994039] mhi_q 0000:01:00.0: BAR 0: assigned [mem 0x48000000-0x48000fff 64bit] +[ 62.003208] mhi_q 0000:01:00.0: enabling device (0140 -> 0142) +[ 62.191947] [I][mhi_netdev_enable_iface] Prepare the channels for transfer +[ 62.224065] [I][mhi_netdev_enable_iface] Exited. 
+[ 62.225619] rmnet_vnd_register_device(rmnet_mhi0.1)=0 +[ 62.229289] rmnet_vnd_register_device(rmnet_mhi0.2)=0 +[ 62.234378] rmnet_vnd_register_device(rmnet_mhi0.3)=0 +[ 62.240039] rmnet_vnd_register_device(rmnet_mhi0.4)=0 + +root@OpenWrt:/# quectel-qmi-proxy -d /dev/mhi_QMI0 & +[04-13_09:25:12:278] Will use cdc-wdm='/dev/mhi_QMI0', proxy='quectel-qmi-proxy0' +[04-13_09:25:12:297] qmi_proxy_init enter +[04-13_09:25:12:297] qmi_proxy_loop enter thread_id 0xb6e88d44 +[04-13_09:25:14:298] qmi_proxy_init succful +[04-13_09:25:14:299] local server: quectel-qmi-proxy0 sockfd = 4 +[04-13_09:25:14:299] qmi_proxy_server_fd = 4 + +root@OpenWrt:/# quectel-CM -n 1 -s cmnet & +[04-13_09:25:32:336] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-13_09:25:32:337] network interface '' or qmidev '' is not exist +[04-13_09:25:32:338] netcard driver = pcie_mhi, driver version = V1.3.0.17 +[04-13_09:25:32:339] qmap_mode = 4, qmap_version = 9, qmap_size = 16384, muxid = 0x81, qmap_netcard = rmnet_mhi0.1 +[04-13_09:25:32:340] Modem works in QMI mode +[04-13_09:25:32:341] connect to quectel-qmi-proxy0 sockfd = 7 +[04-13_09:25:32:342] cdc_wdm_fd = 7 +[04-13_09:25:32:380] requestBaseBandVersion RM500QGLABR10A03M4G +[04-13_09:25:32:382] qmap_settings.rx_urb_size = 16384 +[04-13_09:25:32:383] qmap_settings.ul_data_aggregation_max_datagrams = 11 +[04-13_09:25:32:383] qmap_settings.ul_data_aggregation_max_size = 8192 +[04-13_09:25:32:384] qmap_settings.dl_minimum_padding = 0 +[04-13_09:25:32:394] requestGetSIMStatus SIMStatus: SIM_READY +[04-13_09:25:32:395] requestSetProfile[1] cmnet///0 +[04-13_09:25:32:409] requestGetProfile[1] cmnet///0 +[04-13_09:25:32:414] requestRegistrationState2 MCC: 460, MNC: 0, PS: Attached, DataCap: 5G_NSA +[04-13_09:25:32:418] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED +[04-13_09:25:32:419] ifconfig rmnet_mhi0 down +[04-13_09:25:32:448] ifconfig rmnet_mhi0.1 0.0.0.0 +[04-13_09:25:32:473] ifconfig rmnet_mhi0.1 down +[04-13_09:25:32:514] requestSetupDataCall WdsConnectionIPv4Handle: 0x2313a2a0 +[ 121.648172] net rmnet_mhi0: link_state 0x0 -> 0x1 +[04-13_09:25:32:525] ifconfig rmnet_mhi0 up +[ 121.671210] [I][mhi_netdev_open] Opened net dev interface +[04-13_09:25:32:551] ifconfig rmnet_mhi0.1 up +[04-13_09:25:32:586] you are use OpenWrt? +[04-13_09:25:32:587] should not calling udhcpc manually? +[04-13_09:25:32:587] should modify /etc/config/network as below? +[04-13_09:25:32:587] config interface wan +[04-13_09:25:32:588] option ifname rmnet_mhi0.1 +[04-13_09:25:32:588] option proto dhcp +[04-13_09:25:32:589] should use "/sbin/ifstaus wan" to check rmnet_mhi0.1 's status? 
+[04-13_09:25:32:589] busybox udhcpc -f -n -q -t 5 -i rmnet_mhi0.1 +udhcpc: started, v1.28.3 +udhcpc: sending discover +udhcpc: sending select for 10.174.91.70 +udhcpc: lease of 10.174.91.70 obtained, lease time 7200 +[04-13_09:25:32:980] udhcpc: ifconfig rmnet_mhi0.1 10.174.91.70 netmask 255.255.255.252 broadcast + +[04-13_09:25:33:007] udhcpc: setting default routers: 10.174.91.69 + +root@OpenWrt:/# quectel-CM -n 2 -s 4gnet & +[04-13_09:25:42:976] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-13_09:25:42:977] network interface '' or qmidev '' is not exist +[04-13_09:25:42:978] netcard driver = pcie_mhi, driver version = V1.3.0.17 +[04-13_09:25:42:978] qmap_mode = 4, qmap_version = 9, qmap_size = 16384, muxid = 0x82, qmap_netcard = rmnet_mhi0.2 +[04-13_09:25:42:979] Modem works in QMI mode +[04-13_09:25:42:981] connect to quectel-qmi-proxy0 sockfd = 7 +[04-13_09:25:42:982] cdc_wdm_fd = 7 +[04-13_09:25:43:010] requestBaseBandVersion RM500QGLABR10A03M4G +[04-13_09:25:43:013] qmap_settings.rx_urb_size = 16384 +[04-13_09:25:43:014] qmap_settings.ul_data_aggregation_max_datagrams = 11 +[04-13_09:25:43:014] qmap_settings.ul_data_aggregation_max_size = 8192 +[04-13_09:25:43:015] qmap_settings.dl_minimum_padding = 0 +[04-13_09:25:43:030] requestGetSIMStatus SIMStatus: SIM_READY +[04-13_09:25:43:030] requestSetProfile[2] 4gnet///0 +[04-13_09:25:43:046] requestGetProfile[2] 4gnet///0 +[04-13_09:25:43:050] requestRegistrationState2 MCC: 460, MNC: 0, PS: Attached, DataCap: 5G_NSA +[04-13_09:25:43:054] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED +[04-13_09:25:43:055] ifconfig rmnet_mhi0.2 0.0.0.0 +[04-13_09:25:43:082] ifconfig rmnet_mhi0.2 down +[04-13_09:25:43:507] requestSetupDataCall WdsConnectionIPv4Handle: 0x2332a780 +[ 132.641313] net rmnet_mhi0: link_state 0x1 -> 0x3 +[04-13_09:25:43:519] ifconfig rmnet_mhi0 up +[04-13_09:25:43:543] ifconfig rmnet_mhi0.2 up +[04-13_09:25:43:570] you are use OpenWrt? +[04-13_09:25:43:570] should not calling udhcpc manually? +[04-13_09:25:43:571] should modify /etc/config/network as below? +[04-13_09:25:43:571] config interface wan +[04-13_09:25:43:571] option ifname rmnet_mhi0.2 +[04-13_09:25:43:572] option proto dhcp +[04-13_09:25:43:572] should use "/sbin/ifstaus wan" to check rmnet_mhi0.2 's status? 
+[04-13_09:25:43:573] busybox udhcpc -f -n -q -t 5 -i rmnet_mhi0.2 +udhcpc: started, v1.28.3 +udhcpc: sending discover +udhcpc: sending select for 10.163.253.197 +udhcpc: lease of 10.163.253.197 obtained, lease time 7200 +[04-13_09:25:43:810] udhcpc: ifconfig rmnet_mhi0.2 10.163.253.197 netmask 255.255.255.252 broadcast + +[04-13_09:25:43:836] udhcpc: setting default routers: 10.163.253.198 + +root@OpenWrt:/# ifconfig rmnet_mhi0.1 +rmnet_mhi0.1 Link encap:Ethernet HWaddr 02:50:F4:00:00:00 + inet addr:10.174.91.70 Mask:255.255.255.252 + inet6 addr: fe80::50:f4ff:fe00:0/64 Scope:Link + UP RUNNING NOARP MTU:1500 Metric:1 + RX packets:2 errors:0 dropped:0 overruns:0 frame:0 + TX packets:4 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:1000 + RX bytes:612 (612.0 B) TX bytes:1380 (1.3 KiB) + +root@OpenWrt:/# ifconfig rmnet_mhi0.2 +rmnet_mhi0.2 Link encap:Ethernet HWaddr 02:50:F4:00:00:00 + inet addr:10.163.253.197 Mask:255.255.255.252 + inet6 addr: fe80::50:f4ff:fe00:0/64 Scope:Link + UP RUNNING NOARP MTU:1500 Metric:1 + RX packets:2 errors:0 dropped:0 overruns:0 frame:0 + TX packets:2 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:1000 + RX bytes:612 (612.0 B) TX bytes:684 (684.0 B) + +root@OpenWrt:/# ip ro del 8.8.8.8/32 +RTNETLINK answers: No such process +root@OpenWrt:/# ip ro add 8.8.8.8/32 dev rmnet_mhi0.1 +root@OpenWrt:/# ping 8.8.8.8 +PING 8.8.8.8 (8.8.8.8): 56 data bytes +64 bytes from 8.8.8.8: seq=0 ttl=52 time=390.869 ms + +root@OpenWrt:/# ip ro del 8.8.8.8/32 +root@OpenWrt:/# ip ro del 8.8.8.8/32 +RTNETLINK answers: No such process +root@OpenWrt:/# ip ro add 8.8.8.8/32 dev rmnet_mhi0.2 +root@OpenWrt:/# ping 8.8.8.8 +PING 8.8.8.8 (8.8.8.8): 56 data bytes +64 bytes from 8.8.8.8: seq=0 ttl=111 time=314.395 ms \ No newline at end of file diff --git a/package/wwan/driver/quectel_cm_5G/src/log/pcie_mhi_qmap=4_bridge.txt b/package/wwan/driver/quectel_cm_5G/src/log/pcie_mhi_qmap=4_bridge.txt new file mode 100644 index 000000000..c70b6173a --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/log/pcie_mhi_qmap=4_bridge.txt @@ -0,0 +1,147 @@ +root@OpenWrt:~# lspci +00:00.0 Class 0604: 17cb:1001 +01:00.0 Class ff00: 17cb:0306 +root@OpenWrt:~# + +root@OpenWrt:~# insmod pcie_mhi.ko qmap_mode=4 +[ 200.906104] mhi_init Quectel_Linux_PCIE_MHI_Driver_V1.3.0.17 +[ 200.907913] mhi_pci_probe pci_dev->name = 0000:01:00.0, domain=0, bus=1, slot=0, vendor=17CB, device=0306 +[ 200.912164] mhi_q 0000:01:00.0: BAR 0: assigned [mem 0x48000000-0x48000fff 64bit] +[ 200.920593] mhi_q 0000:01:00.0: enabling device (0140 -> 0142) +root@OpenWrt:~# [ 201.112214] [I][mhi_netdev_enable_iface] Prepare the channels for transfer +[ 201.154640] [I][mhi_netdev_enable_iface] Exited. 
+[ 201.159271] rmnet_vnd_register_device(rmnet_mhi0.1)=0 +[ 201.162953] rmnet_vnd_register_device(rmnet_mhi0.2)=0 +[ 201.167698] rmnet_vnd_register_device(rmnet_mhi0.3)=0 +[ 201.172178] rmnet_vnd_register_device(rmnet_mhi0.4)=0 + +root@OpenWrt:~# brctl addbr br0 +root@OpenWrt:~# brctl addif br0 eth1 +root@OpenWrt:~# brctl addif br0 rmnet_mhi0.2 +root@OpenWrt:~# brctl show +bridge name bridge id STP enabled interfaces +br0 8000.00001c353487 no eth1 + rmnet_mhi0.2 + +root@OpenWrt:~# quectel-qmi-proxy -d /dev/mhi_QMI0 & +[04-14_06:44:01:556] Will use cdc-wdm='/dev/mhi_QMI0', proxy='quectel-qmi-proxy0' +[04-14_06:44:01:573] qmi_proxy_init enter +[04-14_06:44:01:573] qmi_proxy_loop enter thread_id 0xb6f20d44 +[04-14_06:44:03:574] qmi_proxy_init succful +[04-14_06:44:03:574] local server: quectel-qmi-proxy0 sockfd = 4 +[04-14_06:44:03:575] qmi_proxy_server_fd = 4 + + +root@OpenWrt:~# quectel-CM -n 1 -s cmnet & +[04-14_06:47:53:303] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-14_06:47:53:314] network interface '' or qmidev '' is not exist +[04-14_06:47:53:315] netcard driver = pcie_mhi, driver version = V1.3.0.17 +[04-14_06:47:53:316] qmap_mode = 4, qmap_version = 9, qmap_size = 16384, muxid = 0x81, qmap_netcard = rmnet_mhi0.1 +[04-14_06:47:53:316] Modem works in QMI mode +[04-14_06:47:53:318] connect to quectel-qmi-proxy0 sockfd = 7 +[04-14_06:47:53:318] cdc_wdm_fd = 7 +[04-14_06:47:53:326] Get clientWDS = 15 +[04-14_06:47:53:329] Get clientDMS = 2 +[04-14_06:47:53:334] Get clientNAS = 4 +[04-14_06:47:53:338] Get clientUIM = 1 +[04-14_06:47:53:343] Get clientWDA = 1 +[04-14_06:47:53:347] requestBaseBandVersion RM500QGLABR10A03M4G +[04-14_06:47:53:351] qmap_settings.rx_urb_size = 16384 +[04-14_06:47:53:352] qmap_settings.ul_data_aggregation_max_datagrams = 11 +[04-14_06:47:53:352] qmap_settings.ul_data_aggregation_max_size = 8192 +[04-14_06:47:53:352] qmap_settings.dl_minimum_padding = 0 +[04-14_06:47:53:369] requestGetSIMStatus SIMStatus: SIM_READY +[04-14_06:47:53:370] requestSetProfile[1] cmnet///0 +[04-14_06:47:53:402] requestGetProfile[1] cmnet///0 +[04-14_06:47:53:407] requestRegistrationState2 MCC: 0, MNC: 0, PS: Detached, DataCap: UNKNOW +[04-14_06:47:53:411] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED +[04-14_06:47:53:412] ifconfig rmnet_mhi0 down +[04-14_06:47:53:436] ifconfig rmnet_mhi0.1 0.0.0.0 +[04-14_06:47:53:460] ifconfig rmnet_mhi0.1 down +[04-14_06:48:26:399] requestRegistrationState2 MCC: 460, MNC: 0, PS: Detached, DataCap: UNKNOW +[04-14_06:48:26:405] requestRegistrationState2 MCC: 460, MNC: 0, PS: Detached, DataCap: UNKNOW +[04-14_06:48:26:411] requestRegistrationState2 MCC: 460, MNC: 0, PS: Detached, DataCap: UNKNOW +[04-14_06:48:26:970] requestRegistrationState2 MCC: 460, MNC: 0, PS: Attached, DataCap: 5G_NSA +[04-14_06:48:26:992] requestSetupDataCall WdsConnectionIPv4Handle: 0x34176710 +[04-14_06:48:27:005] ifconfig rmnet_mhi0 up +[04-14_06:48:27:031] ifconfig rmnet_mhi0.1 up +[04-14_06:48:27:057] you are use OpenWrt? +[04-14_06:48:27:057] should not calling udhcpc manually? +[04-14_06:48:27:080] should use "/sbin/ifstaus wan" to check rmnet_mhi0.1 's status? 
+[04-14_06:48:27:081] busybox udhcpc -f -n -q -t 5 -i rmnet_mhi0.1 +[04-14_06:48:27:363] udhcpc: ifconfig rmnet_mhi0.1 10.245.22.3 netmask 255.255.255.248 broadcast + +[04-14_06:48:27:398] udhcpc: setting default routers: 10.245.22.4 +[04-14_06:48:27:491] requestRegistrationState2 MCC: 460, MNC: 0, PS: Attached, DataCap: 5G_NSA + +root@OpenWrt:~# quectel-CM -n 2 -s 4gnet -b & +[04-14_06:48:06:842] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-14_06:48:06:853] network interface '' or qmidev '' is not exist +[04-14_06:48:06:854] netcard driver = pcie_mhi, driver version = V1.3.0.17 +[04-14_06:48:06:855] qmap_mode = 4, qmap_version = 9, qmap_size = 16384, muxid = 0x82, qmap_netcard = rmnet_mhi0.2 +[04-14_06:48:06:855] Modem works in QMI mode +[04-14_06:48:06:857] connect to quectel-qmi-proxy0 sockfd = 7 +[04-14_06:48:06:858] cdc_wdm_fd = 7 +[04-14_06:48:06:864] Get clientWDS = 16 +[04-14_06:48:06:867] Get clientDMS = 3 +[04-14_06:48:06:871] Get clientNAS = 5 +[04-14_06:48:06:874] Get clientUIM = 2 +[04-14_06:48:06:879] Get clientWDA = 2 +[04-14_06:48:06:886] requestBaseBandVersion RM500QGLABR10A03M4G +[04-14_06:48:06:891] qmap_settings.rx_urb_size = 16384 +[04-14_06:48:06:891] qmap_settings.ul_data_aggregation_max_datagrams = 11 +[04-14_06:48:06:892] qmap_settings.ul_data_aggregation_max_size = 8192 +[04-14_06:48:06:892] qmap_settings.dl_minimum_padding = 0 +[04-14_06:48:06:909] requestGetSIMStatus SIMStatus: SIM_READY +[04-14_06:48:06:909] requestSetProfile[2] 4gnet///0 +[04-14_06:48:06:940] requestGetProfile[2] 4gnet///0 +[04-14_06:48:06:944] requestRegistrationState2 MCC: 0, MNC: 0, PS: Detached, DataCap: UNKNOW +[04-14_06:48:06:949] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED +[04-14_06:48:06:949] ifconfig rmnet_mhi0 down +[04-14_06:48:06:973] ifconfig rmnet_mhi0.2 0.0.0.0 +[04-14_06:48:06:998] ifconfig rmnet_mhi0.2 down +[04-14_06:48:26:400] requestRegistrationState2 MCC: 460, MNC: 0, PS: Detached, DataCap: UNKNOW +[04-14_06:48:26:405] requestRegistrationState2 MCC: 460, MNC: 0, PS: Detached, DataCap: UNKNOW +[04-14_06:48:26:411] requestRegistrationState2 MCC: 460, MNC: 0, PS: Detached, DataCap: UNKNOW +[04-14_06:48:26:970] requestRegistrationState2 MCC: 460, MNC: 0, PS: Attached, DataCap: 5G_NSA +[04-14_06:48:27:220] requestSetupDataCall WdsConnectionIPv4Handle: 0x341450a0 +[04-14_06:48:27:228] requestRegistrationState2 MCC: 460, MNC: 0, PS: Attached, DataCap: 5G_NSA +[04-14_06:48:27:238] ifconfig rmnet_mhi0 up +[04-14_06:48:27:263] ifconfig rmnet_mhi0.2 up +[04-14_06:48:27:313] echo '0xaf51be9' > /sys/class/net/rmnet_mhi0.2/bridge_ipv4 + +root@OpenWrt:~# ifconfig rmnet_mhi0.1 +rmnet_mhi0.1 Link encap:Ethernet HWaddr 02:50:F4:00:00:00 + inet addr:10.245.22.3 Mask:255.255.255.248 + inet6 addr: fe80::50:f4ff:fe00:0/64 Scope:Link + UP RUNNING NOARP MTU:1500 Metric:1 + RX packets:6 errors:0 dropped:0 overruns:0 frame:0 + TX packets:6 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:1000 + RX bytes:1836 (1.7 KiB) TX bytes:2052 (2.0 KiB) + +root@OpenWrt:~# ifconfig rmnet_mhi0.2 +rmnet_mhi0.2 Link encap:Ethernet HWaddr 02:50:F4:00:00:00 + inet6 addr: fe80::50:f4ff:fe00:0/64 Scope:Link + UP RUNNING NOARP MTU:1500 Metric:1 + RX packets:347 errors:0 dropped:0 overruns:0 frame:0 + TX packets:795 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:1000 + RX bytes:119871 (117.0 KiB) TX bytes:121254 (118.4 KiB) + +root@OpenWrt:~# ifconfig br0 up +[ 520.005476] rmnet_mhi0.2 PC Mac Address: 00:0e:c6:a6:6c:f1 +[ 520.025896] rmnet_mhi0.2 PC Mac Address: 
00:0e:c6:a6:6c:f1 +[ 520.028002] rmnet_mhi0.2 PC Mac Address: 00:0e:c6:a6:6c:f1 +[ 520.144371] rmnet_mhi0.2 sip = 10.245.27.233, tip=10.245.27.234, ipv4=10.245.27.233 +[ 520.410052] rmnet_mhi0.2 sip = 0.0.0.0, tip=10.245.27.233, ipv4=10.245.27.233 +[ 520.414504] rmnet_mhi0.2 sip = 10.245.27.233, tip=10.245.27.234, ipv4=10.245.27.233 +[ 520.847074] rmnet_mhi0.2 sip = 10.245.27.233, tip=10.245.27.234, ipv4=10.245.27.233 +[ 521.410241] rmnet_mhi0.2 sip = 0.0.0.0, tip=10.245.27.233, ipv4=10.245.27.233 +[ 522.410455] rmnet_mhi0.2 sip = 0.0.0.0, tip=10.245.27.233, ipv4=10.245.27.233 +[ 522.822594] rmnet_mhi0.2 sip = 10.245.27.233, tip=10.245.27.234, ipv4=10.245.27.233 +[ 523.410638] rmnet_mhi0.2 sip = 10.245.27.233, tip=10.245.27.233, ipv4=10.245.27.233 +[ 523.510028] rmnet_mhi0.2 sip = 10.245.27.233, tip=10.245.27.234, ipv4=10.245.27.233 +[ 523.997961] rmnet_mhi0.2 sip = 10.245.27.233, tip=10.245.27.234, ipv4=10.245.27.233 +[ 543.799483] rmnet_mhi0.2 sip = 10.245.27.233, tip=10.245.27.234, ipv4=10.245.27.233 +[ 543.929301] rmnet_mhi0.2 sip = 10.245.27.233, tip=10.245.27.234, ipv4=10.245.27.233 + diff --git a/package/wwan/driver/quectel_cm_5G/src/log/qmi_wwan_q.txt b/package/wwan/driver/quectel_cm_5G/src/log/qmi_wwan_q.txt new file mode 100644 index 000000000..4dbf55a0c --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/log/qmi_wwan_q.txt @@ -0,0 +1,65 @@ +root@ZhuoTK:/# dmesg +[ 15.840000] qmi_wwan_q 1-1.3:1.4: cdc-wdm0: USB WDM device +[ 15.860000] qmi_wwan_q 1-1.3:1.4: Quectel Android work on RawIP mode +[ 15.860000] qmi_wwan_q 1-1.3:1.4: rx_urb_size = 1520 +[ 15.870000] qmi_wwan_q 1-1.3:1.4 wwan0: register 'qmi_wwan_q' at usb-101c0000.ehci-1.3, WWAN/QMI device, da:0b:ce:b2:db:21 + +root@ZhuoTK:/# quectel-CM -s cment & +[04-13_03:20:20:456] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-13_03:20:20:459] Find /sys/bus/usb/devices/1-1.3 idVendor=0x2c7c idProduct=0x125, bus=0x001, dev=0x003 +[04-13_03:20:20:460] Auto find qmichannel = /dev/cdc-wdm0 +[04-13_03:20:20:460] Auto find usbnet_adapter = wwan0 +[04-13_03:20:20:461] netcard driver = qmi_wwan_q, driver version = V1.2.0.23 +[04-13_03:20:20:461] Modem works in QMI mode +[04-13_03:20:20:469] cdc_wdm_fd = 7 +[04-13_03:20:20:547] Get clientWDS = 4 +[04-13_03:20:20:579] Get clientDMS = 1 +[04-13_03:20:20:611] Get clientNAS = 4 +[04-13_03:20:20:643] Get clientUIM = 1 +[04-13_03:20:20:675] Get clientWDA = 1 +[04-13_03:20:20:707] requestBaseBandVersion EC25EFAR06A11M4G +[04-13_03:20:20:836] requestGetSIMStatus SIMStatus: SIM_READY +[04-13_03:20:20:836] requestSetProfile[1] cment///0 +[04-13_03:20:20:899] requestGetProfile[1] cment///0 +[04-13_03:20:20:931] requestRegistrationState2 MCC: 460, MNC: 0, PS: Attached, DataCap: LTE +[04-13_03:20:20:963] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED +[04-13_03:20:20:963] ifconfig wwan0 0.0.0.0 +[04-13_03:20:20:976] ifconfig wwan0 down +[04-13_03:20:21:186] requestSetupDataCall WdsConnectionIPv4Handle: 0x8723e780 +[04-13_03:20:21:316] ifconfig wwan0 up +[04-13_03:20:21:329] you are use OpenWrt? +[04-13_03:20:21:330] should not calling udhcpc manually? +[04-13_03:20:21:330] should modify /etc/config/network as below? +[04-13_03:20:21:330] config interface wan +[04-13_03:20:21:330] option ifname wwan0 +[04-13_03:20:21:330] option proto dhcp +[04-13_03:20:21:330] should use "/sbin/ifstaus wan" to check wwan0 's status? +[04-13_03:20:21:331] busybox udhcpc -f -n -q -t 5 -i wwan0 +[04-13_03:20:21:341] udhcpc (v1.23.2) started +[04-13_03:20:21:353] Sending discover... 
+[04-13_03:20:21:362] Sending select for 10.90.1.113... +[04-13_03:20:21:365] Lease of 10.90.1.113 obtained, lease time 7200 +[04-13_03:20:21:370] udhcpc: ifconfig wwan0 10.90.1.113 netmask 255.255.255.252 broadcast + +[04-13_03:20:21:380] udhcpc: setting default routers: 10.90.1.114 + +root@ZhuoTK:/# ifconfig wwan0 +wwan0 Link encap:Ethernet HWaddr 00:CA:01:91:97:BA + inet addr:10.90.1.113 Mask:255.255.255.252 + inet6 addr: fe80::2ca:1ff:fe91:97ba/64 Scope:Link + UP RUNNING NOARP MTU:1500 Metric:1 + RX packets:38 errors:0 dropped:0 overruns:0 frame:0 + TX packets:46 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:1000 + RX bytes:5244 (5.1 KiB) TX bytes:6964 (6.8 KiB) + +root@ZhuoTK:/# ip ro show +default via 10.90.1.114 dev wwan0 +10.90.1.112/30 dev wwan0 proto kernel scope link src 10.90.1.113 +192.168.1.0/24 dev br-lan proto kernel scope link src 192.168.1.251 + +root@ZhuoTK:/# killall quectel-CM +[04-13_03:20:46:130] requestDeactivateDefaultPDP WdsConnectionIPv4Handle +[04-13_03:20:46:406] ifconfig wwan0 0.0.0.0 +[04-13_03:20:46:418] ifconfig wwan0 down +[04-13_03:20:46:600] QmiWwanThread exit +[04-13_03:20:46:600] qmi_main exit diff --git a/package/wwan/driver/quectel_cm_5G/src/log/qmi_wwan_q_bridge.txt b/package/wwan/driver/quectel_cm_5G/src/log/qmi_wwan_q_bridge.txt new file mode 100644 index 000000000..890122c45 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/log/qmi_wwan_q_bridge.txt @@ -0,0 +1,57 @@ +root@ZhuoTK:/# insmod qmi_wwan_q.ko +[ 116.910000] qmi_wwan_q 1-1.3:1.4: cdc-wdm0: USB WDM device +[ 116.930000] qmi_wwan_q 1-1.3:1.4: Quectel Android work on RawIP mode +[ 116.930000] qmi_wwan_q 1-1.3:1.4: rx_urb_size = 1520 +[ 116.940000] qmi_wwan_q 1-1.3:1.4 wwan0: register 'qmi_wwan_q' at usb-101c0000.ehci-1.3, WWAN/QMI device, 06:fb:51:a3:d6:c5 +[ 116.950000] usbcore: registered new interface driver qmi_wwan_q + +root@ZhuoTK:/# brctl addbr br0 +root@ZhuoTK:/# brctl addif br0 eth0.1 +root@ZhuoTK:/# brctl addif br0 wwan0 +root@ZhuoTK:/# brctl show +bridge name bridge id STP enabled interfaces +br0 8000.00ca019197b9 no eth0.1 + wwan0 + +root@ZhuoTK:/# quectel-CM -s cmnet -b & +root@ZhuoTK:/# [04-13_05:13:39:369] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-13_05:13:39:372] Find /sys/bus/usb/devices/1-1.3 idVendor=0x2c7c idProduct=0x125, bus=0x001, dev=0x003 +[ 143.340000] net wwan0: bridge_mode change to 0x1 +[04-13_05:13:39:373] Auto find qmichannel = /dev/cdc-wdm0 +[04-13_05:13:39:374] Auto find usbnet_adapter = wwan0 +[04-13_05:13:39:374] netcard driver = qmi_wwan_q, driver version = V1.2.0.23 +[04-13_05:13:39:380] Modem works in QMI mode +[04-13_05:13:39:388] cdc_wdm_fd = 7 +[04-13_05:13:39:466] Get clientWDS = 5 +[04-13_05:13:39:496] Get clientDMS = 2 +[04-13_05:13:39:527] Get clientNAS = 4 +[04-13_05:13:39:559] Get clientUIM = 1 +[04-13_05:13:39:592] Get clientWDA = 1 +[04-13_05:13:39:626] requestBaseBandVersion EC25EFAR06A11M4G +[04-13_05:13:39:752] requestGetSIMStatus SIMStatus: SIM_READY +[04-13_05:13:39:752] requestSetProfile[1] cmnet///0 +[04-13_05:13:39:816] requestGetProfile[1] cmnet///0 +[04-13_05:13:39:848] requestRegistrationState2 MCC: 460, MNC: 0, PS: Attached, DataCap: LTE +[04-13_05:13:39:879] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED +[04-13_05:13:39:880] ifconfig wwan0 0.0.0.0 +[04-13_05:13:39:893] ifconfig wwan0 down +[04-13_05:13:39:943] requestSetupDataCall WdsConnectionIPv4Handle: 0x872627c0 +[04-13_05:13:40:073] ifconfig wwan0 up +[04-13_05:13:40:085] echo '0xa8d9237' > /sys/class/net/wwan0/bridge_ipv4 + 
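+
+(note: the value written to bridge_ipv4 is simply the modem-assigned IPv4 address packed as hex -- here 0xa8d9237 is 0x0a.0x8d.0x92.0x37 = 10.141.146.55, the same address that appears in the "wwan0 sip = 10.141.146.55" lines below, where the driver evidently answers ARP for that address on behalf of the PC bridged onto wwan0. The encoding can be reproduced with, e.g.:
+# printf '0x%02x%02x%02x%02x\n' 10 141 146 55
+0x0a8d9237
+)
+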
+root@ZhuoTK:/# ifconfig br0 up +[ 165.730000] wwan0 PC Mac Address: 00:0e:c6:a6:6c:f1 +[ 165.750000] wwan0 PC Mac Address: 00:0e:c6:a6:6c:f1 +[ 165.860000] wwan0 sip = 10.141.146.55, tip=10.141.146.56, ipv4=10.141.146.55 +[ 165.870000] wwan0 sip = 10.141.146.55, tip=10.141.146.56, ipv4=10.141.146.55 +[ 165.990000] wwan0 sip = 10.141.146.55, tip=10.141.146.56, ipv4=10.141.146.55 +[ 166.010000] wwan0 sip = 0.0.0.0, tip=10.141.146.55, ipv4=10.141.146.55 +[ 166.070000] wwan0 sip = 10.141.146.55, tip=10.141.146.56, ipv4=10.141.146.55 +[ 167.010000] wwan0 sip = 0.0.0.0, tip=10.141.146.55, ipv4=10.141.146.55 +[ 167.480000] br0: port 2(wwan0) entered forwarding state +[ 167.520000] br0: port 1(eth0.1) entered forwarding state +[ 168.020000] wwan0 sip = 0.0.0.0, tip=10.141.146.55, ipv4=10.141.146.55 +[ 169.010000] wwan0 sip = 10.141.146.55, tip=10.141.146.55, ipv4=10.141.146.55 +[ 169.120000] wwan0 sip = 10.141.146.55, tip=10.141.146.56, ipv4=10.141.146.55 +[ 169.130000] wwan0 sip = 10.141.146.55, tip=10.141.146.56, ipv4=10.141.146.55 +[ 176.620000] wwan0 sip = 10.141.146.55, tip=10.141.146.56, ipv4=10.141.146.55 diff --git a/package/wwan/driver/quectel_cm_5G/src/log/qmi_wwan_q_qmap=1.txt b/package/wwan/driver/quectel_cm_5G/src/log/qmi_wwan_q_qmap=1.txt new file mode 100644 index 000000000..27cf80cfb --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/log/qmi_wwan_q_qmap=1.txt @@ -0,0 +1,54 @@ +root@ZhuoTK:/# insmod qmi_wwan_q.ko qmap_mode=1 +[ 1367.200000] usbcore: registered new interface driver qmi_wwan_q +[ 1383.840000] usb 1-1.3: new high-speed USB device number 7 using ehci-platform +[ 1384.080000] qmi_wwan_q 1-1.3:1.4: cdc-wdm0: USB WDM device +[ 1384.080000] qmi_wwan_q 1-1.3:1.4: Quectel Android work on RawIP mode +[ 1384.100000] qmi_wwan_q 1-1.3:1.4: rx_urb_size = 4096 +[ 1384.100000] qmi_wwan_q 1-1.3:1.4 wwan0: register 'qmi_wwan_q' at usb-101c0000.ehci-1.3, WWAN/QMI device, da:0b:ce:b2:db:21 + +root@ZhuoTK:/# quectel-CM -s cmnet & +[04-13_03:41:28:144] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-13_03:41:28:146] Find /sys/bus/usb/devices/1-1.3 idVendor=0x2c7c idProduct=0x125, bus=0x001, dev=0x007 +[04-13_03:41:28:148] Auto find qmichannel = /dev/cdc-wdm0 +[04-13_03:41:28:148] Auto find usbnet_adapter = wwan0 +[04-13_03:41:28:148] netcard driver = qmi_wwan_q, driver version = V1.2.0.23 +[04-13_03:41:28:149] qmap_mode = 1, qmap_version = 5, qmap_size = 4096, muxid = 0x81, qmap_netcard = wwan0 +[04-13_03:41:28:150] Modem works in QMI mode +[04-13_03:41:28:158] cdc_wdm_fd = 7 +[04-13_03:41:28:238] Get clientWDS = 4 +[04-13_03:41:28:271] Get clientDMS = 1 +[04-13_03:41:28:302] Get clientNAS = 4 +[04-13_03:41:28:334] Get clientUIM = 1 +[04-13_03:41:28:365] Get clientWDA = 1 +[04-13_03:41:28:397] requestBaseBandVersion EC25EFAR06A11M4G +[04-13_03:41:28:430] qmap_settings.rx_urb_size = 4096 +[ 1393.530000] net wwan0: ul_data_aggregation_max_datagrams=11, ul_data_aggregation_max_size=4096, dl_minimum_padding=0 +[04-13_03:41:28:431] qmap_settings.ul_data_aggregation_max_datagrams = 11 +[04-13_03:41:28:431] qmap_settings.ul_data_aggregation_max_size = 4096 +[04-13_03:41:28:431] qmap_settings.dl_minimum_padding = 0 +[04-13_03:41:28:557] requestGetSIMStatus SIMStatus: SIM_READY +[04-13_03:41:28:558] requestSetProfile[1] cmnet///0 +[04-13_03:41:28:622] requestGetProfile[1] cmnet///0 +[04-13_03:41:28:654] requestRegistrationState2 MCC: 460, MNC: 0, PS: Attached, DataCap: LTE +[04-13_03:41:28:685] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED +[ 1393.790000] net wwan0: link_state 0x1 
-> 0x0 +[04-13_03:41:28:692] ifconfig wwan0 0.0.0.0 +[04-13_03:41:28:703] ifconfig wwan0 down +[04-13_03:41:28:751] requestSetupDataCall WdsConnectionIPv4Handle: 0x8729a6b0 +[ 1393.980000] net wwan0: link_state 0x0 -> 0x1 +[04-13_03:41:28:882] ifconfig wwan0 up +[04-13_03:41:28:895] you are use OpenWrt? +[04-13_03:41:28:895] should not calling udhcpc manually? +[04-13_03:41:28:895] should modify /etc/config/network as below? +[04-13_03:41:28:896] config interface wan +[04-13_03:41:28:896] option ifname wwan0 +[04-13_03:41:28:896] option proto dhcp +[04-13_03:41:28:896] should use "/sbin/ifstaus wan" to check wwan0 's status? +[04-13_03:41:28:896] busybox udhcpc -f -n -q -t 5 -i wwan0 +[04-13_03:41:28:907] udhcpc (v1.23.2) started +[04-13_03:41:28:919] Sending discover... +[04-13_03:41:28:925] Sending select for 10.129.198.20... +[04-13_03:41:28:929] Lease of 10.129.198.20 obtained, lease time 7200 +[04-13_03:41:28:934] udhcpc: ifconfig wwan0 10.129.198.20 netmask 255.255.255.248 broadcast + +[04-13_03:41:28:949] udhcpc: setting default routers: 10.129.198.21 + diff --git a/package/wwan/driver/quectel_cm_5G/src/log/qmi_wwan_q_qmap=1_bridge.txt b/package/wwan/driver/quectel_cm_5G/src/log/qmi_wwan_q_qmap=1_bridge.txt new file mode 100644 index 000000000..4765c3dd6 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/log/qmi_wwan_q_qmap=1_bridge.txt @@ -0,0 +1,86 @@ +root@ZhuoTK:/# insmod qmi_wwan_q.ko qmap_mode=1 +[ 49.000000] qmi_wwan_q 1-1.3:1.4: cdc-wdm0: USB WDM device +[ 49.000000] qmi_wwan_q 1-1.3:1.4: Quectel Android work on RawIP mode +[ 49.020000] qmi_wwan_q 1-1.3:1.4: rx_urb_size = 4096 +[ 49.020000] qmi_wwan_q 1-1.3:1.4 wwan0: register 'qmi_wwan_q' at usb-101c0000.ehci-1.3, WWAN/QMI device, de:ae:5c:82:b5:b2 +[ 49.030000] usbcore: registered new interface driver qmi_wwan_q + +root@ZhuoTK:/# brctl addbr br0 +root@ZhuoTK:/# brctl addif br0 eth0.1 +root@ZhuoTK:/# brctl addif br0 wwan0 +root@ZhuoTK:/# brctl show +bridge name bridge id STP enabled interfaces +br0 8000.00ca019197b9 no eth0.1 + wwan0 + +root@ZhuoTK:/# quectel-CM -s cmnet -b & +[04-13_05:11:46:442] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-13_05:11:46:444] Find /sys/bus/usb/devices/1-1.3 idVendor=0x2c7c idProduct=0x125, bus=0x001, dev=0x003 +[ 84.340000] net wwan0: bridge_mode change to 0x1 +[04-13_05:11:46:446] Auto find qmichannel = /dev/cdc-wdm0 +[04-13_05:11:46:446] Auto find usbnet_adapter = wwan0 +[04-13_05:11:46:446] netcard driver = qmi_wwan_q, driver version = V1.2.0.23 +[04-13_05:11:46:447] qmap_mode = 1, qmap_version = 5, qmap_size = 4096, muxid = 0x81, qmap_netcard = wwan0 +[04-13_05:11:46:454] Modem works in QMI mode +[04-13_05:11:46:462] cdc_wdm_fd = 7 +[04-13_05:11:46:537] Get clientWDS = 5 +[04-13_05:11:46:569] Get clientDMS = 1 +[04-13_05:11:46:601] Get clientNAS = 4 +[04-13_05:11:46:633] Get clientUIM = 1 +[04-13_05:11:46:666] Get clientWDA = 1 +[04-13_05:11:46:697] requestBaseBandVersion EC25EFAR06A11M4G +[04-13_05:11:46:730] qmap_settings.rx_urb_size = 4096 +[ 84.620000] net wwan0: ul_data_aggregation_max_datagrams=11, ul_data_aggregation_max_size=4096, dl_minimum_padding=0 +[04-13_05:11:46:730] qmap_settings.ul_data_aggregation_max_datagrams = 11 +[04-13_05:11:46:730] qmap_settings.ul_data_aggregation_max_size = 4096 +[04-13_05:11:46:730] qmap_settings.dl_minimum_padding = 0 +[04-13_05:11:46:859] requestGetSIMStatus SIMStatus: SIM_READY +[04-13_05:11:46:859] requestSetProfile[1] cmnet///0 +[04-13_05:11:46:922] requestGetProfile[1] cmnet///0 +[04-13_05:11:46:954] requestRegistrationState2 MCC: 
460, MNC: 0, PS: Attached, DataCap: LTE +[04-13_05:11:46:986] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED +[ 84.880000] net wwan0: link_state 0x1 -> 0x0 +[04-13_05:11:46:992] ifconfig wwan0 0.0.0.0 +[04-13_05:11:47:005] ifconfig wwan0 down +[04-13_05:11:47:050] requestSetupDataCall WdsConnectionIPv4Handle: 0x872a5830 +[ 85.070000] net wwan0: link_state 0x0 -> 0x1 +[04-13_05:11:47:183] ifconfig wwan0 up +[04-13_05:11:47:195] echo '0xa54a78b' > /sys/class/net/wwan0/bridge_ipv4 + +root@ZhuoTK:/# ifconfig wwan0 +wwan0 Link encap:Ethernet HWaddr DE:AE:5C:82:B5:B2 + inet6 addr: fe80::dcae:5cff:fe82:b5b2/64 Scope:Link + UP RUNNING NOARP MTU:1500 Metric:1 + RX packets:3792 errors:0 dropped:0 overruns:0 frame:0 + TX packets:3271 errors:0 dropped:36 overruns:0 carrier:0 + collisions:0 txqueuelen:1000 + RX bytes:2271762 (2.1 MiB) TX bytes:565184 (551.9 KiB) + +root@ZhuoTK:/# ifconfig br0 up +[ 89.530000] br0: port 2(wwan0) entered forwarding state +[ 89.530000] br0: port 2(wwan0) entered forwarding state +[ 89.540000] br0: port 1(eth0.1) entered forwarding state +[ 89.540000] br0: port 1(eth0.1) entered forwarding state + +root@ZhuoTK:/# +[ 93.720000] wwan0 sip = 192.168.1.153, tip=10.84.167.140, ipv4=10.84.167.139 +[ 104.560000] br0: port 2(wwan0) entered forwarding state +[ 104.560000] br0: port 1(eth0.1) entered forwarding state +[ 111.750000] rt305x-esw 10110000.esw: link changed 0x00 +[ 116.440000] rt305x-esw 10110000.esw: link changed 0x01 +[ 116.620000] wwan0 PC Mac Address: 00:0e:c6:a6:6c:f1 +[ 116.680000] wwan0 PC Mac Address: 00:0e:c6:a6:6c:f1 +[ 116.690000] wwan0 PC Mac Address: 00:0e:c6:a6:6c:f1 +[ 116.760000] wwan0 sip = 10.84.167.139, tip=10.84.167.140, ipv4=10.84.167.139 +[ 117.050000] wwan0 sip = 10.84.167.139, tip=10.84.167.140, ipv4=10.84.167.139 +[ 117.220000] wwan0 sip = 0.0.0.0, tip=10.84.167.139, ipv4=10.84.167.139 +[ 117.820000] wwan0 sip = 10.84.167.139, tip=10.84.167.140, ipv4=10.84.167.139 +[ 118.220000] wwan0 sip = 0.0.0.0, tip=10.84.167.139, ipv4=10.84.167.139 +[ 118.300000] wwan0 sip = 10.84.167.139, tip=10.84.167.140, ipv4=10.84.167.139 +[ 119.220000] wwan0 sip = 0.0.0.0, tip=10.84.167.139, ipv4=10.84.167.139 +[ 120.220000] wwan0 sip = 10.84.167.139, tip=10.84.167.139, ipv4=10.84.167.139 +[ 120.300000] wwan0 sip = 10.84.167.139, tip=10.84.167.140, ipv4=10.84.167.139 +[ 121.430000] wwan0 sip = 10.84.167.139, tip=10.84.167.140, ipv4=10.84.167.139 +[ 141.730000] wwan0 sip = 10.84.167.139, tip=10.84.167.140, ipv4=10.84.167.139 +[ 144.390000] wwan0 sip = 10.84.167.139, tip=10.84.167.140, ipv4=10.84.167.139 +[ 144.510000] wwan0 sip = 10.84.167.139, tip=10.84.167.140, ipv4=10.84.167.139 diff --git a/package/wwan/driver/quectel_cm_5G/src/log/qmi_wwan_q_qmap=4.txt b/package/wwan/driver/quectel_cm_5G/src/log/qmi_wwan_q_qmap=4.txt new file mode 100644 index 000000000..2a25be012 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/log/qmi_wwan_q_qmap=4.txt @@ -0,0 +1,185 @@ +root@ZhuoTK:/# insmod qmi_wwan_q.ko qmap_mode=4 +[ 1515.180000] usbcore: registered new interface driver qmi_wwan_q +[ 1530.260000] usb 1-1.3: new high-speed USB device number 8 using ehci-platform +[ 1530.500000] qmi_wwan_q 1-1.3:1.4: cdc-wdm0: USB WDM device +[ 1530.500000] qmi_wwan_q 1-1.3:1.4: Quectel Android work on RawIP mode +[ 1530.520000] qmi_wwan_q 1-1.3:1.4: rx_urb_size = 4096 +[ 1530.520000] qmi_wwan_q 1-1.3:1.4 wwan0: register 'qmi_wwan_q' at usb-101c0000.ehci-1.3, RMNET/USB device, da:0b:ce:b2:db:21 +[ 1530.530000] net wwan0: qmap_register_device wwan0_1 +[ 1530.540000] net wwan0: 
qmap_register_device wwan0_2 +[ 1530.550000] net wwan0: qmap_register_device wwan0_3 +[ 1530.550000] net wwan0: qmap_register_device wwan0_4 + +root@ZhuoTK:~# quectel-qmi-proxy & +[04-13_03:44:53:958] Will use cdc-wdm='/dev/cdc-wdm0', proxy='quectel-qmi-proxy0' +[04-13_03:44:53:959] qmi_proxy_init enter +[04-13_03:44:53:960] qmi_proxy_loop enter thread_id 0x77c07530 +[04-13_03:44:54:960] qmi_proxy_init succful +[04-13_03:44:54:960] local server: quectel-qmi-proxy0 sockfd = 4 +[04-13_03:44:54:960] qmi_proxy_server_fd = 4 +[04-13_03:45:04:346] +++ ClientFd=5 +[04-13_03:45:04:410] +++ ClientFd=5 QMIType=1 ClientId=4 +[04-13_03:45:04:442] +++ ClientFd=5 QMIType=2 ClientId=1 +[04-13_03:45:04:474] +++ ClientFd=5 QMIType=3 ClientId=4 +[04-13_03:45:04:506] +++ ClientFd=5 QMIType=11 ClientId=1 +[04-13_03:45:04:539] +++ ClientFd=5 QMIType=26 ClientId=1 +[04-13_03:45:10:770] +++ ClientFd=6 +[04-13_03:45:10:811] +++ ClientFd=6 QMIType=1 ClientId=21 +[04-13_03:45:10:843] +++ ClientFd=6 QMIType=2 ClientId=2 +[04-13_03:45:10:875] +++ ClientFd=6 QMIType=3 ClientId=5 +[04-13_03:45:10:907] +++ ClientFd=6 QMIType=11 ClientId=2 +[04-13_03:46:31:419] --- ClientFd=6 QMIType=1 ClientId=21 +[04-13_03:46:31:451] --- ClientFd=6 QMIType=2 ClientId=2 +[04-13_03:46:31:484] --- ClientFd=6 QMIType=3 ClientId=5 +[04-13_03:46:31:517] --- ClientFd=6 QMIType=11 ClientId=2 +[04-13_03:46:31:518] qmi_proxy_loop poll fd = 6, revents = 0011 +[04-13_03:46:31:519] --- ClientFd=6 + +root@ZhuoTK:/# quectel-CM -n 1 -s cmnet & +root@ZhuoTK:/# [04-13_03:45:04:340] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-13_03:45:04:343] Find /sys/bus/usb/devices/1-1.3 idVendor=0x2c7c idProduct=0x125, bus=0x001, dev=0x008 +[04-13_03:45:04:344] Auto find qmichannel = /dev/cdc-wdm0 +[04-13_03:45:04:344] Auto find usbnet_adapter = wwan0 +[04-13_03:45:04:345] netcard driver = qmi_wwan_q, driver version = V1.2.0.23 +[04-13_03:45:04:345] qmap_mode = 4, qmap_version = 5, qmap_size = 4096, muxid = 0x81, qmap_netcard = wwan0_1 +[04-13_03:45:04:345] Modem works in QMI mode +[04-13_03:45:04:347] connect to quectel-qmi-proxy0 sockfd = 7 +[04-13_03:45:04:347] cdc_wdm_fd = 7 +[04-13_03:45:04:411] Get clientWDS = 4 +[04-13_03:45:04:443] Get clientDMS = 1 +[04-13_03:45:04:475] Get clientNAS = 4 +[04-13_03:45:04:507] Get clientUIM = 1 +[04-13_03:45:04:540] Get clientWDA = 1 +[04-13_03:45:04:571] requestBaseBandVersion EC25EFAR06A11M4G +[04-13_03:45:04:602] qmap_settings.rx_urb_size = 4096 +[ 1609.700000] net wwan0: ul_data_aggregation_max_datagrams=11, ul_data_aggregation_max_size=4096, dl_minimum_padding=0 +[04-13_03:45:04:603] qmap_settings.ul_data_aggregation_max_datagrams = 11 +[04-13_03:45:04:603] qmap_settings.ul_data_aggregation_max_size = 4096 +[04-13_03:45:04:603] qmap_settings.dl_minimum_padding = 0 +[04-13_03:45:04:731] requestGetSIMStatus SIMStatus: SIM_READY +[04-13_03:45:04:731] requestSetProfile[1] cmnet///0 +[04-13_03:45:04:795] requestGetProfile[1] cmnet///0 +[04-13_03:45:04:827] requestRegistrationState2 MCC: 460, MNC: 0, PS: Attached, DataCap: LTE +[04-13_03:45:04:858] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED +[ 1609.960000] net wwan0: link_state 0x1 -> 0x0 +[04-13_03:45:04:865] ifconfig wwan0 down +[04-13_03:45:04:879] ifconfig wwan0_1 0.0.0.0 +[04-13_03:45:04:889] ifconfig wwan0_1 down +[04-13_03:45:04:955] requestSetupDataCall WdsConnectionIPv4Handle: 0x87253410 +[ 1610.180000] net wwan0: link_state 0x0 -> 0x1 +[04-13_03:45:05:087] ifconfig wwan0 up +[ 1610.200000] IPv6: ADDRCONF(NETDEV_UP): wwan0: link is not ready 
+[04-13_03:45:05:105] ifconfig wwan0_1 up +[ 1610.220000] IPv6: ADDRCONF(NETDEV_CHANGE): wwan0: link becomes ready +[04-13_03:45:05:125] you are use OpenWrt? +[04-13_03:45:05:125] should not calling udhcpc manually? +[04-13_03:45:05:125] should modify /etc/config/network as below? +[04-13_03:45:05:125] config interface wan +[04-13_03:45:05:125] option ifname wwan0_1 +[04-13_03:45:05:125] option proto dhcp +[04-13_03:45:05:126] should use "/sbin/ifstaus wan" to check wwan0_1 's status? +[04-13_03:45:05:126] busybox udhcpc -f -n -q -t 5 -i wwan0_1 +[04-13_03:45:05:136] udhcpc (v1.23.2) started +[04-13_03:45:05:148] Sending discover... +[04-13_03:45:05:155] Sending select for 10.244.10.206... +[04-13_03:45:05:160] Lease of 10.244.10.206 obtained, lease time 7200 +[04-13_03:45:05:165] udhcpc: ifconfig wwan0_1 10.244.10.206 netmask 255.255.255.252 broadcast + +[04-13_03:45:05:174] udhcpc: setting default routers: 10.244.10.205 + +root@ZhuoTK:/# quectel-CM -n 2 -s 4gnet & +[04-13_03:45:10:764] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-13_03:45:10:767] Find /sys/bus/usb/devices/1-1.3 idVendor=0x2c7c idProduct=0x125, bus=0x001, dev=0x008 +[04-13_03:45:10:768] Auto find qmichannel = /dev/cdc-wdm0 +[04-13_03:45:10:768] Auto find usbnet_adapter = wwan0 +[04-13_03:45:10:768] netcard driver = qmi_wwan_q, driver version = V1.2.0.23 +[04-13_03:45:10:769] qmap_mode = 4, qmap_version = 5, qmap_size = 4096, muxid = 0x82, qmap_netcard = wwan0_2 +[04-13_03:45:10:769] Modem works in QMI mode +[04-13_03:45:10:771] connect to quectel-qmi-proxy0 sockfd = 7 +[04-13_03:45:10:771] cdc_wdm_fd = 7 +[04-13_03:45:10:812] Get clientWDS = 21 +[04-13_03:45:10:844] Get clientDMS = 2 +[04-13_03:45:10:876] Get clientNAS = 5 +[04-13_03:45:10:908] Get clientUIM = 2 +[04-13_03:45:10:971] requestBaseBandVersion EC25EFAR06A11M4G +[04-13_03:45:11:099] requestGetSIMStatus SIMStatus: SIM_READY +[04-13_03:45:11:099] requestSetProfile[2] 4gnet///0 +[04-13_03:45:11:163] requestGetProfile[2] 4gnet///0 +[04-13_03:45:11:195] requestRegistrationState2 MCC: 460, MNC: 0, PS: Attached, DataCap: LTE +[04-13_03:45:11:227] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED +[04-13_03:45:11:227] ifconfig wwan0_2 0.0.0.0 +[ 1616.340000] IPv6: ADDRCONF(NETDEV_UP): wwan0_2: link is not ready +[04-13_03:45:11:246] ifconfig wwan0_2 down +[04-13_03:45:11:642] requestSetupDataCall WdsConnectionIPv4Handle: 0x87254580 +[ 1616.870000] net wwan0: link_state 0x1 -> 0x3 +[04-13_03:45:11:775] ifconfig wwan0 up +[04-13_03:45:11:785] ifconfig wwan0_2 up +[04-13_03:45:11:798] you are use OpenWrt? +[04-13_03:45:11:798] should not calling udhcpc manually? +[04-13_03:45:11:798] should modify /etc/config/network as below? +[04-13_03:45:11:798] config interface wan +[04-13_03:45:11:798] option ifname wwan0_2 +[04-13_03:45:11:798] option proto dhcp +[04-13_03:45:11:798] should use "/sbin/ifstaus wan" to check wwan0_2 's status? +[04-13_03:45:11:799] busybox udhcpc -f -n -q -t 5 -i wwan0_2 +[04-13_03:45:11:809] udhcpc (v1.23.2) started +[04-13_03:45:11:821] Sending discover... +[04-13_03:45:11:830] Sending select for 10.245.78.212... 
+[04-13_03:45:11:836] Lease of 10.245.78.212 obtained, lease time 7200 +[04-13_03:45:11:842] udhcpc: ifconfig wwan0_2 10.245.78.212 netmask 255.255.255.248 broadcast + +[04-13_03:45:11:852] udhcpc: setting default routers: 10.245.78.213 + +root@ZhuoTK:/# ifconfig wwan0_1 +wwan0_1 Link encap:Ethernet HWaddr DA:0B:CE:B2:DB:21 + inet addr:10.244.10.206 Mask:255.255.255.252 + inet6 addr: fe80::d80b:ceff:feb2:db21/64 Scope:Link + UP RUNNING NOARP MTU:1500 Metric:1 + RX packets:2 errors:0 dropped:0 overruns:0 frame:0 + TX packets:4 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:1000 + RX bytes:640 (640.0 B) TX bytes:1344 (1.3 KiB) + +root@ZhuoTK:/# ifconfig wwan0_2 +wwan0_2 Link encap:Ethernet HWaddr DA:0B:CE:B2:DB:21 + inet addr:10.245.78.212 Mask:255.255.255.248 + inet6 addr: fe80::d80b:ceff:feb2:db21/64 Scope:Link + UP RUNNING NOARP MTU:1500 Metric:1 + RX packets:8 errors:0 dropped:0 overruns:0 frame:0 + TX packets:7 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:1000 + RX bytes:1193 (1.1 KiB) TX bytes:1028 (1.0 KiB) + +root@ZhuoTK:/# ip ro del 8.8.8.8/32 +RTNETLINK answers: No such process + +root@ZhuoTK:/# ip ro add 8.8.8.8/32 dev wwan0_1 +root@ZhuoTK:/# ping 8.8.8.8 +PING 8.8.8.8 (8.8.8.8): 56 data bytes +64 bytes from 8.8.8.8: seq=0 ttl=52 time=113.508 ms + +root@ZhuoTK:/# ip ro del 8.8.8.8/32 +root@ZhuoTK:/# ip ro del 8.8.8.8/32 +RTNETLINK answers: No such process + +root@ZhuoTK:/# ip ro add 8.8.8.8/32 dev wwan0_2 +root@ZhuoTK:/# ping 8.8.8.8 +PING 8.8.8.8 (8.8.8.8): 56 data bytes +64 bytes from 8.8.8.8: seq=0 ttl=111 time=123.651 ms + +root@ZhuoTK:/# quectel-CM -k 2 +[04-13_03:46:30:808] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-13_03:46:30:811] /proc/2834/cmdline: quectel-CM -n 2 -s 4gnet +[04-13_03:46:30:811] send SIGINT to process 2834 +[04-13_03:46:30:811] requestDeactivateDefaultPDP WdsConnectionIPv4Handle +[ 1696.460000] net wwan0: link_state 0x3 -> 0x1 +[04-13_03:46:31:361] ifconfig wwan0_2 0.0.0.0 +[04-13_03:46:31:373] ifconfig wwan0_2 down +[04-13_03:46:31:516] QmiWwanThread exit +[04-13_03:46:31:516] qmi_main exit + +root@ZhuoTK:/# ifconfig wwan0_2 +wwan0_2 Link encap:Ethernet HWaddr DA:0B:CE:B2:DB:21 + NOARP MTU:1500 Metric:1 + RX packets:16 errors:0 dropped:0 overruns:0 frame:0 + TX packets:15 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:1000 + RX bytes:1865 (1.8 KiB) TX bytes:1620 (1.5 KiB) diff --git a/package/wwan/driver/quectel_cm_5G/src/log/qmi_wwan_q_qmap=4_bridge.txt b/package/wwan/driver/quectel_cm_5G/src/log/qmi_wwan_q_qmap=4_bridge.txt new file mode 100644 index 000000000..50eead59b --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/log/qmi_wwan_q_qmap=4_bridge.txt @@ -0,0 +1,132 @@ +root@ZhuoTK:/# brctl addbr br0 +brctl: bridge br0: File exists +root@ZhuoTK:/# brctl delbr br0 +root@ZhuoTK:/# brctl addbr br0 +root@ZhuoTK:/# brctl addif br0 eth0.1 + +root@ZhuoTK:/# insmod qmi_wwan_q.ko qmap_mode=4 +[ 365.340000] usbcore: registered new interface driver qmi_wwan_q +[ 380.860000] usb 1-1.3: new high-speed USB device number 4 using ehci-platform +[ 381.100000] qmi_wwan_q 1-1.3:1.4: cdc-wdm0: USB WDM device +[ 381.100000] qmi_wwan_q 1-1.3:1.4: Quectel Android work on RawIP mode +[ 381.120000] qmi_wwan_q 1-1.3:1.4: rx_urb_size = 4096 +[ 381.120000] qmi_wwan_q 1-1.3:1.4 wwan0: register 'qmi_wwan_q' at usb-101c0000.ehci-1.3, RMNET/USB device, fa:24:73:b5:39:a8 +[ 381.130000] net wwan0: qmap_register_device wwan0_1 +[ 381.140000] net wwan0: qmap_register_device wwan0_2 +[ 381.150000] net wwan0: 
qmap_register_device wwan0_3 +[ 381.150000] net wwan0: qmap_register_device wwan0_4 + +root@ZhuoTK:/# brctl addbr br0 +root@ZhuoTK:/# brctl addif br0 eth0.1 +root@ZhuoTK:/# brctl addif br0 wwan0_2 +root@ZhuoTK:/# brctl show +bridge name bridge id STP enabled interfaces +br0 8000.00ca019197b9 no eth0.1 + wwan0_2 + +root@ZhuoTK:/# quectel-qmi-proxy & +[04-13_05:18:10:832] Will use cdc-wdm='/dev/cdc-wdm0', proxy='quectel-qmi-proxy0' +[04-13_05:18:10:833] qmi_proxy_init enter +[04-13_05:18:10:833] qmi_proxy_loop enter thread_id 0x77995530 +[04-13_05:18:11:833] qmi_proxy_init succful +[04-13_05:18:11:833] local server: quectel-qmi-proxy0 sockfd = 4 +[04-13_05:18:11:833] qmi_proxy_server_fd = 4 + +root@ZhuoTK:/# quectel-CM -n 2 -s 4gnet -b & +[04-13_05:18:20:144] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-13_05:18:20:146] Find /sys/bus/usb/devices/1-1.3 idVendor=0x2c7c idProduct=0x125, bus=0x001, dev=0x004 +[04-13_05:18:20:147] Auto find qmichannel = /dev/cdc-wdm0 +[04-13_05:18:20:148] Auto find usbnet_adapter = wwan0 +[04-13_05:18:20:148] netcard driver = qmi_wwan_q, driver version = V1.2.0.23 +[04-13_05:18:20:148] qmap_mode = 4, qmap_version = 5, qmap_size = 4096, muxid = 0x82, qmap_netcard = wwan0_2 +[04-13_05:18:20:149] Modem works in QMI mode +[04-13_05:18:20:150] connect to quectel-qmi-proxy0 sockfd = 7 +[04-13_05:18:20:150] cdc_wdm_fd = 7 +[04-13_05:18:20:370] requestBaseBandVersion EC25EFAR06A11M4G +[04-13_05:18:20:403] qmap_settings.rx_urb_size = 4096 +[04-13_05:18:20:404] qmap_settings.ul_data_aggregation_max_datagrams = 11 +[04-13_05:18:20:404] qmap_settings.ul_data_aggregation_max_size = 4096 +[04-13_05:18:20:404] qmap_settings.dl_minimum_padding = 0 +[04-13_05:18:20:530] requestGetSIMStatus SIMStatus: SIM_READY +[04-13_05:18:20:530] requestSetProfile[2] 4gnet///0 +[04-13_05:18:20:594] requestGetProfile[2] 4gnet///0 +[04-13_05:18:20:626] requestRegistrationState2 MCC: 460, MNC: 0, PS: Attached, DataCap: LTE +[04-13_05:18:20:657] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED +[04-13_05:18:20:658] ifconfig wwan0_2 0.0.0.0 +[04-13_05:18:20:669] ifconfig wwan0_2 down +[04-13_05:18:21:010] requestSetupDataCall WdsConnectionIPv4Handle: 0x87249650 +[ 425.100000] net wwan0: link_state 0x1 -> 0x3 +[04-13_05:18:21:143] ifconfig wwan0 up +[04-13_05:18:21:156] ifconfig wwan0_2 up +[04-13_05:18:21:168] echo '0xa8ceec7' > /sys/class/net/wwan0_2/bridge_ipv4 + +root@ZhuoTK:/# ifconfig br0 up + +[ 450.520000] br0: port 2(wwan0_2) entered forwarding state +[ 450.520000] br0: port 1(eth0.1) entered forwarding state +[ 450.770000] wwan0_2 PC Mac Address: 00:0e:c6:a6:6c:f1 +[ 450.790000] wwan0_2 PC Mac Address: 00:0e:c6:a6:6c:f1 +[ 450.840000] wwan0_2 sip = 10.140.238.199, tip=10.140.238.200, ipv4=10.140.238.199 +[ 450.950000] wwan0_2 sip = 10.140.238.199, tip=10.140.238.200, ipv4=10.140.238.199 +[ 450.950000] wwan0_2 sip = 10.140.238.199, tip=10.140.238.200, ipv4=10.140.238.199 +[ 451.120000] wwan0_2 sip = 0.0.0.0, tip=10.140.238.199, ipv4=10.140.238.199 +[ 451.180000] wwan0_2 sip = 10.140.238.199, tip=10.140.238.200, ipv4=10.140.238.199 +[ 452.120000] wwan0_2 sip = 0.0.0.0, tip=10.140.238.199, ipv4=10.140.238.199 +[ 453.080000] wwan0_2 sip = 10.140.238.199, tip=10.140.238.200, ipv4=10.140.238.199 +[ 453.120000] wwan0_2 sip = 0.0.0.0, tip=10.140.238.199, ipv4=10.140.238.199 +[ 454.120000] wwan0_2 sip = 10.140.238.199, tip=10.140.238.199, ipv4=10.140.238.199 +[ 454.220000] wwan0_2 sip = 10.140.238.199, tip=10.140.238.200, ipv4=10.140.238.199 +[ 456.200000] wwan0_2 sip = 10.140.238.199, 
tip=10.140.238.200, ipv4=10.140.238.199 +[ 458.120000] wwan0_2 sip = 10.140.238.199, tip=10.140.238.200, ipv4=10.140.238.199 +[ 459.240000] wwan0_2 sip = 10.140.238.199, tip=10.140.238.200, ipv4=10.140.238.199 + +root@ZhuoTK:/# quectel-CM -n 1 -s cmnet & +[04-13_05:19:21:122] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-13_05:19:21:125] Find /sys/bus/usb/devices/1-1.3 idVendor=0x2c7c idProduct=0x125, bus=0x001, dev=0x004 +[04-13_05:19:21:126] Auto find qmichannel = /dev/cdc-wdm0 +[04-13_05:19:21:126] Auto find usbnet_adapter = wwan0 +[04-13_05:19:21:127] netcard driver = qmi_wwan_q, driver version = V1.2.0.23 +[04-13_05:19:21:127] qmap_mode = 4, qmap_version = 5, qmap_size = 4096, muxid = 0x81, qmap_netcard = wwan0_1 +[04-13_05:19:21:127] Modem works in QMI mode +[04-13_05:19:21:128] connect to quectel-qmi-proxy0 sockfd = 7 +[04-13_05:19:21:129] cdc_wdm_fd = 7 +[04-13_05:19:21:331] requestBaseBandVersion EC25EFAR06A11M4G +[04-13_05:19:21:459] requestGetSIMStatus SIMStatus: SIM_READY +[04-13_05:19:21:459] requestSetProfile[1] cmnet///0 +[04-13_05:19:21:522] requestGetProfile[1] cmnet///0 +[04-13_05:19:21:554] requestRegistrationState2 MCC: 460, MNC: 0, PS: Attached, DataCap: LTE +[04-13_05:19:21:585] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED +[ 485.550000] net wwan0: link_state 0x3 -> 0x2 +[04-13_05:19:21:592] ifconfig wwan0_1 0.0.0.0 +[ 485.570000] IPv6: ADDRCONF(NETDEV_UP): wwan0_1: link is not ready +[04-13_05:19:21:610] ifconfig wwan0_1 down +[04-13_05:19:21:682] requestSetupDataCall WdsConnectionIPv4Handle: 0x8725ed70 +[ 485.780000] net wwan0: link_state 0x2 -> 0x3 +[04-13_05:19:21:815] ifconfig wwan0 up +[04-13_05:19:21:826] ifconfig wwan0_1 up +[04-13_05:19:21:845] you are use OpenWrt? +[04-13_05:19:21:845] should not calling udhcpc manually? +[04-13_05:19:21:845] should modify /etc/config/network as below? +[04-13_05:19:21:845] config interface wan +[04-13_05:19:21:845] option ifname wwan0_1 +[04-13_05:19:21:845] option proto dhcp +[04-13_05:19:21:845] should use "/sbin/ifstaus wan" to check wwan0_1 's status? +[04-13_05:19:21:846] busybox udhcpc -f -n -q -t 5 -i wwan0_1 +[04-13_05:19:21:863] udhcpc (v1.23.2) started +[04-13_05:19:21:923] Sending discover... +[04-13_05:19:21:927] Sending select for 10.141.146.55... 
+[04-13_05:19:21:932] Lease of 10.141.146.55 obtained, lease time 7200
+[04-13_05:19:21:938] udhcpc: ifconfig wwan0_1 10.141.146.55 netmask 255.255.255.240 broadcast +
+[04-13_05:19:21:949] udhcpc: setting default routers: 10.141.146.56
+
+root@ZhuoTK:/# ip ro show
+default via 10.141.146.56 dev wwan0_1
+10.141.146.48/28 dev wwan0_1 proto kernel scope link src 10.141.146.55
+192.168.1.0/24 dev br-lan proto kernel scope link src 192.168.1.251
+
+root@ZhuoTK:/# ping 8.8.8.8
+PING 8.8.8.8 (8.8.8.8): 56 data bytes
+64 bytes from 8.8.8.8: seq=0 ttl=111 time=86.006 ms
+64 bytes from 8.8.8.8: seq=1 ttl=111 time=74.763 ms
+64 bytes from 8.8.8.8: seq=2 ttl=111 time=85.501 ms
+64 bytes from 8.8.8.8: seq=3 ttl=111 time=74.231 ms
diff --git a/package/wwan/driver/quectel_cm_5G/src/log/qmi_wwan_qmap=4.txt b/package/wwan/driver/quectel_cm_5G/src/log/qmi_wwan_qmap=4.txt
new file mode 100644
index 000000000..81d9ef1c3
--- /dev/null
+++ b/package/wwan/driver/quectel_cm_5G/src/log/qmi_wwan_qmap=4.txt
@@ -0,0 +1,55 @@
+# dmesg
+[ 1737.738025] usb 1-1.2: new high-speed USB device number 5 using xhci-hcd
+[ 1737.838917] usb 1-1.2: New USB device found, idVendor=2c7c, idProduct=0512, bcdDevice= 3.18
+[ 1737.838948] usb 1-1.2: New USB device strings: Mfr=1, Product=2, SerialNumber=3
+[ 1737.838963] usb 1-1.2: Product: EG12-EA
+[ 1737.838975] usb 1-1.2: Manufacturer: Quectel
+[ 1737.838986] usb 1-1.2: SerialNumber: 0123456789ABCDE
+[ 1737.994955] option 1-1.2:1.0: GSM modem (1-port) converter detected
+[ 1737.995430] usb 1-1.2: GSM modem (1-port) converter now attached to ttyUSB0
+[ 1737.995978] option 1-1.2:1.1: GSM modem (1-port) converter detected
+[ 1737.996409] usb 1-1.2: GSM modem (1-port) converter now attached to ttyUSB1
+[ 1737.996963] option 1-1.2:1.2: GSM modem (1-port) converter detected
+[ 1737.997351] usb 1-1.2: GSM modem (1-port) converter now attached to ttyUSB2
+[ 1737.997909] option 1-1.2:1.3: GSM modem (1-port) converter detected
+[ 1737.998976] usb 1-1.2: GSM modem (1-port) converter now attached to ttyUSB3
+[ 1825.835796] qmi_wwan 1-1.2:1.4: cdc-wdm0: USB WDM device
+[ 1825.839685] qmi_wwan 1-1.2:1.4 wwan0: register 'qmi_wwan' at usb-fe9c0000.xhci-1.2, WWAN/QMI device, 0e:80:14:b1:f6:b9
+[ 1825.840062] usbcore: registered new interface driver qmi_wwan
+
+# ifconfig wwan0 down
+# echo Y > /sys/class/net/wwan0/qmi/raw_ip
+
+# echo 1 > /sys/class/net/wwan0/qmi/add_mux
+# ifconfig qmimux0
+qmimux0: flags=4240<POINTOPOINT,NOARP,MULTICAST>  mtu 1500
+        unspec 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00  txqueuelen 1000  (UNSPEC)
+        RX packets 0  bytes 0 (0.0 B)
+        RX errors 0  dropped 0  overruns 0  frame 0
+        TX packets 0  bytes 0 (0.0 B)
+        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0
+
+# echo 2 > /sys/class/net/wwan0/qmi/add_mux
+# echo 3 > /sys/class/net/wwan0/qmi/add_mux
+# echo 4 > /sys/class/net/wwan0/qmi/add_mux
+# ifconfig qmimux3
+qmimux3: flags=4240<POINTOPOINT,NOARP,MULTICAST>  mtu 1500
+        unspec 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00  txqueuelen 1000  (UNSPEC)
+        RX packets 0  bytes 0 (0.0 B)
+        RX errors 0  dropped 0  overruns 0  frame 0
+        TX packets 0  bytes 0 (0.0 B)
+        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0
+
+set wwan0's mtu to the max QMAP packet size, otherwise usbnet.c:usbnet_change_mtu() will not accept the aggregated frames
+# ifconfig wwan0 mtu 16385
+
+run a QMI proxy program,
+# quectel-CM/quectel-qmi-proxy -d /dev/cdc-wdm0
+or libqmi's qmi-proxy; if you use libqmi's qmi-proxy, you can use qmicli to set up the data call.
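+(a hedged sketch, not taken from a captured log: with libqmi's qmi-proxy running -- it is started on the next line -- a plain data call can typically be brought up with
+# qmicli -d /dev/cdc-wdm0 --device-open-proxy --wds-start-network="apn=cmnet,ip-type=4" --client-no-release-cid
+the extra options needed to bind a QMAP mux port vary with the libqmi version; see the Quectel knowledge-base link below.)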
+# /usr/libexec/qmi-proxy --verbose --no-exit + +use quectel-CM to setup data call, if use libqmi's qmi-proxy, use '-p qmi-proxy' instead of '-p quectel-qmi-proxy' +# quectel-CM -p quectel-qmi-proxy -n 4 -s cmnet4 +# quectel-CM -p quectel-qmi-proxy -n 1 -s cmnet + +for how to use libqmi to setup data call, please refer to https://knowledge.quectel.com/display/SWSYSTLinuxAndroid/libqmi_How+to+using+QMAP+multiplexing \ No newline at end of file diff --git a/package/wwan/driver/quectel_cm_5G/src/log/usage_of_argument/6.txt b/package/wwan/driver/quectel_cm_5G/src/log/usage_of_argument/6.txt new file mode 100644 index 000000000..0da1dff9a --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/log/usage_of_argument/6.txt @@ -0,0 +1,68 @@ +root@OpenWrt:~# quectel-CM -s cmnet -4 -6 +[04-14_06:56:51:778] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-14_06:56:51:779] network interface '' or qmidev '' is not exist +[04-14_06:56:51:780] netcard driver = pcie_mhi, driver version = V1.3.0.17 +[04-14_06:56:51:781] qmap_mode = 4, qmap_version = 9, qmap_size = 16384, muxid = 0x81, qmap_netcard = rmnet_mhi0.1 +[04-14_06:56:51:782] Modem works in QMI mode +[04-14_06:56:51:783] connect to quectel-qmi-proxy0 sockfd = 7 +[04-14_06:56:51:783] cdc_wdm_fd = 7 +[04-14_06:56:51:789] Get clientWDS = 15 +[04-14_06:56:51:792] Get clientWDS = 16 +[04-14_06:56:51:794] Get clientDMS = 3 +[04-14_06:56:51:798] Get clientNAS = 4 +[04-14_06:56:51:801] Get clientUIM = 1 +[04-14_06:56:51:805] Get clientWDA = 1 +[04-14_06:56:51:809] requestBaseBandVersion RM500QGLABR10A03M4G +[04-14_06:56:51:813] qmap_settings.rx_urb_size = 16384 +[04-14_06:56:51:813] qmap_settings.ul_data_aggregation_max_datagrams = 11 +[04-14_06:56:51:814] qmap_settings.ul_data_aggregation_max_size = 8192 +[04-14_06:56:51:814] qmap_settings.dl_minimum_padding = 0 +[04-14_06:56:51:835] requestGetSIMStatus SIMStatus: SIM_READY +[04-14_06:56:51:836] requestSetProfile[1] cmnet///0 +[04-14_06:56:51:848] requestGetProfile[1] cmnet///0 +[04-14_06:56:51:852] requestRegistrationState2 MCC: 460, MNC: 0, PS: Attached, DataCap: 5G_NSA +[04-14_06:56:51:857] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED +[04-14_06:56:51:860] requestQueryDataCall IPv6ConnectionStatus: DISCONNECTED +[04-14_06:56:51:861] ifconfig rmnet_mhi0 down +[04-14_06:56:51:885] ifconfig rmnet_mhi0.1 0.0.0.0 +ifconfig: SIOCSIFFLAGS: Network is down +[04-14_06:56:51:909] ifconfig rmnet_mhi0.1 down +[04-14_06:56:51:943] requestSetupDataCall WdsConnectionIPv4Handle: 0x341450a0 +[04-14_06:56:52:423] requestSetupDataCall WdsConnectionIPv6Handle: 0x341439f0 +[ 1001.561353] net rmnet_mhi0: link_state 0x0 -> 0x1 +[04-14_06:56:52:441] ifconfig rmnet_mhi0 up +[ 1001.584623] [I][mhi_netdev_open] Opened net dev interface +[04-14_06:56:52:467] ifconfig rmnet_mhi0.1 up +[04-14_06:56:52:495] you are use OpenWrt? +[04-14_06:56:52:496] should not calling udhcpc manually? +[04-14_06:56:52:496] should modify /etc/config/network as below? +[04-14_06:56:52:497] config interface wan +[04-14_06:56:52:497] option ifname rmnet_mhi0.1 +[04-14_06:56:52:497] option proto dhcp +[04-14_06:56:52:498] should use "/sbin/ifstaus wan" to check rmnet_mhi0.1 's status? 
+[04-14_06:56:52:498] busybox udhcpc -f -n -q -t 5 -i rmnet_mhi0.1 +udhcpc: started, v1.28.3 +udhcpc: sending discover +udhcpc: sending select for 10.245.22.3 +udhcpc: lease of 10.245.22.3 obtained, lease time 7200 +[04-14_06:56:52:713] udhcpc: ifconfig rmnet_mhi0.1 10.245.22.3 netmask 255.255.255.248 broadcast + +[04-14_06:56:52:754] udhcpc: setting default routers: 10.245.22.4 +[04-14_06:56:52:838] ip -6 address flush dev rmnet_mhi0.1 +[04-14_06:56:52:861] ip -6 address add 2409:8930:463:3daa:7c8e:429a:c902:c6cc/64 dev rmnet_mhi0.1 +[04-14_06:56:52:884] ip -6 route add default dev rmnet_mhi0.1 + +root@OpenWrt:~# cat /etc/resolv.conf +nameserver 2409:8030:2000:0:0:0:0:1 # IPV6 rmnet_mhi0.1 +nameserver 2409:8030:2000:0:0:0:0:2 # IPV6 rmnet_mhi0.1 +search lan +nameserver 127.0.0.1 + +root@OpenWrt:~# ip -6 ro show +2409:8930:463:96df::/64 dev rmnet_mhi0.1 proto kernel metric 256 +fe80::/64 dev br-lan proto kernel metric 256 +fe80::/64 dev br0 proto kernel metric 256 +default dev rmnet_mhi0.1 metric 1024 + +root@OpenWrt:~# ping6 www.qq.com +PING www.qq.com (2402:4e00:1430:1301::9227:79cc:76f2): 56 data bytes +64 bytes from 2402:4e00:1430:1301::9227:79cc:76f2: seq=0 ttl=51 time=97.230 ms diff --git a/package/wwan/driver/quectel_cm_5G/src/log/usage_of_argument/m.txt b/package/wwan/driver/quectel_cm_5G/src/log/usage_of_argument/m.txt new file mode 100644 index 000000000..d9983e4da --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/log/usage_of_argument/m.txt @@ -0,0 +1,58 @@ +root@ZhuoTK:/# quectel-CM -n 1 -m 4 -s cmnet & +[04-13_05:12:07:455] Quectel_QConnectManager_Linux_V1.6.0.25 +[04-13_05:12:07:458] Find /sys/bus/usb/devices/1-1.3 idVendor=0x2c7c idProduct=0x125, bus=0x001, dev=0x003 +[04-13_05:12:07:459] Auto find qmichannel = /dev/qcqmi0 +[04-13_05:12:07:459] Auto find usbnet_adapter = usb0 +[04-13_05:12:07:467] netcard driver = GobiNet, driver version = V1.6.2.13 +[04-13_05:12:07:467] qmap_mode = 4, qmap_version = 5, qmap_size = 4096, muxid = 0x84, qmap_netcard = usb0.4 +[04-13_05:12:07:467] Modem works in QMI mode +[04-13_05:12:07:495] Get clientWDS = 7 +[04-13_05:12:07:529] Get clientDMS = 8 +[04-13_05:12:07:561] Get clientNAS = 9 +[04-13_05:12:07:591] Get clientUIM = 10 +[04-13_05:12:07:623] requestBaseBandVersion EC25EFAR06A11M4G +[04-13_05:12:07:752] requestGetSIMStatus SIMStatus: SIM_READY +[04-13_05:12:07:752] requestSetProfile[1] cmnet///0 +[04-13_05:12:07:817] requestGetProfile[1] cmnet///0 +[04-13_05:12:07:849] requestRegistrationState2 MCC: 460, MNC: 0, PS: Attached, DataCap: LTE +[04-13_05:12:07:881] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED +[04-13_05:12:07:881] ifconfig usb0 down +[04-13_05:12:07:892] ifconfig usb0.4 0.0.0.0 +[04-13_05:12:07:903] ifconfig usb0.4 down +[04-13_05:12:07:944] requestSetupDataCall WdsConnectionIPv4Handle: 0x87265c40 +[ 52.020000] net usb0: link_state 0x0 -> 0x8 +[04-13_05:12:08:077] ifconfig usb0 up +[04-13_05:12:08:096] ifconfig usb0.4 up +[04-13_05:12:08:116] you are use OpenWrt? +[04-13_05:12:08:116] should not calling udhcpc manually? +[04-13_05:12:08:116] should modify /etc/config/network as below? +[04-13_05:12:08:116] config interface wan +[04-13_05:12:08:116] option ifname usb0.4 +[04-13_05:12:08:116] option proto dhcp +[04-13_05:12:08:116] should use "/sbin/ifstaus wan" to check usb0.4 's status? +[04-13_05:12:08:117] busybox udhcpc -f -n -q -t 5 -i usb0.4 +[04-13_05:12:08:134] udhcpc (v1.23.2) started +[04-13_05:12:08:193] Sending discover... +[04-13_05:12:08:197] Sending select for 10.84.241.180... 
+[04-13_05:12:08:203] Lease of 10.84.241.180 obtained, lease time 7200
+[04-13_05:12:08:208] udhcpc: ifconfig usb0.4 10.84.241.180 netmask 255.255.255.248 broadcast +
+[04-13_05:12:08:221] udhcpc: setting default routers: 10.84.241.181
+
+root@ZhuoTK:/# ifconfig usb0.4
+usb0.4    Link encap:Ethernet  HWaddr 02:50:F4:00:00:00
+          inet addr:10.84.241.180  Mask:255.255.255.248
+          inet6 addr: fe80::50:f4ff:fe00:0/64 Scope:Link
+          UP RUNNING NOARP  MTU:1500  Metric:1
+          RX packets:2 errors:0 dropped:0 overruns:0 frame:0
+          TX packets:6 errors:0 dropped:0 overruns:0 carrier:0
+          collisions:0 txqueuelen:1000
+          RX bytes:612 (612.0 B)  TX bytes:984 (984.0 B)
+
+root@ZhuoTK:/# ip ro show
+default via 10.84.241.181 dev usb0.4
+10.84.241.176/29 dev usb0.4 proto kernel scope link src 10.84.241.180
+192.168.1.0/24 dev br-lan proto kernel scope link src 192.168.1.251
+
+root@ZhuoTK:/# ping 8.8.8.8
+PING 8.8.8.8 (8.8.8.8): 56 data bytes
+64 bytes from 8.8.8.8: seq=0 ttl=52 time=99.431 ms
diff --git a/package/wwan/driver/quectel_cm_5G/src/main.c b/package/wwan/driver/quectel_cm_5G/src/main.c
new file mode 100644
index 000000000..e39a5f31f
--- /dev/null
+++ b/package/wwan/driver/quectel_cm_5G/src/main.c
@@ -0,0 +1,934 @@
+/******************************************************************************
+  @file    main.c
+  @brief   The entry program.
+
+  DESCRIPTION
+  Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules.
+
+  INITIALIZATION AND SEQUENCING REQUIREMENTS
+  None.
+
+  ---------------------------------------------------------------------------
+  Copyright (c) 2016 -2020 Quectel Wireless Solution, Co., Ltd.  All Rights Reserved.
+  Quectel Wireless Solution Proprietary and Confidential.
+  ---------------------------------------------------------------------------
+******************************************************************************/
+
+#include "QMIThread.h"
+#include <sys/socket.h>
+#include <dirent.h>
+#include <ctype.h>
+#include <signal.h>
+
+#include "util.h"
+//#define CONFIG_PID_FILE_FORMAT "/var/run/quectel-CM-%s.pid" //for example /var/run/quectel-CM-wwan0.pid
+
+static PROFILE_T s_profile;
+int debug_qmi = 0;
+int qmidevice_control_fd[2];
+static int signal_control_fd[2];
+int g_donot_exit_when_modem_hangup = 0;
+extern int ql_ifconfig(int argc, char *argv[]);
+extern int ql_get_netcard_driver_info(const char*);
+extern int ql_capture_usbmon_log(PROFILE_T *profile, const char *log_path);
+extern void ql_stop_usbmon_log(PROFILE_T *profile);
+//UINT ifc_get_addr(const char *ifname);
+static int s_link = -1;
+static void usbnet_link_change(int link, PROFILE_T *profile) {
+    if (s_link == link)
+        return;
+
+    s_link = link;
+
+    if (!(link & (1<<IpFamilyV4)))
+        memset(&profile->ipv4, 0, sizeof(IPV4_T));
+
+    if (!(link & (1<<IpFamilyV6)))
+        memset(&profile->ipv6, 0, sizeof(IPV6_T));
+
+    if (link) {
+        udhcpc_start(profile);
+    } else {
+        udhcpc_stop(profile);
+    }
+}
+
+/* detect a stale IPv4 data call: compare the address the modem currently
+   reports against the one recorded when the call came up */
+static int check_ipv4_address(PROFILE_T *profile) {
+    uint32_t oldAddress = profile->ipv4.Address;
+
+    if (profile->request_ops == &mbim_request_ops)
+        return 1; //we will get a new ipv4 address per requestGetIPAddress()
+    if (profile->request_ops == &atc_request_ops)
+        return 1; //TODO
+
+    if (profile->request_ops->requestGetIPAddress(profile, IpFamilyV4) == 0) {
+        if (profile->ipv4.Address != oldAddress || debug_qmi) {
+            unsigned char *l = (unsigned char *)&oldAddress;
+            unsigned char *r = (unsigned char *)&profile->ipv4.Address;
+            dbg_time("localIP: %d.%d.%d.%d VS remoteIP: %d.%d.%d.%d",
+                l[3], l[2], l[1], l[0], r[3], r[2], r[1], r[0]);
+        }
+        return (profile->ipv4.Address == oldAddress);
+    }
+
+    return 0;
+}
+
+static
void main_send_event_to_qmidevice(int triger_event) { + if (write(qmidevice_control_fd[0], &triger_event, sizeof(triger_event)) == -1) {}; +} + +static void send_signo_to_main(int signo) { + if (write(signal_control_fd[0], &signo, sizeof(signo)) == -1) {}; +} + +void qmidevice_send_event_to_main(int triger_event) { + if (write(qmidevice_control_fd[1], &triger_event, sizeof(triger_event)) == -1) {}; +} + +void qmidevice_send_event_to_main_ext(int triger_event, void *data, unsigned len) { + if (write(qmidevice_control_fd[1], &triger_event, sizeof(triger_event)) == -1) {}; + if (write(qmidevice_control_fd[1], data, len) == -1) {}; +} + +#define MAX_PATH 256 + +static int ls_dir(const char *dir, int (*match)(const char *dir, const char *file, void *argv[]), void *argv[]) +{ + DIR *pDir; + struct dirent* ent = NULL; + int match_times = 0; + + pDir = opendir(dir); + if (pDir == NULL) { + dbg_time("Cannot open directory: %s, errno: %d (%s)", dir, errno, strerror(errno)); + return 0; + } + + while ((ent = readdir(pDir)) != NULL) { + match_times += match(dir, ent->d_name, argv); + } + closedir(pDir); + + return match_times; +} + +static int is_same_linkfile(const char *dir, const char *file, void *argv[]) +{ + const char *qmichannel = (const char *)argv[1]; + char linkname[MAX_PATH*2+6]; + char filename[MAX_PATH]; + int linksize; + + snprintf(linkname, sizeof(linkname), "%.256s/%s", dir, file); + linksize = readlink(linkname, filename, sizeof(filename)); + if (linksize <= 0) + return 0; + + filename[linksize] = 0; + if (strcmp(filename, qmichannel)) + return 0; + + dbg_time("%s -> %s", linkname, filename); + return 1; +} + +static int is_brother_process(const char *dir, const char *file, void *argv[]) +{ + //const char *myself = (const char *)argv[0]; + char linkname[MAX_PATH*2+6]; + char filename[MAX_PATH]; + int linksize; + int i = 0, kill_timeout = 15; + pid_t pid; + + //dbg_time("%s", file); + while (file[i]) { + if (!isdigit(file[i])) + break; + i++; + } + + if (file[i]) { + //dbg_time("%s not digit", file); + return 0; + } + + snprintf(linkname, sizeof(linkname), "%s/%s/exe", dir, file); + linksize = readlink(linkname, filename, sizeof(filename)); + if (linksize <= 0) + return 0; + + filename[linksize] = 0; + + pid = atoi(file); + if (pid >= getpid()) + return 0; + + snprintf(linkname, sizeof(linkname), "%s/%s/fd", dir, file); + if (!ls_dir(linkname, is_same_linkfile, argv)) + return 0; + + dbg_time("%s/%s/exe -> %s", dir, file, filename); + while (kill_timeout-- && !kill(pid, 0)) + { + kill(pid, SIGTERM); + sleep(1); + } + if (!kill(pid, 0)) + { + dbg_time("force kill %s/%s/exe -> %s", dir, file, filename); + kill(pid, SIGKILL); + sleep(1); + } + + return 1; +} + +static int kill_brothers(const char *qmichannel) +{ + char myself[MAX_PATH]; + int filenamesize; + void *argv[2] = {myself, (void *)qmichannel}; + + filenamesize = readlink("/proc/self/exe", myself, sizeof(myself)); + if (filenamesize <= 0) + return 0; + myself[filenamesize] = 0; + + if (ls_dir("/proc", is_brother_process, argv)) + sleep(1); + + return 0; +} + +static int kill_data_call_pdp(int pdp, char *self) { + int pid; + char *p = NULL; + + p = self; + while (*self) { + if (*self == '/') + p = self+1; + self++; + } + + pid = getpid_by_pdp(pdp, p); + if (pid > 0) { + dbg_time("send SIGINT to process %d", pid); + return kill(pid, SIGINT); + } + + return -1; +} + +static void ql_sigaction(int signo) { + if (SIGALRM == signo) + send_signo_to_main(SIG_EVENT_START); + else + { + g_donot_exit_when_modem_hangup = 0; + 
send_signo_to_main(SIG_EVENT_STOP);
+        main_send_event_to_qmidevice(SIG_EVENT_STOP); //main may be waiting for a qmi response
+    }
+}
+
+static int usage(const char *progname) {
+    dbg_time("Usage: %s [options]", progname);
+    dbg_time("-s [apn [user password auth]] Set apn/user/password/auth, as provided by your network provider. auth: 1~pap, 2~chap, 3~MsChapV2");
+    dbg_time("-p pincode Verify SIM card PIN if the SIM card is locked");
+    dbg_time("-p [quectel-][qmi|mbim]-proxy Request to use proxy");
+    dbg_time("-f logfilename Save log message of this program to file");
+    dbg_time("-u usbmonlog filename Save usbmon log to file");
+    dbg_time("-i interface Specify which network interface to set up the data call on when multiple modems exist");
+    dbg_time("-4 Setup IPv4 data call (default)");
+    dbg_time("-6 Setup IPv6 data call");
+    dbg_time("-n pdn Specify which pdn to set up the data call (default 1 for QMI, 0 for MBIM)");
+    dbg_time("-k pdn Specify which pdn to hang up the data call (by sending SIGINT to 'quectel-CM -n pdn')");
+    dbg_time("-m iface-idx Bind QMI data call to wwan0_<iface-idx> when QMAP is used. E.g '-n 7 -m 1' binds the pdn-7 data call to wwan0_1");
+    dbg_time("-b Enable network interface bridge function (default 0)");
+    dbg_time("-v Verbose log mode, for debug purposes.");
+    dbg_time("[Examples]");
+    dbg_time("Example 1: %s ", progname);
+    dbg_time("Example 2: %s -s 3gnet ", progname);
+    dbg_time("Example 3: %s -s 3gnet carl 1234 1 -p 1234 -f gobinet_log.txt", progname);
+    return 0;
+}
+
+static int qmi_main(PROFILE_T *profile)
+{
+    int triger_event = 0;
+    int signo;
+#ifdef CONFIG_SIM
+    SIM_Status SIMStatus = SIM_ABSENT;
+#endif
+    UCHAR PSAttachedState = 0;
+    UCHAR IPv4ConnectionStatus = QWDS_PKT_DATA_UNKNOW;
+    UCHAR IPv6ConnectionStatus = QWDS_PKT_DATA_UNKNOW;
+    unsigned SetupCallFail = 0;
+    unsigned long SetupCallAllowTime = clock_msec();
+#ifdef REBOOT_SIM_CARD_WHEN_LONG_TIME_NO_PS
+    unsigned PsAttachFail = 0;
+    unsigned long PsAttachTime = clock_msec();
+#endif
+    int qmierr = 0;
+    const struct request_ops *request_ops = profile->request_ops;
+    pthread_t gQmiThreadID = 0;
+
+//sudo apt-get install udhcpc
+//sudo apt-get remove ModemManager
+    if (profile->reattach_flag) {
+        if (!reattach_driver(profile))
+            sleep(2);
+    }
+
+    /* try to recreate FDs*/
+    if (socketpair( AF_LOCAL, SOCK_STREAM, 0, signal_control_fd) < 0 ) {
+        dbg_time("%s Failed to create main_control_fd: %d (%s)", __func__, errno, strerror(errno));
+        return -1;
+    }
+
+    if ( socketpair( AF_LOCAL, SOCK_STREAM, 0, qmidevice_control_fd ) < 0 ) {
+        dbg_time("%s Failed to create thread control socket pair: %d (%s)", __func__, errno, strerror(errno));
+        return 0;
+    }
+
+    if ((profile->qmap_mode == 0 || profile->qmap_mode == 1)
+        && (!profile->proxy[0] || strstr(profile->qmichannel, "_IPCR"))) {
+        kill_brothers(profile->qmichannel);
+    }
+
+    if (pthread_create( &gQmiThreadID, 0, profile->qmi_ops->read, (void *)profile) != 0) {
+        dbg_time("%s Failed to create QMIThread: %d (%s)", __func__, errno, strerror(errno));
+        return 0;
+    }
+
+    if ((read(qmidevice_control_fd[0], &triger_event, sizeof(triger_event)) != sizeof(triger_event))
+        || (triger_event != RIL_INDICATE_DEVICE_CONNECTED)) {
+        dbg_time("%s Failed to init QMIThread: %d (%s)", __func__, errno, strerror(errno));
+        return 0;
+    }
+
+    if (profile->qmi_ops->init && profile->qmi_ops->init(profile)) {
+        dbg_time("%s Failed to qmi init: %d (%s)", __func__, errno, strerror(errno));
+        return 0;
+    }
+
+    if (request_ops->requestBaseBandVersion)
+        request_ops->requestBaseBandVersion(profile);
+
+    if (request_ops->requestSetEthMode)
request_ops->requestSetEthMode(profile); + + if (request_ops->requestSetLoopBackState && profile->loopback_state) { + qmierr = request_ops->requestSetLoopBackState(profile->loopback_state, profile->replication_factor); + if (qmierr != QMI_ERR_INVALID_QMI_CMD) //X20 return this error + profile->loopback_state = 0; //wait for RIL_UNSOL_LOOPBACK_CONFIG_IND + } + + if (request_ops->requestGetSIMStatus) { + qmierr = request_ops->requestGetSIMStatus(&SIMStatus); + + while (qmierr == QMI_ERR_OP_DEVICE_UNSUPPORTED) { + sleep(1); + qmierr = request_ops->requestGetSIMStatus(&SIMStatus); + } + + if ((SIMStatus == SIM_PIN) && profile->pincode && request_ops->requestEnterSimPin) { + request_ops->requestEnterSimPin(profile->pincode); + } + } + + if (SIMStatus == SIM_READY) { + if (request_ops->requestGetICCID) + request_ops->requestGetICCID(); + + if (request_ops->requestGetIMSI) + request_ops->requestGetIMSI(); + } + + if (request_ops->requestGetProfile) + request_ops->requestGetProfile(profile); + + if (request_ops->requestSetProfile && (profile->apn || profile->user || profile->password)) { + if (request_ops->requestSetProfile(profile) == 1) { +#ifdef REBOOT_SIM_CARD_WHEN_APN_CHANGE //enable at only when customer asked + if (request_ops->requestRadioPower) { + request_ops->requestRadioPower(0); + request_ops->requestRadioPower(1); + } +#endif + } + } + + request_ops->requestRegistrationState(&PSAttachedState); + +#ifdef CONFIG_ENABLE_QOS + request_ops->requestRegisterQos(profile); +#endif + + send_signo_to_main(SIG_EVENT_CHECK); + + while (1) + { + struct pollfd pollfds[] = {{signal_control_fd[1], POLLIN, 0}, {qmidevice_control_fd[0], POLLIN, 0}}; + int ne, ret, nevents = sizeof(pollfds)/sizeof(pollfds[0]); + + do { + ret = poll(pollfds, nevents, 15*1000); + } while ((ret < 0) && (errno == EINTR)); + + if (ret == 0) + { + send_signo_to_main(SIG_EVENT_CHECK); + continue; + } + + if (ret <= 0) { + dbg_time("%s poll=%d, errno: %d (%s)", __func__, ret, errno, strerror(errno)); + goto __main_quit; + } + + for (ne = 0; ne < nevents; ne++) { + int fd = pollfds[ne].fd; + short revents = pollfds[ne].revents; + + if (revents & (POLLERR | POLLHUP | POLLNVAL)) { + dbg_time("%s poll err/hup", __func__); + dbg_time("epoll fd = %d, events = 0x%04x", fd, revents); + main_send_event_to_qmidevice(RIL_REQUEST_QUIT); + if (revents & POLLHUP) + goto __main_quit; + } + + if ((revents & POLLIN) == 0) + continue; + + if (fd == signal_control_fd[1]) + { + if (read(fd, &signo, sizeof(signo)) == sizeof(signo)) + { + alarm(0); + switch (signo) + { + case SIG_EVENT_START: + if (PSAttachedState != 1 && profile->loopback_state == 0) + break; + + if (SetupCallAllowTime > clock_msec()) { + alarm((SetupCallAllowTime - clock_msec()+999)/1000); + break; + } + + if (profile->enable_ipv4 && IPv4ConnectionStatus != QWDS_PKT_DATA_CONNECTED) { + qmierr = request_ops->requestSetupDataCall(profile, IpFamilyV4); + + if ((qmierr > 0) && profile->user && profile->user[0] && profile->password && profile->password[0]) { + int old_auto = profile->auth; + + //may be fail because wrong auth mode, try pap->chap, or chap->pap + profile->auth = (profile->auth == 1) ? 
+    while (1)
+    {
+        struct pollfd pollfds[] = {{signal_control_fd[1], POLLIN, 0}, {qmidevice_control_fd[0], POLLIN, 0}};
+        int ne, ret, nevents = sizeof(pollfds)/sizeof(pollfds[0]);
+
+        do {
+            ret = poll(pollfds, nevents, 15*1000);
+        } while ((ret < 0) && (errno == EINTR));
+
+        if (ret == 0)
+        {
+            send_signo_to_main(SIG_EVENT_CHECK);
+            continue;
+        }
+
+        if (ret <= 0) {
+            dbg_time("%s poll=%d, errno: %d (%s)", __func__, ret, errno, strerror(errno));
+            goto __main_quit;
+        }
+
+        for (ne = 0; ne < nevents; ne++) {
+            int fd = pollfds[ne].fd;
+            short revents = pollfds[ne].revents;
+
+            if (revents & (POLLERR | POLLHUP | POLLNVAL)) {
+                dbg_time("%s poll err/hup", __func__);
+                dbg_time("epoll fd = %d, events = 0x%04x", fd, revents);
+                main_send_event_to_qmidevice(RIL_REQUEST_QUIT);
+                if (revents & POLLHUP)
+                    goto __main_quit;
+            }
+
+            if ((revents & POLLIN) == 0)
+                continue;
+
+            if (fd == signal_control_fd[1])
+            {
+                if (read(fd, &signo, sizeof(signo)) == sizeof(signo))
+                {
+                    alarm(0);
+                    switch (signo)
+                    {
+                    case SIG_EVENT_START:
+                        if (PSAttachedState != 1 && profile->loopback_state == 0)
+                            break;
+
+                        if (SetupCallAllowTime > clock_msec()) {
+                            alarm((SetupCallAllowTime - clock_msec()+999)/1000);
+                            break;
+                        }
+
+                        if (profile->enable_ipv4 && IPv4ConnectionStatus != QWDS_PKT_DATA_CONNECTED) {
+                            qmierr = request_ops->requestSetupDataCall(profile, IpFamilyV4);
+
+                            if ((qmierr > 0) && profile->user && profile->user[0] && profile->password && profile->password[0]) {
+                                int old_auth = profile->auth;
+
+                                //may fail because of a wrong auth mode; try PAP->CHAP, or CHAP->PAP
+                                profile->auth = (profile->auth == 1) ? 2 : 1;
+                                qmierr = request_ops->requestSetupDataCall(profile, IpFamilyV4);
+
+                                if (qmierr)
+                                    profile->auth = old_auth; //still failing, restore the old auth mode
+                            }
+
+                            if (!qmierr) {
+                                qmierr = request_ops->requestGetIPAddress(profile, IpFamilyV4);
+                                if (!qmierr)
+                                    IPv4ConnectionStatus = QWDS_PKT_DATA_CONNECTED;
+                            }
+                        }
+
+                        if (profile->enable_ipv6 && IPv6ConnectionStatus != QWDS_PKT_DATA_CONNECTED) {
+                            if (profile->enable_ipv4 && profile->request_ops != &qmi_request_ops) {
+                                IPv6ConnectionStatus = IPv4ConnectionStatus;
+                            }
+                            else {
+                                qmierr = request_ops->requestSetupDataCall(profile, IpFamilyV6);
+
+                                if (!qmierr) {
+                                    qmierr = request_ops->requestGetIPAddress(profile, IpFamilyV6);
+                                    if (!qmierr)
+                                        IPv6ConnectionStatus = QWDS_PKT_DATA_CONNECTED;
+                                }
+                            }
+                        }
+
+                        if ((profile->enable_ipv4 && IPv4ConnectionStatus == QWDS_PKT_DATA_DISCONNECTED)
+                            || (profile->enable_ipv6 && IPv6ConnectionStatus == QWDS_PKT_DATA_DISCONNECTED)) {
+                            const unsigned allow_time[] = {5, 10, 20, 40, 60};
+
+                            if (SetupCallFail < (sizeof(allow_time)/sizeof(unsigned)))
+                                SetupCallAllowTime = allow_time[SetupCallFail];
+                            else
+                                SetupCallAllowTime = 60;
+                            SetupCallFail++;
+                            dbg_time("try to requestSetupDataCall %lu seconds later", SetupCallAllowTime);
+                            alarm(SetupCallAllowTime);
+                            SetupCallAllowTime = SetupCallAllowTime*1000 + clock_msec();
+                        }
+                        else if (IPv4ConnectionStatus == QWDS_PKT_DATA_CONNECTED || IPv6ConnectionStatus == QWDS_PKT_DATA_CONNECTED) {
+                            SetupCallFail = 0;
+                            SetupCallAllowTime = clock_msec();
+                        }
+                        break;
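+
+                    /*
+                     * SIG_EVENT_CHECK: periodic health check. Refresh
+                     * signal/cell info, re-read the registration state while
+                     * detached, and verify that data calls the modem still
+                     * reports as connected really have a usable IP setup.
+                     */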
+                    case SIG_EVENT_CHECK:
+                        if (request_ops->requestGetSignalInfo)
+                            request_ops->requestGetSignalInfo();
+
+                        if (request_ops->requestGetCellInfoList)
+                            request_ops->requestGetCellInfoList();
+
+                        if (request_ops->requestGetCoexWWANState)
+                            request_ops->requestGetCoexWWANState();
+
+                        if (PSAttachedState != 1)
+                            request_ops->requestRegistrationState(&PSAttachedState);
+
+#ifdef REBOOT_SIM_CARD_WHEN_LONG_TIME_NO_PS
+                        if (PSAttachedState) {
+                            PsAttachTime = clock_msec();
+                            PsAttachFail = 0;
+                        }
+                        else {
+                            unsigned long diff = (clock_msec() - PsAttachTime) / 1000;
+                            unsigned long threshold = REBOOT_SIM_CARD_WHEN_LONG_TIME_NO_PS << PsAttachFail;
+
+                            if (diff > threshold || diff > 960) {
+                                //retry interval: 60 -> 120 -> 240 -> 480 -> 960 seconds
+                                PsAttachTime = clock_msec();
+                                PsAttachFail++;
+
+                                if (request_ops->requestRadioPower) {
+                                    request_ops->requestRadioPower(0);
+                                    request_ops->requestRadioPower(1);
+                                }
+                            }
+                        }
+#endif
+
+                        if (profile->enable_ipv4 && IPv4ConnectionStatus != QWDS_PKT_DATA_DISCONNECTED
+                            && !request_ops->requestQueryDataCall(&IPv4ConnectionStatus, IpFamilyV4))
+                        {
+                            if (QWDS_PKT_DATA_CONNECTED == IPv4ConnectionStatus && profile->ipv4.Address == 0) {
+                                //connected but no cached address (e.g. after 'killall -9 quectel-CM' for MBIM and ATC calls)
+                                qmierr = request_ops->requestGetIPAddress(profile, IpFamilyV4);
+                                if (qmierr)
+                                    IPv4ConnectionStatus = QWDS_PKT_DATA_DISCONNECTED;
+                            }
+
+                            //the local IP differs from the modem-side IP
+                            if (QWDS_PKT_DATA_CONNECTED == IPv4ConnectionStatus && check_ipv4_address(profile) == 0) {
+                                request_ops->requestDeactivateDefaultPDP(profile, IpFamilyV4);
+                                IPv4ConnectionStatus = QWDS_PKT_DATA_DISCONNECTED;
+                            }
+                        }
+                        else {
+                            IPv4ConnectionStatus = QWDS_PKT_DATA_DISCONNECTED;
+                        }
+
+                        if (profile->enable_ipv6 && IPv6ConnectionStatus != QWDS_PKT_DATA_DISCONNECTED) {
+                            if (profile->enable_ipv4 && profile->request_ops != &qmi_request_ops) {
+                                IPv6ConnectionStatus = IPv4ConnectionStatus;
+                            }
+                            else {
+                                request_ops->requestQueryDataCall(&IPv6ConnectionStatus, IpFamilyV6);
+                            }
+                        }
+                        else {
+                            IPv6ConnectionStatus = QWDS_PKT_DATA_DISCONNECTED;
+                        }
+
+                        if (IPv4ConnectionStatus == QWDS_PKT_DATA_DISCONNECTED && IPv6ConnectionStatus == QWDS_PKT_DATA_DISCONNECTED) {
+                            usbnet_link_change(0, profile);
+                        }
+                        else if (IPv4ConnectionStatus == QWDS_PKT_DATA_CONNECTED || IPv6ConnectionStatus == QWDS_PKT_DATA_CONNECTED) {
+                            int link = 0;
+                            if (IPv4ConnectionStatus == QWDS_PKT_DATA_CONNECTED)
+                                link |= (1 << IpFamilyV4);
+                            if (IPv6ConnectionStatus == QWDS_PKT_DATA_CONNECTED)
+                                link |= (1 << IpFamilyV6);
+                            usbnet_link_change(link, profile);
+                        }
+
+                        if ((profile->enable_ipv4 && IPv4ConnectionStatus == QWDS_PKT_DATA_DISCONNECTED)
+                            || (profile->enable_ipv6 && IPv6ConnectionStatus == QWDS_PKT_DATA_DISCONNECTED)) {
+                            send_signo_to_main(SIG_EVENT_START);
+                        }
+                        break;
+
+                    case SIG_EVENT_STOP:
+                        if (profile->enable_ipv4 && IPv4ConnectionStatus == QWDS_PKT_DATA_CONNECTED) {
+                            request_ops->requestDeactivateDefaultPDP(profile, IpFamilyV4);
+                        }
+                        if (profile->enable_ipv6 && IPv6ConnectionStatus == QWDS_PKT_DATA_CONNECTED) {
+                            if (profile->enable_ipv4 && profile->request_ops != &qmi_request_ops) {
+                                //nothing to do: IPv6 shares the IPv4 data call on this backend
+                            }
+                            else {
+                                request_ops->requestDeactivateDefaultPDP(profile, IpFamilyV6);
+                            }
+                        }
+                        usbnet_link_change(0, profile);
+                        if (profile->qmi_ops->deinit)
+                            profile->qmi_ops->deinit();
+                        main_send_event_to_qmidevice(RIL_REQUEST_QUIT);
+                        goto __main_quit;
+                        break;
+
+                    default:
+                        break;
+                    }
+                }
+            }
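+
+            /*
+             * Events on qmidevice_control_fd come from the reader thread
+             * started above: device removal, network/data-call state
+             * changes, modem reset reports and loopback/QoS indications.
+             */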
dbg_time("%s exit", __func__); + + return 0; +} + +static int quectel_CM(PROFILE_T *profile) +{ + int ret = 0; + char qmichannel[32] = {'\0'}; + char usbnet_adapter[32] = {'\0'}; + + if (profile->expect_adapter[0]) + strncpy(usbnet_adapter, profile->expect_adapter, sizeof(usbnet_adapter)); + + if (qmidevice_detect(qmichannel, usbnet_adapter, sizeof(qmichannel), profile)) { + profile->hardware_interface = HARDWARE_USB; + } + else if (mhidevice_detect(qmichannel, usbnet_adapter, profile)) { + profile->hardware_interface = HARDWARE_PCIE; + } + else if (atdevice_detect(qmichannel, usbnet_adapter, profile)) { + profile->hardware_interface = HARDWARE_PCIE; + } +#ifdef CONFIG_QRTR + else if (1) { + strcpy(qmichannel, "qrtr"); + strcpy(usbnet_adapter, "rmnet_mhi0"); + profile->hardware_interface = HARDWARE_PCIE; + profile->software_interface = SOFTWARE_QRTR; + } +#endif + else { + dbg_time("qmidevice_detect failed"); + goto error; + } + + strncpy(profile->qmichannel, qmichannel, sizeof(profile->qmichannel)); + strncpy(profile->usbnet_adapter, usbnet_adapter, sizeof(profile->usbnet_adapter)); + ql_get_netcard_driver_info(profile->usbnet_adapter); + + if ((profile->hardware_interface == HARDWARE_USB) && profile->usblogfile) + ql_capture_usbmon_log(profile, profile->usblogfile); + + if (profile->hardware_interface == HARDWARE_USB) { + profile->software_interface = get_driver_type(profile); + } + + ql_qmap_mode_detect(profile); + + if (profile->software_interface == SOFTWARE_MBIM) { + dbg_time("Modem works in MBIM mode"); + profile->request_ops = &mbim_request_ops; + profile->qmi_ops = &mbim_dev_ops; + ret = qmi_main(profile); + } + else if (profile->software_interface == SOFTWARE_QMI) { + dbg_time("Modem works in QMI mode"); + profile->request_ops = &qmi_request_ops; + if (qmidev_is_gobinet(profile->qmichannel)) + profile->qmi_ops = &gobi_qmidev_ops; + else + profile->qmi_ops = &qmiwwan_qmidev_ops; + qmidev_send = profile->qmi_ops->send; + ret = qmi_main(profile); + } + else if (profile->software_interface == SOFTWARE_ECM_RNDIS_NCM) { + dbg_time("Modem works in ECM_RNDIS_NCM mode"); + profile->request_ops = &atc_request_ops; + profile->qmi_ops = &atc_dev_ops; + ret = qmi_main(profile); + } +#ifdef CONFIG_QRTR + else if (profile->software_interface == SOFTWARE_QRTR) { + dbg_time("Modem works in QRTR mode"); + profile->request_ops = &qmi_request_ops; + profile->qmi_ops = &qrtr_qmidev_ops; + qmidev_send = profile->qmi_ops->send; + ret = qmi_main(profile); + } +#endif + else { + dbg_time("unsupport software_interface %d", profile->software_interface); + } + + ql_stop_usbmon_log(profile); + +error: + return ret; +} + +static int parse_user_input(int argc, char **argv, PROFILE_T *profile) { + int opt = 1; + + profile->pdp = CONFIG_DEFAULT_PDP; + + if (!strcmp(argv[argc-1], "&")) + argc--; + +#define has_more_argv() ((opt < argc) && (argv[opt][0] != '-')) + while (opt < argc) { + if (argv[opt][0] != '-') { + return usage(argv[0]); + } + + switch (argv[opt++][1]) + { + case 's': + profile->apn = profile->user = profile->password = ""; + if (has_more_argv()) { + profile->apn = argv[opt++]; + } + if (has_more_argv()) { + profile->user = argv[opt++]; + } + if (has_more_argv()) { + profile->password = argv[opt++]; + if (profile->password && profile->password[0]) + profile->auth = 2; //default chap, customers may miss auth + } + if (has_more_argv()) { + const char *auth = argv[opt++]; + + if (!strcmp(auth, "0") || !strcasecmp(auth, "none")) { + profile->auth = 0; + } + else if (!strcmp(auth, "1") || 
!strcasecmp(auth, "pap")) { + profile->auth = 1; + } + else if (!strcmp(auth, "2") || !strcasecmp(auth, "chap")) { + profile->auth = 2; + } + else if (!strcmp(auth, "3") || !strcasecmp(auth, "MsChapV2")) { + profile->auth = 3; + } + else { + dbg_time("unknow auth '%s'", auth); + return usage(argv[0]); + } + } + break; + + case 'p': + if (has_more_argv()) { + const char *arg = argv[opt++]; + + if (!strcmp(arg, QUECTEL_QMI_PROXY) || !strcmp(arg, QUECTEL_MBIM_PROXY) + || !strcmp(arg, LIBQMI_PROXY) || !strcmp(arg, LIBMBIM_PROXY) || !strcmp(arg, QUECTEL_ATC_PROXY)) { + strncpy(profile->proxy, arg, sizeof(profile->proxy) - 1); + } + else if ((999 < atoi(arg)) && (atoi(arg) < 10000)) { + profile->pincode = arg; + } + else { + dbg_time("unknow -p '%s'", arg); + return usage(argv[0]); + } + } + break; + + case 'm': + if (has_more_argv()) + profile->muxid = argv[opt++][0] - '0' + 0x80; + break; + + case 'n': + if (has_more_argv()) + profile->pdp = argv[opt++][0] - '0'; + break; + + case 'f': + if (has_more_argv()) { + profile->logfile = argv[opt++]; + } + break; + + case 'i': + if (has_more_argv()) { + strncpy(profile->expect_adapter, argv[opt++], sizeof(profile->expect_adapter) - 1); + } + break; + + case 'v': + debug_qmi = 1; + break; + + case 'l': + if (has_more_argv()) { + profile->replication_factor = atoi(argv[opt++]); + if (profile->replication_factor > 0) { + profile->loopback_state = 1; + } + } + break; + + case '4': + profile->enable_ipv4 = 1; + break; + + case '6': + profile->enable_ipv6 = 1; + break; + + case 'u': + if (has_more_argv()) { + profile->usblogfile = argv[opt++]; + } + break; + + case 'b': + profile->enable_bridge = 1; + break; + + case 'k': + if (has_more_argv()) { + profile->kill_pdp = argv[opt++][0] - '0'; + } + break; + + default: + return usage(argv[0]); + break; + } + } + + if (profile->enable_ipv4 != 1 && profile->enable_ipv6 != 1) { // default enable IPv4 + profile->enable_ipv4 = 1; + } + + return 1; +} + +int main(int argc, char *argv[]) +{ + int ret; + PROFILE_T *ctx = &s_profile; + + dbg_time("QConnectManager_Linux_V1.6.4"); + + ret = parse_user_input(argc, argv, ctx); + if (!ret) + return ret; + + if (ctx->kill_pdp) { + return kill_data_call_pdp(ctx->kill_pdp, argv[0]); + } + + if (ctx->logfile) { + logfilefp = fopen(ctx->logfile, "a+"); + if (!logfilefp) { + dbg_time("Fail to open %s, errno: %d(%s)", ctx->logfile, errno, strerror(errno)); + } + } + + signal(SIGINT, ql_sigaction); + signal(SIGTERM, ql_sigaction); + signal(SIGALRM, ql_sigaction); + + do { + ret = quectel_CM(ctx); + if (g_donot_exit_when_modem_hangup > 0) + sleep(3); + } while (g_donot_exit_when_modem_hangup > 0); + + if (logfilefp) { + fclose(logfilefp); + } + + return ret; +} diff --git a/package/wwan/driver/quectel_cm_5G/src/mbim-cm.c b/package/wwan/driver/quectel_cm_5G/src/mbim-cm.c new file mode 100644 index 000000000..836025ab7 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/mbim-cm.c @@ -0,0 +1,2426 @@ +/****************************************************************************** + @file mbim-cm.c + @brief MIBIM drivers. + + DESCRIPTION + Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules. + + INITIALIZATION AND SEQUENCING REQUIREMENTS + None. + + --------------------------------------------------------------------------- + Copyright (c) 2016 - 2020 Quectel Wireless Solution, Co., Ltd. All Rights Reserved. + Quectel Wireless Solution Proprietary and Confidential. 
+int main(int argc, char *argv[])
+{
+    int ret;
+    PROFILE_T *ctx = &s_profile;
+
+    dbg_time("QConnectManager_Linux_V1.6.4");
+
+    ret = parse_user_input(argc, argv, ctx);
+    if (!ret)
+        return ret;
+
+    if (ctx->kill_pdp) {
+        return kill_data_call_pdp(ctx->kill_pdp, argv[0]);
+    }
+
+    if (ctx->logfile) {
+        logfilefp = fopen(ctx->logfile, "a+");
+        if (!logfilefp) {
+            dbg_time("Failed to open %s, errno: %d(%s)", ctx->logfile, errno, strerror(errno));
+        }
+    }
+
+    signal(SIGINT, ql_sigaction);
+    signal(SIGTERM, ql_sigaction);
+    signal(SIGALRM, ql_sigaction);
+
+    do {
+        ret = quectel_CM(ctx);
+        if (g_donot_exit_when_modem_hangup > 0)
+            sleep(3);
+    } while (g_donot_exit_when_modem_hangup > 0);
+
+    if (logfilefp) {
+        fclose(logfilefp);
+    }
+
+    return ret;
+}
diff --git a/package/wwan/driver/quectel_cm_5G/src/mbim-cm.c b/package/wwan/driver/quectel_cm_5G/src/mbim-cm.c
new file mode 100644
index 000000000..836025ab7
--- /dev/null
+++ b/package/wwan/driver/quectel_cm_5G/src/mbim-cm.c
@@ -0,0 +1,2426 @@
+/******************************************************************************
+  @file    mbim-cm.c
+  @brief   MBIM driver.
+
+  DESCRIPTION
+  Connectivity management tool for the USB network adapters of Quectel wireless cellular modules.
+
+  INITIALIZATION AND SEQUENCING REQUIREMENTS
+  None.
+
+  ---------------------------------------------------------------------------
+  Copyright (c) 2016 - 2020 Quectel Wireless Solution, Co., Ltd.  All Rights Reserved.
+  Quectel Wireless Solution Proprietary and Confidential.
+  ---------------------------------------------------------------------------
+******************************************************************************/
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <unistd.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <ctype.h>
+#include <limits.h>
+#include <inttypes.h>
+#include <endian.h>
+#include <signal.h>
+#include <pthread.h>
+#include <poll.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <sys/time.h>
+#include <time.h>
+#include "QMIThread.h"
+
+#define mbim_debug dbg_time
+
+#define UUID_BASIC_CONNECT "a289cc33-bcbb-8b4f-b6b0-133ec2aae6df"
+//https://docs.microsoft.com/en-us/windows-hardware/drivers/network/mb-5g-data-class-support
+#define UUID_BASIC_CONNECT_EXT "3d01dcc5-fef5-4d05-0d3a-bef7058e9aaf"
+#define UUID_SMS "533fbeeb-14fe-4467-9f90-33a223e56c3f"
+#define UUID_USSD "e550a0c8-5e82-479e-82f7-10abf4c3351f"
+#define UUID_PHONEBOOK "4bf38476-1e6a-41db-b1d8-bed289c25bdb"
+#define UUID_STK "d8f20131-fcb5-4e17-8602-d6ed3816164c"
+#define UUID_AUTH "1d2b5ff7-0aa1-48b2-aa52-50f15767174e"
+#define UUID_DSS "c08a26dd-7718-4382-8482-6e0d583c4d0e"
+#define uuid_ext_qmux "d1a30bc2-f97a-6e43-bf65-c7e24fb0f0d3"
+#define uuid_mshsd "883b7c26-985f-43fa-9804-27d7fb80959c"
+#define uuid_qmbe "2d0c12c9-0e6a-495a-915c-8d174fe5d63c"
+#define UUID_MSFWID "e9f7dea2-feaf-4009-93ce-90a3694103b6"
+#define uuid_atds "5967bdcc-7fd2-49a2-9f5c-b2e70e527db3"
+#define uuid_qdu "6427015f-579d-48f5-8c54-f43ed1e76f83"
+#define UUID_MS_UICC_LOW_LEVEL "c2f6588e-f037-4bc9-8665-f4d44bd09367"
+#define UUID_MS_SARControl "68223D04-9F6C-4E0F-822D-28441FB72340"
+#define UUID_VOICEEXTENSIONS "8d8b9eba-37be-449b-8f1e-61cb034a702e"
+#define UUID_LIBMBIM_PROXY "838cf7fb-8d0d-4d7f-871e-d71dbefbb39b"
+
+#define UUID_MBIMContextTypeInternet "7E5E2A7E-4E6F-7272-736B-656E7E5E2A7E"
+
+typedef unsigned char UINT8;
+typedef unsigned short UINT16;
+typedef unsigned int UINT32;
+typedef unsigned long long UINT64;
+
+#pragma pack(4)
+typedef enum {
+    MBIM_CID_CMD_TYPE_QUERY = 0,
+    MBIM_CID_CMD_TYPE_SET = 1,
+} MBIM_CID_CMD_TYPE_E;
+
+typedef enum {
+    MBIM_CID_DEVICE_CAPS = 1,
+    MBIM_CID_SUBSCRIBER_READY_STATUS = 2,
+    MBIM_CID_RADIO_STATE = 3,
+    MBIM_CID_PIN = 4,
+    MBIM_CID_PIN_LIS = 5,
+    MBIM_CID_HOME_PROVIDER = 6,
+    MBIM_CID_PREFERRED_PROVIDERS = 7,
+    MBIM_CID_VISIBLE_PROVIDERS = 8,
+    MBIM_CID_REGISTER_STATE = 9,
+    MBIM_CID_PACKET_SERVICE = 10,
+    MBIM_CID_SIGNAL_STATE = 11,
+    MBIM_CID_CONNECT = 12,
+    MBIM_CID_PROVISIONED_CONTEXTS = 13,
+    MBIM_CID_SERVICE_ACTIVATION = 14,
+    MBIM_CID_IP_CONFIGURATION = 15,
+    MBIM_CID_DEVICE_SERVICES = 16,
+    MBIM_CID_DEVICE_SERVICE_SUBSCRIBE_LIST = 19,
+    MBIM_CID_PACKET_STATISTICS = 20,
+    MBIM_CID_NETWORK_IDLE_HINT = 21,
+    MBIM_CID_EMERGENCY_MODE = 22,
+    MBIM_CID_IP_PACKET_FILTERS = 23,
+    MBIM_CID_MULTICARRIER_PROVIDERS = 24,
+} UUID_BASIC_CONNECT_CID_E;
+
+typedef enum{
+    MBIM_CID_MS_PROVISIONED_CONTEXT_V2 = 1,
+    MBIM_CID_MS_NETWORK_BLACKLIST = 2,
+    MBIM_CID_MS_LTE_ATTACH_CONFIG = 3,
+    MBIM_CID_MS_LTE_ATTACH_STATUS = 4,
+    MBIM_CID_MS_SYS_CAPS = 5,
+    MBIM_CID_MS_DEVICE_CAPS_V2 = 6,
+    MBIM_CID_MS_DEVICE_SLOT_MAPPING = 7,
+    MBIM_CID_MS_SLOT_INFO_STATUS = 8,
+    MBIM_CID_MS_PCO = 9,
+    MBIM_CID_MS_DEVICE_RESET = 10,
+    MBIM_CID_MS_BASE_STATIONS_INFO = 11,
+    MBIM_CID_MS_LOCATION_INFO_STATUS = 12,
+    MBIM_CID_NOT_DEFINED = 13,
+    MBIM_CID_MS_PIN_EX = 14,
+    MBIM_CID_MS_VERSION = 15,
+} UUID_BASIC_CONNECT_EXT_CID_E;
+
+typedef enum {
+    MBIM_CID_SMS_CONFIGURATION = 1,        // Y Y Y
+    MBIM_CID_SMS_READ = 2,                 // N Y Y
+    MBIM_CID_SMS_SEND = 3,                 // Y N N
+    MBIM_CID_SMS_DELETE = 4,               // Y N N
+    MBIM_CID_SMS_MESSAGE_STORE_STATUS = 5, // N Y Y
+} UUID_SMS_CID_E;
+
+typedef enum {
+    MBIM_CID_DSS_CONNECT = 1, // Y N N
+}
UUID_DSS_CID_E; + +typedef enum{ + MBIM_OPEN_MSG = 1, + MBIM_CLOSE_MSG = 2, + MBIM_COMMAND_MSG = 3, + MBIM_HOST_ERROR_MSG = 4, + MBIM_OPEN_DONE = 0x80000001, + MBIM_CLOSE_DONE = 0x80000002, + MBIM_COMMAND_DONE = 0x80000003, + MBIM_FUNCTION_ERROR_MSG = 0x80000004, + MBIM_INDICATE_STATUS_MSG = 0x80000007, +} MBIM_MSG_Type_E; + +typedef enum { /*< since=1.10 >*/ + MBIM_CID_PROXY_CONTROL_UNKNOWN = 0, + MBIM_CID_PROXY_CONTROL_CONFIGURATION = 1 +} UUID_LIBMBIM_PROXY_CID_E; + +typedef enum { + MBIM_CID_MS_UICC_ATR = 1, + MBIM_CID_MS_UICC_OPEN_CHANNEL = 2, + MBIM_CID_MS_UICC_CLOSE_CHANNEL = 3, + MBIM_CID_MS_UICC_APDU = 4, + MBIM_CID_MS_UICC_TERMINAL_CAPABILITY = 5, + MBIM_CID_MS_UICC_RESET = 6, + MBIM_CID_MS_APP_LIST = 7, +} UUID_MS_UICC_CID_E; + +typedef enum { + MBIM_ERROR_TIMEOUT_FRAGMENT = 1, + MBIM_ERROR_FRAGMENT_OUT_OF_SEQUENCE = 2, + MBIM_ERROR_LENGTH_MISMATCH = 3, + MBIM_ERROR_DUPLICATED_TID = 4, + MBIM_ERROR_NOT_OPENED = 5, + MBIM_ERROR_UNKNOWN = 6, + MBIM_ERROR_CANCEL = 7, + MBIM_ERROR_MAX_TRANSFER = 8, +} MBIM_ERROR_E; + +typedef enum { + MBIM_STATUS_SUCCESS = 0, + MBIM_STATUS_BUSY = 1, + MBIM_STATUS_FAILURE = 2, + MBIM_STATUS_SIM_NOT_INSERTED = 3, + MBIM_STATUS_BAD_SIM = 4, + MBIM_STATUS_PIN_REQUIRED = 5, + MBIM_STATUS_PIN_DISABLED = 6, + MBIM_STATUS_NOT_REGISTERED = 7, + MBIM_STATUS_PROVIDERS_NOT_FOUND = 8, + MBIM_STATUS_NO_DEVICE_SUPPORT = 9, + MBIM_STATUS_PROVIDER_NOT_VISIBLE = 10, + MBIM_STATUS_DATA_CLASS_NOT_AVAILABL = 11, + MBIM_STATUS_PACKET_SERVICE_DETACHED = 12, +} MBIM_STATUS_CODES_E; + +typedef enum { + MBIMPacketServiceActionAttach = 0, + MBIMPacketServiceActionDetach = 1, +} MBIM_PACKET_SERVICE_ACTION_E; + +typedef enum { + MBIMPacketServiceStateUnknown = 0, + MBIMPacketServiceStateAttaching = 1, + MBIMPacketServiceStateAttached = 2, + MBIMPacketServiceStateDetaching = 3, + MBIMPacketServiceStateDetached = 4, +} MBIM_PACKET_SERVICE_STATE_E; + +static const char *MBIMPacketServiceStateStr(int _val) { + struct { int val;char *name;} _enumstr[] = { + {MBIMPacketServiceStateUnknown, "Unknown"}, + {MBIMPacketServiceStateAttaching, "Attaching"}, + {MBIMPacketServiceStateAttached, "Attached"}, + {MBIMPacketServiceStateDetaching, "Detaching"}, + {MBIMPacketServiceStateDetached, "Detached"}, + }; + int idx; + + for (idx = 0; idx < (int)(sizeof(_enumstr)/sizeof(_enumstr[0])); idx++) { + if (_val == _enumstr[idx].val) + return _enumstr[idx].name; + } + + return "Undefined"; +}; + +typedef enum { + MBIMDataClassNone = 0x0, + MBIMDataClassGPRS = 0x1, + MBIMDataClassEDGE = 0x2, + MBIMDataClassUMTS = 0x4, + MBIMDataClassHSDPA = 0x8, + MBIMDataClassHSUPA = 0x10, + MBIMDataClassLTE = 0x20, + MBIMDataClass5G_NSA = 0x40, + MBIMDataClass5G_SA = 0x80, + MBIMDataClass1XRTT = 0x10000, + MBIMDataClass1XEVDO = 0x20000, + MBIMDataClass1XEVDORevA = 0x40000, + MBIMDataClass1XEVDV = 0x80000, + MBIMDataClass3XRTT = 0x100000, + MBIMDataClass1XEVDORevB = 0x200000, + MBIMDataClassUMB = 0x400000, + MBIMDataClassCustom = 0x80000000, +} MBIM_DATA_CLASS_E; + +static const char *MBIMDataClassStr(int _val) { + struct { int val;char *name;} _enumstr[] = { + {MBIMDataClassNone, "None"}, + {MBIMDataClassGPRS, "GPRS"}, + {MBIMDataClassEDGE, "EDGE"}, + {MBIMDataClassUMTS, "UMTS"}, + {MBIMDataClassHSDPA, "HSDPA"}, + {MBIMDataClassHSUPA, "HSUPA"}, + {MBIMDataClassLTE, "LTE"}, + {MBIMDataClass5G_NSA, "5G_NSA"}, + {MBIMDataClass5G_SA, "5G_SA"}, + {MBIMDataClass1XRTT, "1XRTT"}, + {MBIMDataClass1XEVDO, "1XEVDO"}, + {MBIMDataClass1XEVDORevA, "1XEVDORevA"}, + {MBIMDataClass1XEVDV, "1XEVDV"}, + {MBIMDataClass3XRTT, "3XRTT"}, 
+ {MBIMDataClass1XEVDORevB, "1XEVDORevB"}, + {MBIMDataClassUMB, "UMB"}, + {MBIMDataClassCustom, "Custom"}, + }; + int idx; + + for (idx = 0; idx < (int)(sizeof(_enumstr)/sizeof(_enumstr[0])); idx++) { + if (_val == _enumstr[idx].val) + return _enumstr[idx].name; + } + + return "Unknow"; +}; + +typedef struct { + UINT32 NwError; + UINT32 PacketServiceState; //MBIM_PACKET_SERVICE_STATE_E + UINT32 HighestAvailableDataClass; //MBIM_DATA_CLASS_E + UINT64 UplinkSpeed; + UINT64 DownlinkSpeed; +} MBIM_PACKET_SERVICE_INFO_T; + +typedef struct { + UINT32 NwError; + UINT32 PacketServiceState; //MBIM_PACKET_SERVICE_STATE_E + UINT32 CurrentDataClass; //MBIM_DATA_CLASS_E + UINT64 UplinkSpeed; + UINT64 DownlinkSpeed; + UINT32 FrequencyRange; +} MBIM_PACKET_SERVICE_INFO_V2_T; + +typedef enum { + MBIMSubscriberReadyStateNotInitialized = 0, + MBIMSubscriberReadyStateInitialized = 1, + MBIMSubscriberReadyStateSimNotInserted = 2, + MBIMSubscriberReadyStateBadSim = 3, + MBIMSubscriberReadyStateFailure = 4, + MBIMSubscriberReadyStateNotActivated = 5, + MBIMSubscriberReadyStateDeviceLocked = 6, +}MBIM_SUBSCRIBER_READY_STATE_E; + +static const char *MBIMSubscriberReadyStateStr(int _val) { + struct { int val;char *name;} _enumstr[] = { + {MBIMSubscriberReadyStateNotInitialized, "NotInitialized"}, + {MBIMSubscriberReadyStateInitialized, "Initialized"}, + {MBIMSubscriberReadyStateSimNotInserted, "NotInserted"}, + {MBIMSubscriberReadyStateBadSim, "BadSim"}, + {MBIMSubscriberReadyStateFailure, "Failure"}, + {MBIMSubscriberReadyStateNotActivated, "NotActivated"}, + {MBIMSubscriberReadyStateDeviceLocked, "DeviceLocked"}, + }; + int idx; + + for (idx = 0; idx < (int)(sizeof(_enumstr)/sizeof(_enumstr[0])); idx++) { + if (_val == _enumstr[idx].val) + return _enumstr[idx].name; + } + + return "Undefined"; +}; + +typedef struct { + UINT32 DeviceType; //MBIM_DEVICE_TYPE + UINT32 CellularClass; //MBIM_CELLULAR_CLASS + UINT32 VoiceClass; //MBIM_VOICE_CLASS + UINT32 SimClass; //MBIM_SIM_CLASS + UINT32 DataClass; //MBIM_DATA_CLASS + UINT32 SmsCaps; //MBIM_SMS_CAPS + UINT32 ControlCaps; //MBIM_CTRL_CAPS + UINT32 MaxSessions; + UINT32 CustomDataClassOffset; + UINT32 CustomDataClassSize; + UINT32 DeviceIdOffset; + UINT32 DeviceIdSize; + UINT32 FirmwareInfoOffset; + UINT32 FirmwareInfoSize; + UINT32 HardwareInfoOffset; + UINT32 HardwareInfoSize; + UINT8 DataBuffer[0]; //DeviceId FirmwareInfo HardwareInfo +} MBIM_DEVICE_CAPS_INFO_T; + +typedef enum { + MBIMRadioOff = 0, + MBIMRadioOn = 1, +} MBIM_RADIO_SWITCH_STATE_E; + +typedef struct { + MBIM_RADIO_SWITCH_STATE_E RadioState; +} MBIM_SET_RADIO_STATE_T; + +typedef struct { + MBIM_RADIO_SWITCH_STATE_E HwRadioState; + MBIM_RADIO_SWITCH_STATE_E SwRadioState; +} MBIM_RADIO_STATE_INFO_T; + +typedef enum { + MBIMReadyInfoFlagsNone, + MBIMReadyInfoFlagsProtectUniqueID, +}MBIM_UNIQUE_ID_FLAGS; + +typedef struct { + UINT32 ReadyState; + UINT32 SubscriberIdOffset; + UINT32 SubscriberIdSize; + UINT32 SimIccIdOffset; + UINT32 SimIccIdSize; + UINT32 ReadyInfo; + UINT32 ElementCount; + UINT8 *TelephoneNumbersRefList; + UINT8 *DataBuffer; +} MBIM_SUBSCRIBER_READY_STATUS_T; + +typedef enum { + MBIMRegisterActionAutomatic, + MBIMRegisterActionManual, +}MBIM_REGISTER_ACTION_E; + +typedef enum { + MBIMRegisterStateUnknown = 0, + MBIMRegisterStateDeregistered = 1, + MBIMRegisterStateSearching = 2, + MBIMRegisterStateHome = 3, + MBIMRegisterStateRoaming = 4, + MBIMRegisterStatePartner = 5, + MBIMRegisterStateDenied = 6, +}MBIM_REGISTER_STATE_E; + +typedef enum { + MBIMRegisterModeUnknown = 0, + 
MBIMRegisterModeAutomatic = 1,
+    MBIMRegisterModeManual = 2,
+}MBIM_REGISTER_MODE_E;
+
+static const char *MBIMRegisterStateStr(int _val) {
+    struct { int val;char *name;} _enumstr[] ={
+        {MBIMRegisterStateUnknown, "Unknown"},
+        {MBIMRegisterStateDeregistered, "Deregistered"},
+        {MBIMRegisterStateSearching, "Searching"},
+        {MBIMRegisterStateHome, "Home"},
+        {MBIMRegisterStateRoaming, "Roaming"},
+        {MBIMRegisterStatePartner, "Partner"},
+        {MBIMRegisterStateDenied, "Denied"},
+    };
+    int idx;
+
+    for (idx = 0; idx < (int)(sizeof(_enumstr)/sizeof(_enumstr[0])); idx++) {
+        if (_val == _enumstr[idx].val)
+            return _enumstr[idx].name;
+    }
+
+    return "Undefined";
+};
+
+static const char *MBIMRegisterModeStr(int _val) {
+    struct { int val;char *name;} _enumstr[] = {
+        {MBIMRegisterModeUnknown, "Unknown"},
+        {MBIMRegisterModeAutomatic, "Automatic"},
+        {MBIMRegisterModeManual, "Manual"},
+    };
+    int idx;
+
+    for (idx = 0; idx < (int)(sizeof(_enumstr)/sizeof(_enumstr[0])); idx++) {
+        if (_val == _enumstr[idx].val)
+            return _enumstr[idx].name;
+    }
+
+    return "Undefined";
+};
+
+typedef enum {
+    MBIM_REGISTRATION_NONE,
+    MBIM_REGISTRATION_MANUAL_SELECTION_NOT_AVAILABLE,
+    MBIM_REGISTRATION_PACKET_SERVICE_AUTOMATIC_ATTACH,
+}MBIM_REGISTRATION_FLAGS_E;
+
+typedef struct {
+    UINT32 NwError;
+    UINT32 RegisterState; //MBIM_REGISTER_STATE_E
+    UINT32 RegisterMode;
+    UINT32 AvailableDataClasses;
+    UINT32 CurrentCellularClass;
+    UINT32 ProviderIdOffset;
+    UINT32 ProviderIdSize;
+    UINT32 ProviderNameOffset;
+    UINT32 ProviderNameSize;
+    UINT32 RoamingTextOffset;
+    UINT32 RoamingTextSize;
+    UINT32 RegistrationFlag;
+    UINT8 *DataBuffer;
+} MBIM_REGISTRATION_STATE_INFO_T;
+
+typedef struct {
+    UINT32 NwError;
+    UINT32 RegisterState; //MBIM_REGISTER_STATE_E
+    UINT32 RegisterMode;
+    UINT32 AvailableDataClasses;
+    UINT32 CurrentCellularClass;
+    UINT32 ProviderIdOffset;
+    UINT32 ProviderIdSize;
+    UINT32 ProviderNameOffset;
+    UINT32 ProviderNameSize;
+    UINT32 RoamingTextOffset;
+    UINT32 RoamingTextSize;
+    UINT32 RegistrationFlag;
+    UINT32 PreferredDataClass;
+    UINT8 *DataBuffer;
+} MBIM_REGISTRATION_STATE_INFO_V2_T;
+
+typedef struct {
+    UINT32 MessageType; //Specifies the MBIM message type.
+    UINT32 MessageLength; //Specifies the total length of this MBIM message in bytes.
+    /* Specifies the MBIM message id value. This value is used to match host-sent messages with function responses.
+       This value must be unique among all outstanding transactions.
+       For notifications, the TransactionId must be set to 0 by the function */
+    UINT32 TransactionId;
+} MBIM_MESSAGE_HEADER;
+
+typedef struct {
+    UINT32 TotalFragments; //this field indicates how many fragments there are in total.
+    UINT32 CurrentFragment; //This field indicates which fragment this message is. Values are 0 to TotalFragments - 1
+} MBIM_FRAGMENT_HEADER;
+
+typedef struct {
+    MBIM_MESSAGE_HEADER MessageHeader;
+    UINT32 MaxControlTransfer;
+} MBIM_OPEN_MSG_T;
+
+typedef struct {
+    MBIM_MESSAGE_HEADER MessageHeader;
+    UINT32 Status; //MBIM_STATUS_CODES_E
+} MBIM_OPEN_DONE_T;
+
+typedef struct {
+    MBIM_MESSAGE_HEADER MessageHeader;
+} MBIM_CLOSE_MSG_T;
+
+typedef struct {
+    MBIM_MESSAGE_HEADER MessageHeader;
+    UINT32 Status;
+} MBIM_CLOSE_DONE_T;
+
+typedef struct {
+    UINT8 uuid[16];
+} UUID_T;
+
+typedef struct {
+    MBIM_MESSAGE_HEADER MessageHeader;
+    MBIM_FRAGMENT_HEADER FragmentHeader;
+    UUID_T DeviceServiceId; //A 16 byte UUID that identifies the device service the following CID value applies to.
+ UINT32 CID; //Specifies the CID that identifies the parameter being queried for + UINT32 CommandType; //0 for a query operation, 1 for a Set operation + UINT32 InformationBufferLength; //Size of the Total InformationBuffer, may be larger than current message if fragmented. + UINT8 InformationBuffer[0]; //Data supplied to device specific to the CID +} MBIM_COMMAND_MSG_T; + +typedef struct { + MBIM_MESSAGE_HEADER MessageHeader; + MBIM_FRAGMENT_HEADER FragmentHeader; + UUID_T DeviceServiceId; //A 16 byte UUID that identifies the device service the following CID value applies. + UINT32 CID; //Specifies the CID that identifies the parameter being queried for + UINT32 Status; + UINT32 InformationBufferLength; //Size of the Total InformationBuffer, may be larger than current message if fragmented. + UINT8 InformationBuffer[0]; //Data supplied to device specific to the CID +} MBIM_COMMAND_DONE_T; + +typedef struct { + MBIM_MESSAGE_HEADER MessageHeader; + UINT32 ErrorStatusCode; +} MBIM_HOST_ERROR_MSG_T; + +typedef struct { + MBIM_MESSAGE_HEADER MessageHeader; + UINT32 ErrorStatusCode; +} MBIM_FUNCTION_ERROR_MSG_T; + +typedef struct { + MBIM_MESSAGE_HEADER MessageHeader; + MBIM_FRAGMENT_HEADER FragmentHeader; + UUID_T DeviceServiceId; //A 16 byte UUID that identifies the device service the following CID value applies. + UINT32 CID; //Specifies the CID that identifies the parameter being queried for + UINT32 InformationBufferLength; //Size of the Total InformationBuffer, may be larger than current message if fragmented. + UINT8 InformationBuffer[0]; //Data supplied to device specific to the CID +} MBIM_INDICATE_STATUS_MSG_T; + +typedef struct { + UINT32 offset; + UINT32 size; +} OL_PAIR_LIST; + +typedef struct { + UUID_T DeviceServiceId; + UINT32 DssPayload; + UINT32 MaxDssInstances; + UINT32 CidCount; + UINT32 CidList[]; +} MBIM_DEVICE_SERVICE_ELEMENT_T; + +typedef struct { + UINT32 DeviceServicesCount; + UINT32 MaxDssSessions; + OL_PAIR_LIST DeviceServicesRefList[]; +} MBIM_DEVICE_SERVICES_INFO_T; + +typedef enum { + MBIMActivationCommandDeactivate = 0, + MBIMActivationCommandActivate = 1, +} MBIM_ACTIVATION_COMMAND_E; + +typedef enum { + MBIMCompressionNone = 0, + MBIMCompressionEnable = 1, +} MBIM_COMPRESSION_E; + +typedef enum { + MBIMAuthProtocolNone = 0, + MBIMAuthProtocolPap = 1, + MBIMAuthProtocolChap = 2, + MBIMAuthProtocolMsChapV2 = 3, +} MBIM_AUTH_PROTOCOL_E; + +typedef enum { + MBIMContextIPTypeDefault = 0, + MBIMContextIPTypeIPv4 = 1, + MBIMContextIPTypeIPv6 = 2, + MBIMContextIPTypeIPv4v6 = 3, + MBIMContextIPTypeIPv4AndIPv6 = 4, +} MBIM_CONTEXT_IP_TYPE_E; + +typedef enum { + MBIMActivationStateUnknown = 0, + MBIMActivationStateActivated = 1, + MBIMActivationStateActivating = 2, + MBIMActivationStateDeactivated = 3, + MBIMActivationStateDeactivating = 4, +} MBIM_ACTIVATION_STATE_E; + +typedef enum { + MBIMVoiceCallStateNone = 0, + MBIMVoiceCallStateInProgress = 1, + MBIMVoiceCallStateHangUp = 2, +} MBIM_VOICECALL_STATE_E; + +static const char *MBIMMSGTypeStr(int _val) { + struct { int val;char *name;} _enumstr[] = { + {MBIM_OPEN_MSG, "MBIM_OPEN_MSG"}, + {MBIM_CLOSE_MSG, "MBIM_CLOSE_MSG"}, + {MBIM_COMMAND_MSG, "MBIM_COMMAND_MSG"}, + {MBIM_HOST_ERROR_MSG, "MBIM_HOST_ERROR_MSG"}, + {MBIM_OPEN_DONE, "MBIM_OPEN_DONE"}, + {MBIM_CLOSE_DONE, "MBIM_CLOSE_DONE"}, + {MBIM_COMMAND_DONE, "MBIM_COMMAND_DONE"}, + {MBIM_FUNCTION_ERROR_MSG, "MBIM_FUNCTION_ERROR_MSG"}, + {MBIM_INDICATE_STATUS_MSG, "MBIM_INDICATE_STATUS_MSG"}, + }; + int idx; + + for (idx = 0; idx < 
(int)(sizeof(_enumstr)/sizeof(_enumstr[0])); idx++) { + if (_val == _enumstr[idx].val) + return _enumstr[idx].name; + } + + return "MBIMMSGTypeUnknow"; +}; + +static const char *MBIMContextIPTypeStr(int _val) { + struct { int val;char *name;} _enumstr[] = { + {MBIMContextIPTypeDefault, "MBIMContextIPTypeDefault"}, + {MBIMContextIPTypeIPv4, "MBIMContextIPTypeIPv4"}, + {MBIMContextIPTypeIPv6, "MBIMContextIPTypeIPv6"}, + {MBIMContextIPTypeIPv4v6, "MBIMContextIPTypeIPv4v6"}, + {MBIMContextIPTypeIPv4AndIPv6, "MBIMContextIPTypeIPv4AndIPv6"}, + }; + int idx; + + for (idx = 0; idx < (int)(sizeof(_enumstr)/sizeof(_enumstr[0])); idx++) { + if (_val == _enumstr[idx].val) + return _enumstr[idx].name; + } + + return "MBIMContextIPTypeUnknow"; +} + +static const char *MBIMActivationStateStr(int _val) { + struct { int val;char *name;} _enumstr[] = { + {MBIMActivationStateUnknown, "Unknown"}, + {MBIMActivationStateActivated, "Activated"}, + {MBIMActivationStateActivating, "Activating"}, + {MBIMActivationStateDeactivated, "Deactivated"}, + {MBIMActivationStateDeactivating, "Deactivating"}, + }; + int idx; + + for (idx = 0; idx < (int)(sizeof(_enumstr)/sizeof(_enumstr[0])); idx++) { + if (_val == _enumstr[idx].val) + return _enumstr[idx].name; + } + + return "Undefined"; +}; + +static const char *MBIMVoiceCallStateStr(int _val) { + struct { int val;char *name;} _enumstr[] = { + {MBIMVoiceCallStateNone, "None"}, + {MBIMVoiceCallStateInProgress, "InProgress"}, + {MBIMVoiceCallStateHangUp, "HangUp"}, + }; + int idx; + + for (idx = 0; idx < (int)(sizeof(_enumstr)/sizeof(_enumstr[0])); idx++) { + if (_val == _enumstr[idx].val) + return _enumstr[idx].name; + } + + return "Undefined"; +}; + +typedef struct { + const char *uuid; + UINT32 cid; + const char *name; +} UUID_CID_STR; + +static const UUID_CID_STR uuid_cid_string[] = { + {UUID_BASIC_CONNECT, MBIM_CID_DEVICE_CAPS, "MBIM_CID_DEVICE_CAPS"}, + {UUID_BASIC_CONNECT, MBIM_CID_SUBSCRIBER_READY_STATUS, "MBIM_CID_SUBSCRIBER_READY_STATUS"}, + {UUID_BASIC_CONNECT, MBIM_CID_RADIO_STATE, "MBIM_CID_RADIO_STATE"}, + {UUID_BASIC_CONNECT, MBIM_CID_PIN, "MBIM_CID_PIN"}, + {UUID_BASIC_CONNECT, MBIM_CID_PIN_LIS, "MBIM_CID_PIN_LIS"}, + {UUID_BASIC_CONNECT, MBIM_CID_HOME_PROVIDER, "MBIM_CID_HOME_PROVIDER"}, + {UUID_BASIC_CONNECT, MBIM_CID_PREFERRED_PROVIDERS, "MBIM_CID_PREFERRED_PROVIDERS"}, + {UUID_BASIC_CONNECT, MBIM_CID_VISIBLE_PROVIDERS, "MBIM_CID_VISIBLE_PROVIDERS"}, + {UUID_BASIC_CONNECT, MBIM_CID_REGISTER_STATE, "MBIM_CID_REGISTER_STATE"}, + {UUID_BASIC_CONNECT, MBIM_CID_PACKET_SERVICE, "MBIM_CID_PACKET_SERVICE"}, + {UUID_BASIC_CONNECT, MBIM_CID_SIGNAL_STATE, "MBIM_CID_SIGNAL_STATE"}, + {UUID_BASIC_CONNECT, MBIM_CID_CONNECT, "MBIM_CID_CONNECT"}, + {UUID_BASIC_CONNECT, MBIM_CID_PROVISIONED_CONTEXTS, "MBIM_CID_PROVISIONED_CONTEXTS"}, + {UUID_BASIC_CONNECT, MBIM_CID_SERVICE_ACTIVATION, "MBIM_CID_SERVICE_ACTIVATION"}, + {UUID_BASIC_CONNECT, MBIM_CID_IP_CONFIGURATION, "MBIM_CID_IP_CONFIGURATION"}, + {UUID_BASIC_CONNECT, MBIM_CID_DEVICE_SERVICES, "MBIM_CID_DEVICE_SERVICES"}, + {UUID_BASIC_CONNECT, MBIM_CID_DEVICE_SERVICE_SUBSCRIBE_LIST, "MBIM_CID_DEVICE_SERVICE_SUBSCRIBE_LIST"}, + {UUID_BASIC_CONNECT, MBIM_CID_PACKET_STATISTICS, "MBIM_CID_PACKET_STATISTICS"}, + {UUID_BASIC_CONNECT, MBIM_CID_NETWORK_IDLE_HINT, "MBIM_CID_NETWORK_IDLE_HINT"}, + {UUID_BASIC_CONNECT, MBIM_CID_EMERGENCY_MODE, "MBIM_CID_EMERGENCY_MODE"}, + {UUID_BASIC_CONNECT, MBIM_CID_IP_PACKET_FILTERS, "MBIM_CID_IP_PACKET_FILTERS"}, + {UUID_BASIC_CONNECT, MBIM_CID_MULTICARRIER_PROVIDERS, 
"MBIM_CID_MULTICARRIER_PROVIDERS"}, + + {UUID_BASIC_CONNECT_EXT, MBIM_CID_MS_PROVISIONED_CONTEXT_V2, "MBIM_CID_MS_PROVISIONED_CONTEXT_V2"}, + {UUID_BASIC_CONNECT_EXT, MBIM_CID_MS_NETWORK_BLACKLIST, "MBIM_CID_MS_NETWORK_BLACKLIST"}, + {UUID_BASIC_CONNECT_EXT, MBIM_CID_MS_LTE_ATTACH_CONFIG, "MBIM_CID_MS_LTE_ATTACH_CONFIG"}, + {UUID_BASIC_CONNECT_EXT, MBIM_CID_MS_LTE_ATTACH_STATUS, "MBIM_CID_MS_LTE_ATTACH_STATUS"}, + {UUID_BASIC_CONNECT_EXT, MBIM_CID_MS_SYS_CAPS, "MBIM_CID_MS_SYS_CAPS"}, + {UUID_BASIC_CONNECT_EXT, MBIM_CID_MS_DEVICE_CAPS_V2, "MBIM_CID_MS_DEVICE_CAPS_V2"}, + {UUID_BASIC_CONNECT_EXT, MBIM_CID_MS_DEVICE_SLOT_MAPPING, "MBIM_CID_MS_DEVICE_SLOT_MAPPING"}, + {UUID_BASIC_CONNECT_EXT, MBIM_CID_MS_SLOT_INFO_STATUS, "MBIM_CID_MS_SLOT_INFO_STATUS"}, + {UUID_BASIC_CONNECT_EXT, MBIM_CID_MS_PCO, "MBIM_CID_MS_PCO"}, + {UUID_BASIC_CONNECT_EXT, MBIM_CID_MS_DEVICE_RESET, "MBIM_CID_MS_DEVICE_RESET"}, + {UUID_BASIC_CONNECT_EXT, MBIM_CID_MS_BASE_STATIONS_INFO, "MBIM_CID_MS_BASE_STATIONS_INFO"}, + {UUID_BASIC_CONNECT_EXT, MBIM_CID_MS_LOCATION_INFO_STATUS, "MBIM_CID_MS_LOCATION_INFO_STATUS"}, + {UUID_BASIC_CONNECT_EXT, MBIM_CID_NOT_DEFINED, "MBIM_CID_NOT_DEFINED"}, + {UUID_BASIC_CONNECT_EXT, MBIM_CID_MS_PIN_EX, "MBIM_CID_MS_PIN_EX"}, + {UUID_BASIC_CONNECT_EXT, MBIM_CID_MS_VERSION, "MBIM_CID_MS_VERSION"}, + + {UUID_MS_UICC_LOW_LEVEL, MBIM_CID_MS_UICC_ATR, "MBIM_CID_MS_UICC_ATR"}, + {UUID_MS_UICC_LOW_LEVEL, MBIM_CID_MS_UICC_OPEN_CHANNEL, "MBIM_CID_MS_UICC_OPEN_CHANNEL"}, + {UUID_MS_UICC_LOW_LEVEL, MBIM_CID_MS_UICC_CLOSE_CHANNEL, "MBIM_CID_MS_UICC_CLOSE_CHANNEL"}, + {UUID_MS_UICC_LOW_LEVEL, MBIM_CID_MS_UICC_APDU, "MBIM_CID_MS_UICC_APDU"}, + {UUID_MS_UICC_LOW_LEVEL, MBIM_CID_MS_UICC_TERMINAL_CAPABILITY, "MBIM_CID_MS_UICC_TERMINAL_CAPABILITY"}, + {UUID_MS_UICC_LOW_LEVEL, MBIM_CID_MS_UICC_RESET, "MBIM_CID_MS_UICC_RESET"}, + {UUID_MS_UICC_LOW_LEVEL, MBIM_CID_MS_APP_LIST, "MBIM_CID_MS_APP_LIST"}, +}; + +typedef struct { + UINT32 SessionId; + UINT32 ActivationCommand; //MBIM_ACTIVATION_COMMAND_E + UINT32 AccessStringOffset; + UINT32 AccessStringSize; + UINT32 UserNameOffset; + UINT32 UserNameSize; + UINT32 PasswordOffset; + UINT32 PasswordSize; + UINT32 Compression; //MBIM_COMPRESSION_E + UINT32 AuthProtocol; //MBIM_AUTH_PROTOCOL_E + UINT32 IPType; //MBIM_CONTEXT_IP_TYPE_E + UUID_T ContextType; + UINT8 DataBuffer[0]; /* apn, username, password */ +} MBIM_SET_CONNECT_T; + +typedef struct { + UINT32 SessionId; + UINT32 ActivationState; //MBIM_ACTIVATION_STATE_E + UINT32 VoiceCallState; + UINT32 IPType; //MBIM_CONTEXT_IP_TYPE_E + UUID_T ContextType; + UINT32 NwError; +} MBIM_CONNECT_T; + +typedef struct { + UINT32 OnLinkPrefixLength; + UINT8 IPv4Address[4]; +} MBIM_IPV4_ELEMENT_T; + +typedef struct { + UINT32 OnLinkPrefixLength; + UINT8 IPv6Address[16]; +} MBIM_IPV6_ELEMENT_T; + +typedef struct { + UINT32 SessionId; + UINT32 IPv4ConfigurationAvailable; //bit0~Address, bit1~gateway, bit2~DNS, bit3~MTU + UINT32 IPv6ConfigurationAvailable; //bit0~Address, bit1~gateway, bit2~DNS, bit3~MTU + UINT32 IPv4AddressCount; + UINT32 IPv4AddressOffset; + UINT32 IPv6AddressCount; + UINT32 IPv6AddressOffset; + UINT32 IPv4GatewayOffset; + UINT32 IPv6GatewayOffset; + UINT32 IPv4DnsServerCount; + UINT32 IPv4DnsServerOffset; + UINT32 IPv6DnsServerCount; + UINT32 IPv6DnsServerOffset; + UINT32 IPv4Mtu; + UINT32 IPv6Mtu; + UINT8 DataBuffer[]; +} MBIM_IP_CONFIGURATION_INFO_T; + +typedef struct { + UINT32 RSRP; + UINT32 SNR; + UINT32 RSRPThreshold; + UINT32 SNRThreshold; + UINT32 SystemType; +} MBIM_RSRP_SNR_INFO_T; + +typedef struct { 
+ UINT32 Elementcount; + MBIM_RSRP_SNR_INFO_T RsrpSnr[0]; +} MBIM_RSRP_SNR_T; + +typedef struct { + UINT32 Rssi; + UINT32 ErrorRate; + UINT32 SignalStrengthInterval; + UINT32 RssiThreshold; + UINT32 ErrorRateThreshold; +} MBIM_SIGNAL_STATE_INFO_T; + +typedef struct { + UINT32 Rssi; + UINT32 ErrorRate; + UINT32 SignalStrengthInterval; + UINT32 RssiThreshold; + UINT32 ErrorRateThreshold; + UINT32 RsrpSnrOffset; + UINT32 RsrpSnrSize; + UINT8 DataBuffer[]; +} MBIM_SIGNAL_STATE_INFO_V2_T; + +typedef struct { + UINT32 SignalStrengthInterval; + UINT32 RssiThreshold; + UINT32 ErrorRateThreshold; +} MBIM_SET_SIGNAL_STATE_T; + +typedef struct { + UINT32 DevicePathOffset; + UINT32 DevicePathSize; + UINT32 Timeout; + UINT8 DataBuffer[]; +} MBIM_LIBQMI_PROXY_CONFIG_T; + +typedef struct { + UINT32 AtrSize; + UINT32 AtrOffset; + UINT8 DataBuffer[]; +} MBIM_MS_ATR_INFO_T; + +#pragma pack() + +static pthread_t s_tid_reader = 0; +static int mbim_verbose = 0; +static UINT32 TransactionId = 1; +static unsigned mbim_default_timeout = 30000; +static const char *mbim_apn = NULL; +static const char *mbim_user = NULL; +static const char *mbim_passwd = NULL; +static int mbim_iptype = MBIMContextIPTypeDefault; +static int mbim_auth = MBIMAuthProtocolNone; +static int mbim_sessionID = 0; +static int mbim_fd = -1; +static MBIM_MESSAGE_HEADER *mbim_pRequest; +static MBIM_MESSAGE_HEADER *mbim_pResponse; + +static unsigned int qmi_over_mbim_support = 0; +static int qmi_over_mbim_sk[2] = {-1, -1}; +static pthread_mutex_t mbim_command_mutex = PTHREAD_MUTEX_INITIALIZER; +static pthread_cond_t mbim_command_cond = PTHREAD_COND_INITIALIZER; +static int mbim_ms_version = 1; +static uint8_t qmi_over_mbim_nas = 0; +int qmi_over_mbim_qmidev_send(PQCQMIMSG pQMI); + +static const UUID_T * str2uuid(const char *str) { + static UUID_T uuid; + UINT32 d[16]; + char tmp[16*2+4+1]; + unsigned i = 0; + + while (str[i]) { + tmp[i] = tolower(str[i]); + i++; + } + tmp[i] = '\0'; + + sscanf(tmp, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x", + &d[0], &d[1], &d[2], &d[3], &d[4], &d[5], &d[6], &d[7], + &d[8], &d[9], &d[10], &d[11], &d[12], &d[13], &d[14], &d[15]); + + for (i = 0; i < 16; i++) { + uuid.uuid[i] = d[i]&0xFF; + } + + return &uuid; +} + +static void wchar2char(const char *src, size_t src_size, char *dst, size_t dst_len) { + size_t i; + + for (i = 0; i < (dst_len-1) && i < (src_size/2); i++) + dst[i] = src[i*2]; + dst[i] = 0; +} + +static size_t char2wchar(const char *src, size_t src_len, uint8_t *dst, size_t dst_len) { + size_t i; + + if (src_len > (dst_len/2)) + src_len = (dst_len/2); + + for (i = 0; i < src_len; i++) { + *dst++ = *src++; + *dst++ = 0; + } + + return i*2; +} + +#define mbim_alloc( _size) malloc(_size) +#define mbim_free(_mem) do { if (_mem) { free(_mem); _mem = NULL;}} while(0) + +static int mbim_open_state = 0; +static MBIM_SUBSCRIBER_READY_STATE_E ReadyState = MBIMSubscriberReadyStateNotInitialized; +static MBIM_REGISTER_STATE_E RegisterState = MBIMRegisterStateUnknown; +static MBIM_PACKET_SERVICE_STATE_E PacketServiceState = MBIMPacketServiceStateUnknown; +static MBIM_ACTIVATION_STATE_E ActivationState = MBIMActivationStateUnknown; +static MBIM_SUBSCRIBER_READY_STATE_E oldReadyState = MBIMSubscriberReadyStateNotInitialized; +static MBIM_REGISTER_STATE_E oldRegisterState = MBIMRegisterStateUnknown; +static MBIM_PACKET_SERVICE_STATE_E oldPacketServiceState = MBIMPacketServiceStateUnknown; +static MBIM_ACTIVATION_STATE_E oldActivationState = MBIMActivationStateUnknown; +static int 
mbim_update_state(void); + +static __inline uint32_t mbim2qmi_ipv4addr(uint32_t addr) { + return (addr>>24) | (addr>>8&0xff00) | (addr<<8&0xff0000) | (addr<<24); +} + +static __inline void mbim2qmi_ipv6addr(const unsigned char *src, unsigned char *dst) { + int i; + + for (i = 0; i < 16 ; i++) { + dst[i] = src[i]; + } +} + +static MBIM_MESSAGE_HEADER *compose_open_command(UINT32 MaxControlTransfer) +{ + MBIM_OPEN_MSG_T *pRequest = (MBIM_OPEN_MSG_T *)mbim_alloc(sizeof(MBIM_OPEN_MSG_T)); + + if(!pRequest) + return NULL; + + pRequest->MessageHeader.MessageType = htole32(MBIM_OPEN_MSG); + pRequest->MessageHeader.MessageLength = htole32(sizeof(MBIM_OPEN_MSG_T)); + pRequest->MessageHeader.TransactionId = htole32(TransactionId++); + pRequest->MaxControlTransfer = htole32(MaxControlTransfer); + + return &pRequest->MessageHeader; +} + +static MBIM_MESSAGE_HEADER *compose_close_command(void) +{ + MBIM_CLOSE_MSG_T *pRequest = (MBIM_CLOSE_MSG_T *)mbim_alloc(sizeof(MBIM_CLOSE_MSG_T)); + + if(!pRequest) + return NULL; + + pRequest->MessageHeader.MessageType = htole32(MBIM_CLOSE_MSG); + pRequest->MessageHeader.MessageLength = htole32(sizeof(MBIM_CLOSE_MSG_T)); + pRequest->MessageHeader.TransactionId = htole32(TransactionId++); + + return &pRequest->MessageHeader; +} + +static MBIM_MESSAGE_HEADER *compose_basic_connect_command(UINT32 CID, UINT32 CommandType, void *pInformationBuffer, UINT32 InformationBufferLength) +{ + MBIM_COMMAND_MSG_T *pRequest = (MBIM_COMMAND_MSG_T *)mbim_alloc(sizeof(MBIM_COMMAND_MSG_T) + InformationBufferLength); + + if (!pRequest) + return NULL; + + pRequest->MessageHeader.MessageType = htole32(MBIM_COMMAND_MSG); + pRequest->MessageHeader.MessageLength = htole32((sizeof(MBIM_COMMAND_MSG_T) + InformationBufferLength)); + pRequest->MessageHeader.TransactionId = htole32(TransactionId++); + + pRequest->FragmentHeader.TotalFragments = htole32(1); + pRequest->FragmentHeader.CurrentFragment= htole32(0); + + memcpy(pRequest->DeviceServiceId.uuid, str2uuid(UUID_BASIC_CONNECT), 16); + + pRequest->CID = htole32(CID); + pRequest->CommandType = htole32(CommandType); + if (InformationBufferLength && pInformationBuffer) { + pRequest->InformationBufferLength = htole32(InformationBufferLength); + memcpy(pRequest->InformationBuffer, pInformationBuffer, InformationBufferLength); + } else { + pRequest->InformationBufferLength = htole32(0); + } + + return &pRequest->MessageHeader; +} + +static MBIM_MESSAGE_HEADER *compose_basic_connect_ext_command(UINT32 CID, UINT32 CommandType, void *pInformationBuffer, UINT32 InformationBufferLength) +{ + MBIM_COMMAND_MSG_T *pRequest = (MBIM_COMMAND_MSG_T *)compose_basic_connect_command(CID, CommandType, pInformationBuffer, InformationBufferLength); + + if (!pRequest) + return NULL; + + memcpy(pRequest->DeviceServiceId.uuid, str2uuid(UUID_BASIC_CONNECT_EXT), 16); + + return &pRequest->MessageHeader; +} + +static MBIM_MESSAGE_HEADER *compose_qmi_over_mbim_command(UINT32 CID, UINT32 CommandType, void *pInformationBuffer, UINT32 InformationBufferLength) +{ + MBIM_COMMAND_MSG_T *pRequest = (MBIM_COMMAND_MSG_T *)compose_basic_connect_command(CID, CommandType, pInformationBuffer, InformationBufferLength); + + if (!pRequest) + return NULL; + + memcpy(pRequest->DeviceServiceId.uuid, str2uuid(uuid_ext_qmux), 16); + + return &pRequest->MessageHeader; +} + +static const char * uuid2str(const UUID_T *pUUID) { + static char str[16*2+4+1]; + const UINT8 *d = pUUID->uuid; + + snprintf(str, sizeof(str), "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x", + d[0], 
d[1], d[2], d[3], d[4], d[5], d[6], d[7], + d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15]); + + return str; +} + +static const char *DeviceServiceId2str(const UUID_T *pUUID) { + const char *str = uuid2str(pUUID); + + struct { char *val;char *name;} _enumstr[] = { + {UUID_BASIC_CONNECT, "UUID_BASIC_CONNECT"}, + {UUID_BASIC_CONNECT_EXT, "UUID_BASIC_CONNECT_EXT"}, + {UUID_SMS, "UUID_SMS"}, + {UUID_USSD, "UUID_USSD"}, + {UUID_PHONEBOOK, "UUID_PHONEBOOK"}, + {UUID_STK, "UUID_STK"}, + {UUID_AUTH, "UUID_AUTH"}, + {UUID_DSS, "UUID_DSS"}, + {uuid_ext_qmux, "uuid_ext_qmux"}, + {uuid_mshsd, "uuid_mshsd"}, + {uuid_qmbe, "uuid_qmbe"}, + {UUID_MSFWID, "UUID_MSFWID"}, + {uuid_atds, "uuid_atds"}, + {uuid_qdu, "uuid_qdu"}, + {UUID_MS_UICC_LOW_LEVEL, "UUID_MS_UICC_LOW_LEVEL"}, + {UUID_MS_SARControl, "UUID_MS_SARControl"}, + {UUID_VOICEEXTENSIONS, "UUID_VOICEEXTENSIONS"}, + {UUID_LIBMBIM_PROXY, "UUID_LIBMBIM_PROXY"}, + }; + int idx; + + for (idx = 0; idx < (int)(sizeof(_enumstr)/sizeof(_enumstr[0])); idx++) { + if (!strcasecmp(str, _enumstr[idx].val)) + return _enumstr[idx].name; + } + + return str; +} + +static const char *mbim_get_segment(void *_pMsg, UINT32 offset, UINT32 len) +{ + int idx; + static char buff[256] = {'\0'}; + UINT8 *pMsg = (UINT8*)_pMsg; + + for (idx = 0; idx < (int)(len/2); idx++) + buff[idx] = pMsg[offset+idx*2]; + buff[idx] = '\0'; + return buff; +} + +static void mbim_dump_header(MBIM_MESSAGE_HEADER *pMsg, const char *direction) { + mbim_debug("%s Header:", direction); + mbim_debug("%s MessageLength = %u", direction, le32toh(pMsg->MessageLength)); + mbim_debug("%s MessageType = %s (0x%08x)", direction, MBIMMSGTypeStr(le32toh(pMsg->MessageType)), le32toh(pMsg->MessageType)); + mbim_debug("%s TransactionId = %u", direction, le32toh(pMsg->TransactionId)); + mbim_debug("%s Contents:", direction); +} + +static void mbim_dump_uuid_cid(const UUID_T *pUUID, UINT32 CID, const char *direction) { + size_t idx; + const char *uuidStr = uuid2str(pUUID); + const char *cidStr = "unknow"; + + for (idx = 0; idx < (sizeof(uuid_cid_string)/sizeof(uuid_cid_string[0])); idx++) { + if (!strcmp(uuidStr, uuid_cid_string[idx].uuid) && uuid_cid_string[idx].cid == CID) { + cidStr = uuid_cid_string[idx].name; + } + } + + mbim_debug("%s DeviceServiceId = %s (%s)", direction, DeviceServiceId2str(pUUID), uuidStr); + mbim_debug("%s CID = %s (%u)", direction, cidStr, le32toh(CID)); +} + + +static void mbim_dump_command_msg(MBIM_COMMAND_MSG_T *pCmdMsg, const char *direction) { + mbim_dump_uuid_cid(&pCmdMsg->DeviceServiceId, le32toh(pCmdMsg->CID), direction); + mbim_debug("%s CommandType = %s (%u)", direction, le32toh(pCmdMsg->CommandType) ? 
"set" : "query", le32toh(pCmdMsg->CommandType)); + mbim_debug("%s InformationBufferLength = %u", direction, le32toh(pCmdMsg->InformationBufferLength)); +} + +static void mbim_dump_command_done(MBIM_COMMAND_DONE_T *pCmdDone, const char *direction) { + mbim_dump_uuid_cid(&pCmdDone->DeviceServiceId, le32toh(pCmdDone->CID), direction); + mbim_debug("%s Status = %u", direction, le32toh(pCmdDone->Status)); + mbim_debug("%s InformationBufferLength = %u", direction, le32toh(pCmdDone->InformationBufferLength)); +} + +static void mbim_dump_indicate_msg(MBIM_INDICATE_STATUS_MSG_T *pIndMsg, const char *direction) { + mbim_dump_uuid_cid(&pIndMsg->DeviceServiceId, le32toh(pIndMsg->CID), direction); + mbim_debug("%s InformationBufferLength = %u", direction, le32toh(pIndMsg->InformationBufferLength)); +} + +static void mbim_dump_connect(MBIM_CONNECT_T *pInfo, const char *direction) { + mbim_debug("%s SessionId = %u", direction, le32toh(pInfo->SessionId)); + mbim_debug("%s ActivationState = %s (%u)", direction, MBIMActivationStateStr(le32toh(pInfo->ActivationState)), le32toh(pInfo->ActivationState)); + mbim_debug("%s IPType = %s", direction, MBIMContextIPTypeStr(le32toh(pInfo->IPType))); + mbim_debug("%s VoiceCallState = %s", direction, MBIMVoiceCallStateStr(le32toh(pInfo->VoiceCallState))); + mbim_debug("%s ContextType = %s", direction, uuid2str(&pInfo->ContextType)); + mbim_debug("%s NwError = %u", direction, le32toh(pInfo->NwError)); +} + +static void mbim_dump_signal_state(MBIM_SIGNAL_STATE_INFO_T *pInfo, const char *direction) +{ + mbim_debug("%s Rssi = %u", direction, le32toh(pInfo->Rssi)); + mbim_debug("%s ErrorRate = %u", direction, le32toh(pInfo->ErrorRate)); + mbim_debug("%s SignalStrengthInterval = %u", direction, le32toh(pInfo->SignalStrengthInterval)); + mbim_debug("%s RssiThreshold = %u", direction, le32toh(pInfo->RssiThreshold)); + mbim_debug("%s ErrorRateThreshold = %u", direction, le32toh(pInfo->ErrorRateThreshold)); +} + +static void mbim_dump_packet_service(MBIM_PACKET_SERVICE_INFO_T *pInfo, const char *direction) +{ + mbim_debug("%s NwError = %u", direction, le32toh(pInfo->NwError)); + mbim_debug("%s PacketServiceState = %s", direction, MBIMPacketServiceStateStr(le32toh(pInfo->PacketServiceState))); + mbim_debug("%s HighestAvailableDataClass = %s", direction, MBIMDataClassStr(le32toh(pInfo->HighestAvailableDataClass))); + mbim_debug("%s UplinkSpeed = %ld", direction, (long)le64toh(pInfo->UplinkSpeed)); + mbim_debug("%s DownlinkSpeed = %ld", direction, (long)le64toh(pInfo->DownlinkSpeed)); +} + +static void mbim_dump_subscriber_status(MBIM_SUBSCRIBER_READY_STATUS_T *pInfo, const char *direction) +{ + mbim_debug("%s ReadyState = %s", direction, MBIMSubscriberReadyStateStr(le32toh(pInfo->ReadyState))); + mbim_debug("%s SIMICCID = %s", direction, mbim_get_segment(pInfo, le32toh(pInfo->SimIccIdOffset), le32toh(pInfo->SimIccIdSize))); + mbim_debug("%s SubscriberID = %s", direction, mbim_get_segment(pInfo, le32toh(pInfo->SubscriberIdOffset), le32toh(pInfo->SubscriberIdSize))); + /* maybe more than one number */ + uint32_t idx; + for (idx = 0; idx < le32toh(pInfo->ElementCount); idx++) { + UINT32 offset = ((UINT32*)((UINT8*)pInfo+offsetof(MBIM_SUBSCRIBER_READY_STATUS_T, TelephoneNumbersRefList)))[0]; + UINT32 length = ((UINT32*)((UINT8*)pInfo+offsetof(MBIM_SUBSCRIBER_READY_STATUS_T, TelephoneNumbersRefList)))[1]; + mbim_debug("%s Number = %s", direction, mbim_get_segment(pInfo, le32toh(offset), le32toh(length))); + } +} + +static void mbim_dump_regiester_status(MBIM_REGISTRATION_STATE_INFO_T 
*pInfo, const char *direction) +{ + mbim_debug("%s NwError = %u", direction, le32toh(pInfo->NwError)); + mbim_debug("%s RegisterState = %s", direction, MBIMRegisterStateStr(le32toh(pInfo->RegisterState))); + mbim_debug("%s RegisterMode = %s", direction, MBIMRegisterModeStr(le32toh(pInfo->RegisterMode))); +} + +static void mbim_dump_ipconfig(MBIM_IP_CONFIGURATION_INFO_T *pInfo, const char *direction) +{ + UINT8 prefix = 0, *ipv4=NULL, *ipv6=NULL, *gw=NULL, *dns1=NULL, *dns2=NULL; + + mbim_debug("%s SessionId = %u", direction, le32toh(pInfo->SessionId)); + mbim_debug("%s IPv4ConfigurationAvailable = 0x%x", direction, le32toh(pInfo->IPv4ConfigurationAvailable)); + mbim_debug("%s IPv6ConfigurationAvailable = 0x%x", direction, le32toh(pInfo->IPv6ConfigurationAvailable)); + mbim_debug("%s IPv4AddressCount = 0x%x", direction, le32toh(pInfo->IPv4AddressCount)); + mbim_debug("%s IPv4AddressOffset = 0x%x", direction, le32toh(pInfo->IPv4AddressOffset)); + mbim_debug("%s IPv6AddressCount = 0x%x", direction, le32toh(pInfo->IPv6AddressCount)); + mbim_debug("%s IPv6AddressOffset = 0x%x", direction, le32toh(pInfo->IPv6AddressOffset)); + + /* IPv4 */ + if (le32toh(pInfo->IPv4ConfigurationAvailable)&0x1) { + MBIM_IPV4_ELEMENT_T *pAddress = (MBIM_IPV4_ELEMENT_T *)(&pInfo->DataBuffer[le32toh(pInfo->IPv4AddressOffset)-sizeof(MBIM_IP_CONFIGURATION_INFO_T)]); + prefix = le32toh(pAddress->OnLinkPrefixLength); + ipv4 = pAddress->IPv4Address; + mbim_debug("%s IPv4 = %u.%u.%u.%u/%u", direction, ipv4[0], ipv4[1], ipv4[2], ipv4[3], prefix); + } + if (le32toh(pInfo->IPv4ConfigurationAvailable)&0x2) { + gw = (UINT8 *)(&pInfo->DataBuffer[le32toh(pInfo->IPv4GatewayOffset)-sizeof(MBIM_IP_CONFIGURATION_INFO_T)]); + mbim_debug("%s gw = %u.%u.%u.%u", direction, gw[0], gw[1], gw[2], gw[3]); + } + if (le32toh(pInfo->IPv4ConfigurationAvailable)&0x3) { + dns1 = (UINT8 *)(&pInfo->DataBuffer[le32toh(pInfo->IPv4DnsServerOffset) -sizeof(MBIM_IP_CONFIGURATION_INFO_T)]); + mbim_debug("%s dns1 = %u.%u.%u.%u", direction, dns1[0], dns1[1], dns1[2], dns1[3]); + if (le32toh(pInfo->IPv4DnsServerCount) == 2) { + dns2 = dns1 + 4; + mbim_debug("%s dns2 = %u.%u.%u.%u", direction, dns2[0], dns2[1], dns2[2], dns2[3]); + } + } + if (le32toh(pInfo->IPv4Mtu)) mbim_debug("%s ipv4 mtu = %u", direction, le32toh(pInfo->IPv4Mtu)); + + /* IPv6 */ + if (le32toh(pInfo->IPv6ConfigurationAvailable)&0x1) { + MBIM_IPV6_ELEMENT_T *pAddress = (MBIM_IPV6_ELEMENT_T *)(&pInfo->DataBuffer[le32toh(pInfo->IPv6AddressOffset)-sizeof(MBIM_IP_CONFIGURATION_INFO_T)]); + prefix = le32toh(pAddress->OnLinkPrefixLength); + ipv6 = pAddress->IPv6Address; + mbim_debug("%s IPv6 = %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x/%d", \ + direction, ipv6[0], ipv6[1], ipv6[2], ipv6[3], ipv6[4], ipv6[5], ipv6[6], ipv6[7], \ + ipv6[8], ipv6[9], ipv6[10], ipv6[11], ipv6[12], ipv6[13], ipv6[14], ipv6[15], prefix); + } + if (le32toh(pInfo->IPv6ConfigurationAvailable)&0x2) { + gw = (UINT8 *)(&pInfo->DataBuffer[le32toh(pInfo->IPv6GatewayOffset)-sizeof(MBIM_IP_CONFIGURATION_INFO_T)]); + mbim_debug("%s gw = %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x", \ + direction, gw[0], gw[1], gw[2], gw[3], gw[4], gw[5], gw[6], gw[7], \ + gw[8], gw[9], gw[10], gw[11], gw[12], gw[13], gw[14], gw[15]); + } + if (le32toh(pInfo->IPv6ConfigurationAvailable)&0x3) { + dns1 = (UINT8 *)(&pInfo->DataBuffer[le32toh(pInfo->IPv6DnsServerOffset)-sizeof(MBIM_IP_CONFIGURATION_INFO_T)]); + mbim_debug("%s dns1 = 
%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x", \
+            direction, dns1[0], dns1[1], dns1[2], dns1[3], dns1[4], dns1[5], dns1[6], dns1[7], \
+            dns1[8], dns1[9], dns1[10], dns1[11], dns1[12], dns1[13], dns1[14], dns1[15]);
+        if (le32toh(pInfo->IPv6DnsServerCount) == 2) {
+            dns2 = dns1 + 16;
+            mbim_debug("%s dns2 = %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x", \
+                direction, dns2[0], dns2[1], dns2[2], dns2[3], dns2[4], dns2[5], dns2[6], dns2[7],
+                dns2[8], dns2[9], dns2[10], dns2[11], dns2[12], dns2[13], dns2[14], dns2[15]);
+        }
+    }
+    if (le32toh(pInfo->IPv6Mtu)) mbim_debug("%s ipv6 mtu = %u", direction, le32toh(pInfo->IPv6Mtu));
+}
+
+static void mbim_dump(MBIM_MESSAGE_HEADER *pMsg, int verbose) {
+    unsigned char *data = (unsigned char *)pMsg;
+    const char *direction = (le32toh(pMsg->MessageType) & 0x80000000) ? "<" : ">";
+
+    if (!verbose)
+        return;
+
+    if (verbose) {
+        unsigned i;
+        static char *_tmp = NULL;
+
+        if (!_tmp)
+            _tmp = (char *)malloc(4096);
+
+        if (_tmp) {
+            _tmp[0] = (le32toh(pMsg->MessageType) & 0x80000000) ? '<' : '>';
+            _tmp[1] = '\0';
+            for (i = 0; i < le32toh(pMsg->MessageLength) && i < 4096; i++)
+                snprintf(_tmp + strlen(_tmp), 4096 - strlen(_tmp), "%02X:", data[i]);
+            mbim_debug("%s", _tmp);
+        }
+    }
+
+    mbim_dump_header(pMsg, direction);
+
+    switch (le32toh(pMsg->MessageType)) {
+    case MBIM_OPEN_MSG: {
+        MBIM_OPEN_MSG_T *pOpenMsg = (MBIM_OPEN_MSG_T *)pMsg;
+        mbim_debug("%s MaxControlTransfer = %u", direction, le32toh(pOpenMsg->MaxControlTransfer));
+    }
+    break;
+    case MBIM_OPEN_DONE: {
+        MBIM_OPEN_DONE_T *pOpenDone = (MBIM_OPEN_DONE_T *)pMsg;
+        mbim_debug("%s Status = %u", direction, le32toh(pOpenDone->Status));
+    }
+    break;
+    case MBIM_CLOSE_MSG: {
+    }
+    break;
+    case MBIM_CLOSE_DONE: {
+        MBIM_CLOSE_DONE_T *pCloseDone = (MBIM_CLOSE_DONE_T *)pMsg;
+        mbim_debug("%s Status = %u", direction, le32toh(pCloseDone->Status));
+    }
+    break;
+    case MBIM_COMMAND_MSG: {
+        MBIM_COMMAND_MSG_T *pCmdMsg = (MBIM_COMMAND_MSG_T *)pMsg;
+
+        mbim_dump_command_msg(pCmdMsg, direction);
+        if (!memcmp(pCmdMsg->DeviceServiceId.uuid, str2uuid(UUID_BASIC_CONNECT), 16)) {
+            switch (le32toh(pCmdMsg->CID)) {
+            case MBIM_CID_CONNECT: {
+                MBIM_SET_CONNECT_T *pInfo = (MBIM_SET_CONNECT_T *)pCmdMsg->InformationBuffer;
+                mbim_debug("%s SessionId = %u", direction, le32toh(pInfo->SessionId));
+            }
+            break;
+            case MBIM_CID_IP_CONFIGURATION: {
+                MBIM_IP_CONFIGURATION_INFO_T *pInfo = (MBIM_IP_CONFIGURATION_INFO_T *)pCmdMsg->InformationBuffer;
+                mbim_debug("%s SessionId = %u", direction, le32toh(pInfo->SessionId));
+            }
+            break;
+            default:
+            break;
+            }
+        }
+    }
+    break;
+    case MBIM_COMMAND_DONE: {
+        MBIM_COMMAND_DONE_T *pCmdDone = (MBIM_COMMAND_DONE_T *)pMsg;
+
+        mbim_dump_command_done(pCmdDone, direction);
+        if (le32toh(pCmdDone->InformationBufferLength) == 0)
+            return;
+
+        if (!memcmp(pCmdDone->DeviceServiceId.uuid, str2uuid(UUID_BASIC_CONNECT), 16)) {
+            switch (le32toh(pCmdDone->CID)) {
+            case MBIM_CID_CONNECT: {
+                MBIM_CONNECT_T *pInfo = (MBIM_CONNECT_T *)pCmdDone->InformationBuffer;
+                mbim_dump_connect(pInfo, direction);
+            }
+            break;
+            case MBIM_CID_IP_CONFIGURATION: {
+                //MBIM_IP_CONFIGURATION_INFO_T *pInfo = (MBIM_IP_CONFIGURATION_INFO_T *)pCmdDone->InformationBuffer;
+                //mbim_dump_ipconfig(pInfo, direction);
+            }
+            break;
+            case MBIM_CID_PACKET_SERVICE: {
+                MBIM_PACKET_SERVICE_INFO_T *pInfo = (MBIM_PACKET_SERVICE_INFO_T *)pCmdDone->InformationBuffer;
+                mbim_dump_packet_service(pInfo, direction);
+            }
+            break;
+            case
MBIM_CID_SUBSCRIBER_READY_STATUS: { + MBIM_SUBSCRIBER_READY_STATUS_T *pInfo = (MBIM_SUBSCRIBER_READY_STATUS_T *)pCmdDone->InformationBuffer; + mbim_dump_subscriber_status(pInfo, direction); + } + break; + case MBIM_CID_REGISTER_STATE: { + MBIM_REGISTRATION_STATE_INFO_T *pInfo = (MBIM_REGISTRATION_STATE_INFO_T *)pCmdDone->InformationBuffer; + mbim_dump_regiester_status(pInfo, direction); + } + break; + default: + break; + } + } + } + break; + case MBIM_INDICATE_STATUS_MSG: { + MBIM_INDICATE_STATUS_MSG_T *pIndMsg = (MBIM_INDICATE_STATUS_MSG_T *)pMsg; + + mbim_dump_indicate_msg(pIndMsg, direction); + if (le32toh(pIndMsg->InformationBufferLength) == 0) + return; + + if (!memcmp(pIndMsg->DeviceServiceId.uuid, str2uuid(UUID_BASIC_CONNECT), 16)) { + switch (le32toh(pIndMsg->CID)) { + case MBIM_CID_CONNECT: { + MBIM_CONNECT_T *pInfo = (MBIM_CONNECT_T *)pIndMsg->InformationBuffer; + mbim_dump_connect(pInfo, direction); + } + break; + case MBIM_CID_SIGNAL_STATE: { + MBIM_SIGNAL_STATE_INFO_T *pInfo = (MBIM_SIGNAL_STATE_INFO_T *)pIndMsg->InformationBuffer; + mbim_dump_signal_state(pInfo, direction); + } + break; + case MBIM_CID_SUBSCRIBER_READY_STATUS: { + MBIM_SUBSCRIBER_READY_STATUS_T *pInfo = (MBIM_SUBSCRIBER_READY_STATUS_T *)pIndMsg->InformationBuffer; + mbim_dump_subscriber_status(pInfo, direction); + } + break; + case MBIM_CID_REGISTER_STATE: { + MBIM_REGISTRATION_STATE_INFO_T *pInfo = (MBIM_REGISTRATION_STATE_INFO_T *)pIndMsg->InformationBuffer; + mbim_dump_regiester_status(pInfo, direction); + } + break; + case MBIM_CID_PACKET_SERVICE: { + MBIM_PACKET_SERVICE_INFO_T *pInfo = (MBIM_PACKET_SERVICE_INFO_T *)pIndMsg->InformationBuffer; + mbim_dump_packet_service(pInfo, direction); + } + break; + default: + break; + } + } + else if (!memcmp(pIndMsg->DeviceServiceId.uuid, str2uuid(UUID_BASIC_CONNECT_EXT), 16)) { + } + } + break; + case MBIM_FUNCTION_ERROR_MSG: { + MBIM_FUNCTION_ERROR_MSG_T *pErrMsg = (MBIM_FUNCTION_ERROR_MSG_T*)pMsg; + mbim_debug("%s ErrorStatusCode = %u", direction, le32toh(pErrMsg->ErrorStatusCode)); + } + break; + default: + break; + } +} + +static void mbim_recv_command(MBIM_MESSAGE_HEADER *pResponse, unsigned size) +{ + (void)size; + pthread_mutex_lock(&mbim_command_mutex); + + if (pResponse) + mbim_dump(pResponse, mbim_verbose); + + if (pResponse == NULL) { + pthread_cond_signal(&mbim_command_cond); + } + else if (mbim_pRequest && le32toh(mbim_pRequest->TransactionId) == le32toh(pResponse->TransactionId)) { + mbim_pResponse = mbim_alloc(le32toh(pResponse->MessageLength)); + if (mbim_pResponse) + memcpy(mbim_pResponse, pResponse, le32toh(pResponse->MessageLength)); + pthread_cond_signal(&mbim_command_cond); + } + else if (le32toh(pResponse->MessageType) == MBIM_INDICATE_STATUS_MSG) { + MBIM_INDICATE_STATUS_MSG_T *pIndMsg = (MBIM_INDICATE_STATUS_MSG_T *)pResponse; + + if (!memcmp(pIndMsg->DeviceServiceId.uuid, str2uuid(UUID_BASIC_CONNECT), 16)) + { + switch (le32toh(pIndMsg->CID)) { + case MBIM_CID_SUBSCRIBER_READY_STATUS: { + MBIM_SUBSCRIBER_READY_STATUS_T *pInfo = (MBIM_SUBSCRIBER_READY_STATUS_T *)pIndMsg->InformationBuffer; + if (oldReadyState != le32toh(pInfo->ReadyState)) + qmidevice_send_event_to_main(RIL_UNSOL_RESPONSE_VOICE_NETWORK_STATE_CHANGED); + } + break; + case MBIM_CID_REGISTER_STATE: { + MBIM_REGISTRATION_STATE_INFO_T *pInfo = (MBIM_REGISTRATION_STATE_INFO_T *)pIndMsg->InformationBuffer; + if (oldRegisterState != le32toh(pInfo->RegisterState)) + qmidevice_send_event_to_main(RIL_UNSOL_RESPONSE_VOICE_NETWORK_STATE_CHANGED); + } + break; + case 
MBIM_CID_PACKET_SERVICE: { + MBIM_PACKET_SERVICE_INFO_T *pInfo = (MBIM_PACKET_SERVICE_INFO_T *)pIndMsg->InformationBuffer; + MBIM_PACKET_SERVICE_STATE_E state = le32toh(pInfo->PacketServiceState); + + if (oldPacketServiceState != state + && (1 || MBIMPacketServiceStateAttached == state || MBIMPacketServiceStateDetached == state)) + qmidevice_send_event_to_main(RIL_UNSOL_RESPONSE_VOICE_NETWORK_STATE_CHANGED); + } + break; + case MBIM_CID_CONNECT: { + MBIM_CONNECT_T *pInfo = (MBIM_CONNECT_T *)pIndMsg->InformationBuffer; + if (pInfo->SessionId == (uint32_t)mbim_sessionID) { + MBIM_ACTIVATION_STATE_E state = le32toh(pInfo->ActivationState); + + if (oldActivationState != state + && (1 || MBIMActivationStateActivated == state || MBIMActivationStateDeactivated == state)) + qmidevice_send_event_to_main(RIL_UNSOL_DATA_CALL_LIST_CHANGED); + } + } + break; + default: + break; + } + } + } + + pthread_mutex_unlock(&mbim_command_mutex); +} + +static int mbim_send_command(MBIM_MESSAGE_HEADER *pRequest, MBIM_COMMAND_DONE_T **ppCmdDone, unsigned msecs) { + int ret; + + if (ppCmdDone) + *ppCmdDone = NULL; + + if (mbim_fd <= 0) + return -ENODEV; + + if (s_tid_reader == 0) + return -EINVAL; + + if (!pRequest) + return -ENOMEM; + + pthread_mutex_lock(&mbim_command_mutex); + + if (pRequest) { + if (pRequest->TransactionId == (0xFFFFFF + 1)) { //quectel-mbim-proxy need 0xFF000000 to indicat client + TransactionId = 1; + pRequest->TransactionId = htole32(TransactionId++); + } + mbim_dump(pRequest, mbim_verbose); + } + + mbim_pRequest = pRequest; + mbim_pResponse = NULL; + + ret = write(mbim_fd, pRequest, le32toh(pRequest->MessageLength)); + + if (ret > 0 && (uint32_t)ret == le32toh(pRequest->MessageLength)) { + ret = pthread_cond_timeout_np(&mbim_command_cond, &mbim_command_mutex, msecs); + if (!ret) { + if (mbim_pResponse && ppCmdDone) { + *ppCmdDone = (MBIM_COMMAND_DONE_T *)mbim_pResponse; + } + } + } else { + mbim_debug("%s pthread_cond_timeout_np=%d", __func__, ret); + } + + mbim_pRequest = mbim_pResponse = NULL; + + pthread_mutex_unlock(&mbim_command_mutex); + + return ret; +} + +static ssize_t mbim_proxy_read (int fd, MBIM_MESSAGE_HEADER *pResponse, size_t size) { + ssize_t nreads; + + nreads = read(fd, pResponse, sizeof(MBIM_MESSAGE_HEADER)); + if (nreads == sizeof(MBIM_MESSAGE_HEADER) && le32toh(pResponse->MessageLength) <= size) { + nreads += read(fd, pResponse+1, le32toh(pResponse->MessageLength) - sizeof(MBIM_MESSAGE_HEADER)); + } + + return nreads; +} + +static void * mbim_read_thread(void *param) { + PROFILE_T *profile = (PROFILE_T *)param; + const char *cdc_wdm = (const char *)profile->qmichannel; + int wait_for_request_quit = 0; + + mbim_verbose = debug_qmi; + s_tid_reader = pthread_self(); + + if (profile->qmap_mode > 1 && profile->qmapnet_adapter[0]) { + if (!profile->proxy[0]) + sprintf(profile->proxy, "%s", QUECTEL_MBIM_PROXY); + mbim_sessionID = profile->pdp; + } + + if (profile->proxy[0]) { + mbim_fd = cm_open_proxy(profile->proxy); + } + else { + mbim_fd = cm_open_dev(cdc_wdm); + } + + if (mbim_fd <= 0) { + mbim_debug("fail to open (%s), errno: %d (%s)", cdc_wdm, errno, strerror(errno)); + goto __quit; + } + + dbg_time("cdc_wdm_fd = %d", mbim_fd); + + qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_CONNECTED); + + while (mbim_fd > 0) { + struct pollfd pollfds[] = {{mbim_fd, POLLIN, 0}, {qmidevice_control_fd[1], POLLIN, 0}, {qmi_over_mbim_sk[1], POLLIN, 0}}; + int ne, ret, nevents = 2; + + if (pollfds[nevents].fd != -1) + nevents++; + + ret = poll(pollfds, nevents, wait_for_request_quit ? 
1000 : -1); + + if (ret == 0 && wait_for_request_quit) { + break; + } + + if (ret < 0) { + mbim_debug("%s poll=%d, errno: %d (%s)", __func__, ret, errno, strerror(errno)); + break; + } + + for (ne = 0; ne < nevents; ne++) { + int fd = pollfds[ne].fd; + short revents = pollfds[ne].revents; + + if (revents & (POLLERR | POLLHUP | POLLNVAL)) { + mbim_debug("%s poll err/hup/inval", __func__); + mbim_debug("epoll fd = %d, events = 0x%04x", fd, revents); + if (revents & (POLLERR | POLLHUP | POLLNVAL)) + goto __quit; + } + + if ((revents & POLLIN) == 0) + continue; + + if (mbim_fd == fd) { + ssize_t nreads; + MBIM_MESSAGE_HEADER *pResponse = (MBIM_MESSAGE_HEADER *) cm_recv_buf; + + if (profile->proxy[0]) + nreads = mbim_proxy_read(fd, pResponse, sizeof(cm_recv_buf)); + else + nreads = read(fd, pResponse, sizeof(cm_recv_buf)); + + if (nreads <= 0) { + mbim_debug("%s read=%d errno: %d (%s)", __func__, (int)nreads, errno, strerror(errno)); + break; + } + + mbim_recv_command(pResponse, nreads); + } + else if (fd == qmidevice_control_fd[1]) { + int triger_event; + if (read(fd, &triger_event, sizeof(triger_event)) == sizeof(triger_event)) { + //mbim_debug("triger_event = 0x%x", triger_event); + switch (triger_event) { + case RIL_REQUEST_QUIT: + goto __quit; + break; + case SIG_EVENT_STOP: + wait_for_request_quit = 1; + break; + default: + break; + } + } + } + else if (fd == qmi_over_mbim_sk[1]) { + ssize_t nreads = read(fd, cm_recv_buf, sizeof(cm_recv_buf)); + if (nreads > 0) + QmiThreadRecvQMI((PQCQMIMSG)cm_recv_buf); + } + } + } + +__quit: + if (mbim_fd != -1) { close(mbim_fd); mbim_fd = -1; } + mbim_recv_command(NULL, 0); + qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_DISCONNECTED); + mbim_debug("%s exit", __func__); + s_tid_reader = 0; + + return NULL; +} + +static int mbim_status_code(MBIM_MESSAGE_HEADER *pMsgHdr) { + int status = 0; + + if (!pMsgHdr) + return 0; + + switch (le32toh(pMsgHdr->MessageType)) { + case MBIM_OPEN_DONE: { + MBIM_OPEN_DONE_T *pOpenDone = (MBIM_OPEN_DONE_T *)pMsgHdr; + status = le32toh(pOpenDone->Status); + } + break; + case MBIM_CLOSE_DONE: { + MBIM_CLOSE_DONE_T *pCloseDone = (MBIM_CLOSE_DONE_T *)pMsgHdr; + status = le32toh(pCloseDone->Status); + } + break; + case MBIM_COMMAND_DONE: { + MBIM_COMMAND_DONE_T *pCmdDone = (MBIM_COMMAND_DONE_T *)pMsgHdr; + status = le32toh(pCmdDone->Status); + } + break; + case MBIM_FUNCTION_ERROR_MSG: { + MBIM_FUNCTION_ERROR_MSG_T *pErrMsg = (MBIM_FUNCTION_ERROR_MSG_T *)pMsgHdr; + status = le32toh(pErrMsg->ErrorStatusCode); + if (status == MBIM_ERROR_NOT_OPENED) + mbim_open_state = 0; //EM06ELAR03A05M4G when suspend/resume, may get this error + } + break; + default: + break; + } + + return status; +} + +#define mbim_check_err(err, pRequest, pCmdDone) do { \ + int _status = mbim_status_code(pCmdDone ? 
&pCmdDone->MessageHeader : NULL); \ + if (err || _status || !pCmdDone) { \ + if (pCmdDone) { mbim_dump(&pCmdDone->MessageHeader, (mbim_verbose == 0)); } \ + mbim_free(pRequest); mbim_free(pCmdDone); \ + mbim_debug("%s:%d err=%d, Status=%d", __func__, __LINE__, err, _status); \ + if (err) return err; \ + if (_status) return _status; \ + return 8888; \ + } \ +} while(0) + +/* + * MBIM device can be open repeatly without error + * So, we can call the function, no matter it have been opened or not + */ +static int mbim_open_device(uint32_t MaxControlTransfer) { + MBIM_MESSAGE_HEADER *pRequest = NULL; + MBIM_OPEN_DONE_T *pOpenDone = NULL; + int err = 0; + + mbim_debug("%s()", __func__); + pRequest = compose_open_command(MaxControlTransfer); + err = mbim_send_command(pRequest, (MBIM_COMMAND_DONE_T **)&pOpenDone, 3*1000); //EM06ELAR03A09M4G take about 2.5 seconds + mbim_check_err(err, pRequest, pOpenDone); + + err = le32toh(pOpenDone->Status); + mbim_free(pRequest); mbim_free(pOpenDone); + + return err; +} + +static int mbim_close_device(void) { + MBIM_MESSAGE_HEADER *pRequest = NULL; + MBIM_CLOSE_DONE_T *pCloseDone = NULL; + int err = 0; + + mbim_debug("%s()", __func__); + pRequest = compose_close_command(); + err = mbim_send_command(pRequest, (MBIM_COMMAND_DONE_T **)&pCloseDone, mbim_default_timeout); + mbim_check_err(err, pRequest, pCloseDone); + + err = le32toh(pCloseDone->Status); + mbim_free(pRequest); mbim_free(pCloseDone); + + return err; +} + +static int mbim_query_connect(int sessionID) { + MBIM_MESSAGE_HEADER *pRequest = NULL; + MBIM_COMMAND_DONE_T *pCmdDone = NULL; + MBIM_SET_CONNECT_T set_connect; + int err; + + if (ActivationState != MBIMActivationStateActivated || mbim_verbose) + mbim_debug("%s(sessionID=%d)", __func__, sessionID); //avoid two many log + set_connect.SessionId = htole32(sessionID); + pRequest = compose_basic_connect_command(MBIM_CID_CONNECT, MBIM_CID_CMD_TYPE_QUERY, &set_connect, sizeof(set_connect)); + err = mbim_send_command(pRequest, &pCmdDone, mbim_default_timeout); + mbim_check_err(err, pRequest, pCmdDone); + + if (le32toh(pCmdDone->InformationBufferLength)) + { + MBIM_CONNECT_T *pInfo = (MBIM_CONNECT_T *)pCmdDone->InformationBuffer; + ActivationState = le32toh(pInfo->ActivationState); + mbim_update_state(); + } + mbim_free(pRequest); mbim_free(pCmdDone); + return err; +} + +static int mbim_ms_version_query(void) { + MBIM_MESSAGE_HEADER *pRequest = NULL; + MBIM_COMMAND_DONE_T *pCmdDone = NULL; + int err; + + struct _bc_ext_version { + UINT8 ver_minor; + UINT8 ver_major; + UINT8 ext_ver_minor; + UINT8 ext_ver_major; + } __attribute__ ((packed)) bc_ext_version; + + bc_ext_version.ver_major = 1; + bc_ext_version.ver_minor = 0; + bc_ext_version.ext_ver_major = 2; + bc_ext_version.ext_ver_minor = 0; + + pRequest = compose_basic_connect_ext_command(MBIM_CID_MS_VERSION, MBIM_CID_CMD_TYPE_QUERY, &bc_ext_version, sizeof(bc_ext_version)); + err = mbim_send_command(pRequest, &pCmdDone, mbim_default_timeout); + mbim_check_err(err, pRequest, pCmdDone); + + if (le32toh(pCmdDone->InformationBufferLength)) { + struct _bc_ext_version *pInfo = (struct _bc_ext_version *)pCmdDone->InformationBuffer; + //mbim_debug("%s ext_rel_ver major=%d, minor=%d", __func__, pInfo->ext_ver_major, pInfo->ext_ver_minor); + mbim_ms_version = pInfo->ext_ver_major; + } + + mbim_free(pRequest); mbim_free(pCmdDone); + return err; +} + +static int mbim_device_services_query(void) { + MBIM_MESSAGE_HEADER *pRequest = NULL; + MBIM_COMMAND_DONE_T *pCmdDone = NULL; + int err; + int mbim_v2_support = 0; + + 
mbim_debug("%s()", __func__); + pRequest = compose_basic_connect_command(MBIM_CID_DEVICE_SERVICES, MBIM_CID_CMD_TYPE_QUERY, NULL, 0); + err = mbim_send_command(pRequest, &pCmdDone, mbim_default_timeout); + mbim_check_err(err, pRequest, pCmdDone); + + if (pCmdDone->InformationBufferLength) { + MBIM_DEVICE_SERVICES_INFO_T *pInfo = (MBIM_DEVICE_SERVICES_INFO_T *)pCmdDone->InformationBuffer; + UINT32 i; + + for (i = 0; i < le32toh(pInfo->DeviceServicesCount) ; i++) { + //UINT32 size = pInfo->DeviceServicesRefList[i].size; + UINT32 offset = le32toh(pInfo->DeviceServicesRefList[i].offset); + MBIM_DEVICE_SERVICE_ELEMENT_T *pSrvEle = (MBIM_DEVICE_SERVICE_ELEMENT_T *)((void *)pInfo + offset); + + //mbim_debug("\t[%2d] %s (%s)", i, DeviceServiceId2str(&pSrvEle->DeviceServiceId), uuid2str(&pSrvEle->DeviceServiceId)); + if (!strcasecmp(UUID_BASIC_CONNECT_EXT, uuid2str(&pSrvEle->DeviceServiceId))) { + UINT32 cid = 0; + + for (cid = 0; cid < le32toh(pSrvEle->CidCount); cid++) { + if (MBIM_CID_MS_VERSION == le32toh(pSrvEle->CidList[cid])) { + mbim_v2_support = 1; + } + } + } + else if (!strcasecmp(uuid_ext_qmux, uuid2str(&pSrvEle->DeviceServiceId))) { + qmi_over_mbim_support = 1; + } + } + } + mbim_free(pRequest); mbim_free(pCmdDone); + + if (mbim_v2_support) { + mbim_ms_version_query(); + } + + return err; +} + +static int mbim_device_caps_query(PROFILE_T *profile) { + MBIM_MESSAGE_HEADER *pRequest = NULL; + MBIM_COMMAND_DONE_T *pCmdDone = NULL; + int err; + + mbim_debug("%s()", __func__); + pRequest = compose_basic_connect_command(MBIM_CID_DEVICE_CAPS, MBIM_CID_CMD_TYPE_QUERY, NULL, 0); + err = mbim_send_command(pRequest, &pCmdDone, mbim_default_timeout); + mbim_check_err(err, pRequest, pCmdDone); + + if (le32toh(pCmdDone->InformationBufferLength)) { + MBIM_DEVICE_CAPS_INFO_T *pInfo = (MBIM_DEVICE_CAPS_INFO_T *)pCmdDone->InformationBuffer; + char tmp[32]; + + if (le32toh(pInfo->DeviceIdOffset) && le32toh(pInfo->DeviceIdSize)) { + wchar2char((const char *)pInfo + le32toh(pInfo->DeviceIdOffset), le32toh(pInfo->DeviceIdSize), tmp, sizeof(tmp)); + mbim_debug("DeviceId: %s", tmp); + } + if (le32toh(pInfo->FirmwareInfoOffset) && le32toh(pInfo->FirmwareInfoSize)) { + wchar2char((const char *)pInfo + le32toh(pInfo->FirmwareInfoOffset), le32toh(pInfo->FirmwareInfoSize), tmp, sizeof(tmp)); + strncpy(profile->BaseBandVersion, tmp, sizeof(profile->BaseBandVersion)); + mbim_debug("FirmwareInfo: %s", tmp); + } + if (le32toh(pInfo->HardwareInfoOffset) && le32toh(pInfo->HardwareInfoSize)) { + wchar2char((const char *)pInfo + le32toh(pInfo->HardwareInfoOffset), le32toh(pInfo->HardwareInfoSize), tmp, sizeof(tmp)); + mbim_debug("HardwareInfo: %s", tmp); + } + } + mbim_free(pRequest); mbim_free(pCmdDone); + return err; +} + +#if 0 +static int mbim_radio_state_query(void) { + MBIM_MESSAGE_HEADER *pRequest = NULL; + MBIM_COMMAND_DONE_T *pCmdDone = NULL; + int err; + + mbim_debug("%s()", __func__); + pRequest = compose_basic_connect_command(MBIM_CID_RADIO_STATE, MBIM_CID_CMD_TYPE_QUERY, NULL, 0); + err = mbim_send_command(pRequest, &pCmdDone, mbim_default_timeout); + mbim_check_err(err, pRequest, pCmdDone); + + if (pCmdDone->InformationBufferLength) { + MBIM_RADIO_STATE_INFO_T *pInfo = (MBIM_RADIO_STATE_INFO_T *)pCmdDone->InformationBuffer; + mbim_debug("HwRadioState: %d, SwRadioState: %d", pInfo->HwRadioState, pInfo->SwRadioState); + } + mbim_free(pRequest); mbim_free(pCmdDone); + return err; +} +#endif + +static int mbim_set_radio_state(MBIM_RADIO_SWITCH_STATE_E RadioState) { + MBIM_MESSAGE_HEADER *pRequest = NULL; + 
MBIM_COMMAND_DONE_T *pCmdDone = NULL; + UINT32 value = htole32(RadioState); + int err; + + mbim_debug("%s( %d )", __func__, RadioState); + pRequest = compose_basic_connect_command(MBIM_CID_RADIO_STATE, MBIM_CID_CMD_TYPE_SET, &value, sizeof(value)); + err = mbim_send_command(pRequest, &pCmdDone, mbim_default_timeout); + mbim_check_err(err, pRequest, pCmdDone); + + if (le32toh(pCmdDone->InformationBufferLength)) { + MBIM_RADIO_STATE_INFO_T *pInfo = (MBIM_RADIO_STATE_INFO_T *)pCmdDone->InformationBuffer; + mbim_debug("HwRadioState: %d, SwRadioState: %d", le32toh(pInfo->HwRadioState), le32toh(pInfo->SwRadioState)); + } + mbim_free(pRequest); mbim_free(pCmdDone); + return err; +} + +static int mbim_subscriber_status_query(void) { + MBIM_MESSAGE_HEADER *pRequest = NULL; + MBIM_COMMAND_DONE_T *pCmdDone = NULL; + int err; + + mbim_debug("%s()", __func__); + pRequest = compose_basic_connect_command(MBIM_CID_SUBSCRIBER_READY_STATUS, MBIM_CID_CMD_TYPE_QUERY, NULL, 0); + err = mbim_send_command(pRequest, &pCmdDone, mbim_default_timeout); + mbim_check_err(err, pRequest, pCmdDone); + + if (le32toh(pCmdDone->InformationBufferLength)) { + MBIM_SUBSCRIBER_READY_STATUS_T *pInfo = (MBIM_SUBSCRIBER_READY_STATUS_T *)pCmdDone->InformationBuffer; + char tmp[32]; + + wchar2char((const char *)pInfo + le32toh(pInfo->SubscriberIdOffset), le32toh(pInfo->SubscriberIdSize), tmp, sizeof(tmp)); + mbim_debug("SubscriberId: %s", tmp); + wchar2char((const char *)pInfo + le32toh(pInfo->SimIccIdOffset), le32toh(pInfo->SimIccIdSize), tmp, sizeof(tmp)); + mbim_debug("SimIccId: %s", tmp); + ReadyState = le32toh(pInfo->ReadyState); + mbim_update_state(); + } + mbim_free(pRequest); mbim_free(pCmdDone); + return err; +} + +static int mbim_register_state_query(void) { + MBIM_MESSAGE_HEADER *pRequest = NULL; + MBIM_COMMAND_DONE_T *pCmdDone = NULL; + int err; + + mbim_debug("%s()", __func__); + pRequest = compose_basic_connect_command(MBIM_CID_REGISTER_STATE, MBIM_CID_CMD_TYPE_QUERY, NULL, 0); + err = mbim_send_command(pRequest, &pCmdDone, mbim_default_timeout); + mbim_check_err(err, pRequest, pCmdDone); + + if (le32toh(pCmdDone->InformationBufferLength)) { + MBIM_REGISTRATION_STATE_INFO_T *pInfo = (MBIM_REGISTRATION_STATE_INFO_T *)pCmdDone->InformationBuffer;; + RegisterState = le32toh(pInfo->RegisterState); + mbim_update_state(); + } + mbim_free(pRequest); mbim_free(pCmdDone); + return err; +} + +static int mbim_packet_service_query(void) { + MBIM_MESSAGE_HEADER *pRequest = NULL; + MBIM_COMMAND_DONE_T *pCmdDone = NULL; + int err; + + mbim_debug("%s()", __func__); + pRequest = compose_basic_connect_command(MBIM_CID_PACKET_SERVICE, MBIM_CID_CMD_TYPE_QUERY, NULL, 0); + err = mbim_send_command(pRequest, &pCmdDone, mbim_default_timeout); + mbim_check_err(err, pRequest, pCmdDone); + + if (le32toh(pCmdDone->InformationBufferLength)) { + MBIM_PACKET_SERVICE_INFO_T *pInfo = (MBIM_PACKET_SERVICE_INFO_T *)pCmdDone->InformationBuffer; + PacketServiceState = le32toh(pInfo->PacketServiceState); + mbim_update_state(); + + if (le32toh(pCmdDone->InformationBufferLength) == sizeof(MBIM_PACKET_SERVICE_INFO_V2_T)) { + MBIM_PACKET_SERVICE_INFO_V2_T *pInfo = (MBIM_PACKET_SERVICE_INFO_V2_T *)pCmdDone->InformationBuffer; + mbim_debug("CurrentDataClass = %s", MBIMDataClassStr(le32toh(pInfo->CurrentDataClass))); + } + } + mbim_free(pRequest); mbim_free(pCmdDone); + return err; +} + +static int mbim_packet_service_set(MBIM_PACKET_SERVICE_ACTION_E action) { + MBIM_MESSAGE_HEADER *pRequest = NULL; + MBIM_COMMAND_DONE_T *pCmdDone = NULL; + UINT32 value = 
htole32(action); + int err; + + mbim_debug("%s()", __func__); + pRequest = compose_basic_connect_command(MBIM_CID_PACKET_SERVICE, MBIM_CID_CMD_TYPE_SET, &value, sizeof(value)); + err = mbim_send_command(pRequest, &pCmdDone, mbim_default_timeout); + mbim_check_err(err, pRequest, pCmdDone); + + if (le32toh(pCmdDone->InformationBufferLength)) { + MBIM_PACKET_SERVICE_INFO_T *pInfo = (MBIM_PACKET_SERVICE_INFO_T *)pCmdDone->InformationBuffer; + PacketServiceState = le32toh(pInfo->PacketServiceState); + mbim_update_state(); + } + mbim_free(pRequest); mbim_free(pCmdDone); + return err; +} + +#define _align_32(len) {len += (len % 4) ? (4 - (len % 4)) : 0;} +static int mbim_populate_connect_data(MBIM_SET_CONNECT_T **connect_req_ptr) { + int offset; + int buflen = 0; + + if (mbim_apn && strlen(mbim_apn) > 0) buflen += 2*strlen(mbim_apn) ; + _align_32(buflen); + if (mbim_user && strlen(mbim_user) > 0) buflen += 2*strlen(mbim_user); + _align_32(buflen); + if (mbim_passwd && strlen(mbim_passwd) > 0) buflen += 2*strlen(mbim_passwd); + _align_32(buflen); + + *connect_req_ptr = (MBIM_SET_CONNECT_T*)malloc(sizeof(MBIM_SET_CONNECT_T) + buflen); + if (! *connect_req_ptr) { + mbim_debug("not enough memory\n"); + return -1; + } + memset(*connect_req_ptr, 0, sizeof(MBIM_SET_CONNECT_T) + buflen); + + offset = 0; + if (mbim_apn && strlen(mbim_apn) > 0) { + (*connect_req_ptr)->AccessStringSize = htole32(2*strlen(mbim_apn)); + (*connect_req_ptr)->AccessStringOffset = htole32(offset + sizeof(MBIM_SET_CONNECT_T)); + offset = char2wchar(mbim_apn, strlen(mbim_apn), &(*connect_req_ptr)->DataBuffer[offset], buflen - offset); + _align_32(offset); + } + + if (mbim_user && strlen(mbim_user) > 0) { + (*connect_req_ptr)->UserNameSize = htole32(2*strlen(mbim_user)); + (*connect_req_ptr)->UserNameOffset = htole32(offset + sizeof(MBIM_SET_CONNECT_T)); + offset = char2wchar(mbim_user, strlen(mbim_user), &(*connect_req_ptr)->DataBuffer[offset], buflen - offset); + _align_32(offset); + } + + if (mbim_passwd && strlen(mbim_passwd) > 0) { + (*connect_req_ptr)->PasswordSize = htole32(2*strlen(mbim_passwd)); + (*connect_req_ptr)->PasswordOffset = htole32(offset + sizeof(MBIM_SET_CONNECT_T)); + offset = char2wchar(mbim_passwd, strlen(mbim_passwd), &(*connect_req_ptr)->DataBuffer[offset], buflen - offset); + } + + return buflen; +} + +static int mbim_set_connect(int onoff, int sessionID) { + MBIM_MESSAGE_HEADER *pRequest = NULL; + MBIM_COMMAND_DONE_T *pCmdDone = NULL; + MBIM_SET_CONNECT_T *set_connect = NULL; + int err; + + mbim_debug("%s(onoff=%d, sessionID=%d)", __func__, onoff, sessionID); + /* alloc memory then populate APN USERNAME PASSWORD */ + int buflen = mbim_populate_connect_data(&set_connect); + if (buflen < 0) { + return ENOMEM; + } + + set_connect->SessionId = htole32(sessionID); + if (onoff == 0) + set_connect->ActivationCommand = htole32(MBIMActivationCommandDeactivate); + else + set_connect->ActivationCommand = htole32(MBIMActivationCommandActivate); + + set_connect->Compression = htole32(MBIMCompressionNone); + set_connect->AuthProtocol = htole32(mbim_auth); + set_connect->IPType = htole32(mbim_iptype); + memcpy(set_connect->ContextType.uuid, str2uuid(UUID_MBIMContextTypeInternet), 16); + + pRequest = compose_basic_connect_command(MBIM_CID_CONNECT, MBIM_CID_CMD_TYPE_SET, set_connect, sizeof(MBIM_SET_CONNECT_T) + buflen); + mbim_free(set_connect); + err = mbim_send_command(pRequest, &pCmdDone, mbim_default_timeout*10); + mbim_check_err(err, pRequest, pCmdDone); + + if (le32toh(pCmdDone->InformationBufferLength)) { + 
MBIM_CONNECT_T *pInfo = (MBIM_CONNECT_T *)pCmdDone->InformationBuffer; + ActivationState = le32toh(pInfo->ActivationState); + mbim_update_state(); + } + + mbim_free(pRequest); mbim_free(pCmdDone); + return err; +} + +static int mbim_ip_config(PROFILE_T *profile, int sessionID) { + MBIM_MESSAGE_HEADER *pRequest = NULL; + MBIM_COMMAND_DONE_T *pCmdDone = NULL; + MBIM_IP_CONFIGURATION_INFO_T ip_info; + int err; + + if (profile->ipv4.Address == 0 || mbim_verbose) + mbim_debug("%s(sessionID=%d)", __func__, sessionID); + ip_info.SessionId = htole32(sessionID); + pRequest = compose_basic_connect_command(MBIM_CID_IP_CONFIGURATION, MBIM_CID_CMD_TYPE_QUERY, &ip_info, sizeof(ip_info)); + err = mbim_send_command(pRequest, &pCmdDone, mbim_default_timeout); + mbim_check_err(err, pRequest, pCmdDone); + + if (le32toh(pCmdDone->InformationBufferLength)) { + UINT8 prefix, *ipv4=NULL, *ipv6=NULL, *gw=NULL, *dns1=NULL, *dns2=NULL; + UINT32 mtu = 1500; + MBIM_IP_CONFIGURATION_INFO_T *pInfo = (MBIM_IP_CONFIGURATION_INFO_T *)pCmdDone->InformationBuffer; + + /* IPv4 network configration */ + if (le32toh(pInfo->IPv4ConfigurationAvailable)&0x1) { + MBIM_IPV4_ELEMENT_T *pAddress = (MBIM_IPV4_ELEMENT_T *)(&pInfo->DataBuffer[le32toh(pInfo->IPv4AddressOffset)-sizeof(MBIM_IP_CONFIGURATION_INFO_T)]); + prefix = le32toh(pAddress->OnLinkPrefixLength); + ipv4 = pAddress->IPv4Address; + + if (le32toh(pInfo->IPv4ConfigurationAvailable)&0x2) + gw = (UINT8 *)(&pInfo->DataBuffer[le32toh(pInfo->IPv4GatewayOffset)-sizeof(MBIM_IP_CONFIGURATION_INFO_T)]); + + if (le32toh(pInfo->IPv4ConfigurationAvailable)&0x4) { + dns1 = (UINT8 *)(&pInfo->DataBuffer[le32toh(pInfo->IPv4DnsServerOffset)-sizeof(MBIM_IP_CONFIGURATION_INFO_T)]); + if (le32toh(pInfo->IPv4DnsServerCount) == 2) + dns2 = dns1 + 4; + } + + if (le32toh(pInfo->IPv4ConfigurationAvailable)&0x8) + mtu = le32toh(pInfo->IPv4Mtu); + + if (profile->ipv4.Address != mbim2qmi_ipv4addr(*(uint32_t *)ipv4) || mbim_verbose) { + mbim_dump_ipconfig(pInfo, "<"); + profile->ipv4.Address = mbim2qmi_ipv4addr(*(uint32_t *)ipv4); + } + + if(gw != NULL) + profile->ipv4.Gateway = mbim2qmi_ipv4addr(*(uint32_t *)gw); + profile->ipv4.SubnetMask = mbim2qmi_ipv4addr(0xFFFFFFFF>>(32-prefix)<<(32-prefix)); + if(dns1 != NULL) + profile->ipv4.DnsPrimary = mbim2qmi_ipv4addr(*(uint32_t *)dns1); + if(dns2 != NULL) + profile->ipv4.DnsSecondary = mbim2qmi_ipv4addr(*(uint32_t *)dns2); + profile->ipv4.Mtu = mbim2qmi_ipv4addr(mtu); + } + + /* IPv6 network configration */ + if (le32toh(pInfo->IPv6ConfigurationAvailable)&0x1) { + gw = NULL; dns1 = NULL; dns2 = NULL; + MBIM_IPV6_ELEMENT_T *pAddress = (MBIM_IPV6_ELEMENT_T *)(&pInfo->DataBuffer[le32toh(pInfo->IPv6AddressOffset)-sizeof(MBIM_IP_CONFIGURATION_INFO_T)]); + prefix = le32toh(pAddress->OnLinkPrefixLength); + ipv6 = pAddress->IPv6Address; + + if (le32toh(pInfo->IPv6ConfigurationAvailable)&0x2) + gw = (UINT8 *)(&pInfo->DataBuffer[le32toh(pInfo->IPv6GatewayOffset)-sizeof(MBIM_IP_CONFIGURATION_INFO_T)]); + + if (le32toh(pInfo->IPv6ConfigurationAvailable)&0x4) { + dns1 = (UINT8 *)(&pInfo->DataBuffer[le32toh(pInfo->IPv6DnsServerOffset)-sizeof(MBIM_IP_CONFIGURATION_INFO_T)]); + if (le32toh(pInfo->IPv6DnsServerCount) == 2) + dns2 = dns1 + 16; + } + + if (le32toh(pInfo->IPv6ConfigurationAvailable)&0x8) + mtu = le32toh(pInfo->IPv6Mtu); + + if(ipv6 != NULL) + mbim2qmi_ipv6addr(ipv6, profile->ipv6.Address); + if(gw != NULL) + mbim2qmi_ipv6addr(gw, profile->ipv6.Gateway); + if(dns1 != NULL) + mbim2qmi_ipv6addr(dns1, profile->ipv6.DnsPrimary); + if(dns2 != NULL) + 
mbim2qmi_ipv6addr(dns2, profile->ipv6.DnsSecondary); + profile->ipv6.PrefixLengthIPAddr = prefix; + profile->ipv6.PrefixLengthGateway = prefix; + profile->ipv6.Mtu = mbim2qmi_ipv4addr(mtu); + } + } + return err; +} + +int mbim_proxy_configure(const char *dev) { + MBIM_MESSAGE_HEADER *pRequest = NULL; + MBIM_COMMAND_DONE_T *pCmdDone = NULL; + MBIM_LIBQMI_PROXY_CONFIG_T *cfg; + int err; + + pRequest = compose_basic_connect_command( + MBIM_CID_PROXY_CONTROL_CONFIGURATION, + MBIM_CID_CMD_TYPE_SET, + NULL, + sizeof(*cfg) + strlen(dev)*2); + if (pRequest) { + memcpy(((MBIM_COMMAND_MSG_T *)pRequest)->DeviceServiceId.uuid, str2uuid(UUID_LIBMBIM_PROXY), 16); + cfg = (MBIM_LIBQMI_PROXY_CONFIG_T *)((MBIM_COMMAND_MSG_T *)pRequest)->InformationBuffer; + + cfg->DevicePathOffset = sizeof(*cfg); + cfg->DevicePathSize = char2wchar(dev, strlen(dev), cfg->DataBuffer, strlen(dev)*2); + cfg->Timeout = 15; + } + + err = mbim_send_command(pRequest, &pCmdDone, mbim_default_timeout); + mbim_check_err(err, pRequest, pCmdDone); + + mbim_free(pRequest); mbim_free(pCmdDone); + return err; +} + +static int mbim_update_state(void) { + int chages = 0; + + if (oldReadyState != ReadyState) { + mbim_debug("SubscriberReadyState %s -> %s ", MBIMSubscriberReadyStateStr(oldReadyState), MBIMSubscriberReadyStateStr(ReadyState)); + oldReadyState = ReadyState; chages++; + } + if (oldRegisterState != RegisterState) { + mbim_debug("RegisterState %s -> %s ", MBIMRegisterStateStr(oldRegisterState), MBIMRegisterStateStr(RegisterState)); + oldRegisterState = RegisterState; chages++; + } + if (oldPacketServiceState != PacketServiceState) { + mbim_debug("PacketServiceState %s -> %s ", MBIMPacketServiceStateStr(oldPacketServiceState), MBIMPacketServiceStateStr(PacketServiceState)); + oldPacketServiceState = PacketServiceState; chages++; + } + if (oldActivationState != ActivationState) { + mbim_debug("ActivationState %s -> %s ", MBIMActivationStateStr(oldActivationState), MBIMActivationStateStr(ActivationState)); + oldActivationState = ActivationState; chages++; + } + + return chages; +} + +static int mbim_init(PROFILE_T *profile) { + int retval; + int t = 0; + + if (profile->proxy[0] && !strcmp(profile->proxy, LIBMBIM_PROXY)) { + retval = mbim_proxy_configure(profile->qmichannel); + if (retval) goto exit; + } + + while (t++ < 10) { + retval = mbim_open_device(4096); + if (retval != ETIMEDOUT) + break; + } + if (retval) goto exit; + retval = mbim_device_caps_query(profile); + if (retval) goto exit; + mbim_update_state(); + retval = mbim_device_services_query(); + if (retval) goto exit; + mbim_update_state(); + retval = mbim_set_radio_state(MBIMRadioOn); + if (retval) goto exit; + mbim_update_state(); + + if (qmi_over_mbim_support) { + if (!socketpair( AF_LOCAL, SOCK_STREAM, 0, qmi_over_mbim_sk)) { + qmidev_send = qmi_over_mbim_qmidev_send; +#ifdef CONFIG_CELLINFO //by now, only this function need QMI OVER MBIM + qmi_over_mbim_nas = qmi_over_mbim_get_client_id(QMUX_TYPE_NAS); +#endif + } + } + + return 0; + +exit: + return retval; +} + +static int mbim_deinit(void) { + if (qmi_over_mbim_nas) { + qmi_over_mbim_release_client_id(QMUX_TYPE_NAS, qmi_over_mbim_nas); + qmi_over_mbim_nas = 0; + } + + mbim_close_device(); + + if (qmi_over_mbim_sk[0] != -1) { + close(qmi_over_mbim_sk[0]); + close(qmi_over_mbim_sk[1]); + } + + return 0; +} + +const struct qmi_device_ops mbim_dev_ops = { + .init = mbim_init, + .deinit = mbim_deinit, + .read = mbim_read_thread, +}; + +static int requestBaseBandVersion(PROFILE_T *profile) { + (void)profile; + return 0; +} 
+ +static int requestGetSIMStatus(SIM_Status *pSIMStatus) +{ + int retval; + + *pSIMStatus = SIM_ABSENT; + retval = mbim_subscriber_status_query(); + if (retval) + goto exit; + mbim_update_state(); + + switch(ReadyState) { + case MBIMSubscriberReadyStateNotInitialized: *pSIMStatus = SIM_NOT_READY; break; + case MBIMSubscriberReadyStateInitialized: *pSIMStatus = SIM_READY; break; + case MBIMSubscriberReadyStateSimNotInserted: *pSIMStatus = SIM_ABSENT; break; + case MBIMSubscriberReadyStateBadSim: *pSIMStatus = SIM_BAD; break; + case MBIMSubscriberReadyStateFailure: *pSIMStatus = SIM_ABSENT; break; + case MBIMSubscriberReadyStateNotActivated: *pSIMStatus = SIM_ABSENT; break; + case MBIMSubscriberReadyStateDeviceLocked: *pSIMStatus = SIM_PIN; break; + default: *pSIMStatus = SIM_ABSENT; break; + } + +exit: + return retval; +} + +static int requestRegistrationState(UCHAR *pPSAttachedState) { + int retval; + + *pPSAttachedState = 0; + retval = mbim_register_state_query(); + if (retval) + goto exit; + mbim_update_state(); + + switch (RegisterState) { + case MBIMRegisterStateUnknown: *pPSAttachedState = 0; break; + case MBIMRegisterStateDeregistered: *pPSAttachedState = 0; break; + case MBIMRegisterStateSearching: *pPSAttachedState = 0; break; + case MBIMRegisterStateHome: *pPSAttachedState = 1; break; + case MBIMRegisterStateRoaming: *pPSAttachedState = 1; break; + case MBIMRegisterStatePartner: *pPSAttachedState = 0; break; + case MBIMRegisterStateDenied: *pPSAttachedState = 0; break; + default: *pPSAttachedState = 0; break; + } + + if (*pPSAttachedState == 0) + goto exit; + + retval = mbim_packet_service_query(); + if (retval) + goto exit; + + switch (PacketServiceState) { + case MBIMPacketServiceStateUnknown: *pPSAttachedState = 0; break; + case MBIMPacketServiceStateAttaching: *pPSAttachedState = 0; break; + case MBIMPacketServiceStateAttached: *pPSAttachedState = 1; break; + case MBIMPacketServiceStateDetaching: *pPSAttachedState = 0; break; + case MBIMPacketServiceStateDetached: *pPSAttachedState = 0; break; + default: *pPSAttachedState = 0; break; + } + + if (*pPSAttachedState == 0) + mbim_packet_service_set(MBIMPacketServiceActionAttach); + +exit: + return retval; +} + +static int requestSetupDataCall(PROFILE_T *profile, int curIpFamily) { + int retval; + + (void)curIpFamily; + + if (profile->apn) + mbim_apn = profile->apn; + if (profile->user) + mbim_user = profile->user; + if (profile->password) + mbim_passwd = profile->password; + if (profile->auth) + mbim_auth = profile->auth; + if (profile->enable_ipv4) + mbim_iptype = MBIMContextIPTypeIPv4; + if (profile->enable_ipv6) + mbim_iptype = MBIMContextIPTypeIPv6; + if (profile->enable_ipv4 && profile->enable_ipv6) + mbim_iptype = MBIMContextIPTypeIPv4AndIPv6; + + retval = mbim_set_connect(1, mbim_sessionID); + if (retval) + goto exit; + +exit: + return retval; +} + +static int requestQueryDataCall(UCHAR *pConnectionStatus, int curIpFamily) { + int retval; + + (void)curIpFamily; + + *pConnectionStatus = QWDS_PKT_DATA_DISCONNECTED; + + retval = mbim_query_connect(mbim_sessionID); + if (retval) + goto exit; + + switch(ActivationState) { + case MBIMActivationStateUnknown: *pConnectionStatus = QWDS_PKT_DATA_UNKNOW; break; + case MBIMActivationStateActivated: *pConnectionStatus = QWDS_PKT_DATA_CONNECTED; break; + case MBIMActivationStateActivating: *pConnectionStatus = QWDS_PKT_DATA_DISCONNECTED; break; + case MBIMActivationStateDeactivated: *pConnectionStatus = QWDS_PKT_DATA_DISCONNECTED; break; + case MBIMActivationStateDeactivating: 
*pConnectionStatus = QWDS_PKT_DATA_DISCONNECTED; break; + default: *pConnectionStatus = QWDS_PKT_DATA_DISCONNECTED; break; + } + +exit: + return retval; +} + +static int requestDeactivateDefaultPDP(PROFILE_T *profile, int curIpFamily) { + int retval; + + (void)profile; + (void)curIpFamily; + + retval = mbim_set_connect(0, mbim_sessionID); + if (retval) + goto exit; + +exit: + return retval; +} + +static int requestGetIPAddress(PROFILE_T *profile, int curIpFamily) { + int retval; + + (void)curIpFamily; + retval = mbim_ip_config(profile, mbim_sessionID); + if (retval) + goto exit; + +exit: + return retval; +} + +#ifdef CONFIG_CELLINFO +static int requestGetCellInfoList(void) { + if (qmi_over_mbim_nas) { + if (qmi_request_ops.requestGetCellInfoList) + return qmi_request_ops.requestGetCellInfoList(); + } + + return 0; +} +#endif + +const struct request_ops mbim_request_ops = { + .requestBaseBandVersion = requestBaseBandVersion, + .requestGetSIMStatus = requestGetSIMStatus, + .requestRegistrationState = requestRegistrationState, + .requestSetupDataCall = requestSetupDataCall, + .requestQueryDataCall = requestQueryDataCall, + .requestDeactivateDefaultPDP = requestDeactivateDefaultPDP, + .requestGetIPAddress = requestGetIPAddress, +#ifdef CONFIG_CELLINFO + .requestGetCellInfoList = requestGetCellInfoList, +#endif +}; + +int qmi_over_mbim_qmidev_send(PQCQMIMSG pQMI) { + MBIM_MESSAGE_HEADER *pRequest = NULL; + MBIM_COMMAND_DONE_T *pCmdDone = NULL; + int err; + size_t len = le16toh(pQMI->QMIHdr.Length) + 1; + + if (pQMI->QMIHdr.QMIType != QMUX_TYPE_CTL) { + if (pQMI->QMIHdr.QMIType == QMUX_TYPE_NAS) + pQMI->QMIHdr.ClientId = qmi_over_mbim_nas; + + if (pQMI->QMIHdr.ClientId == 0) { + dbg_time("QMIType %d has no clientID", pQMI->QMIHdr.QMIType); + return -ENODEV; + } + } + + pRequest = compose_qmi_over_mbim_command(1, MBIM_CID_CMD_TYPE_SET, pQMI, len); + err = mbim_send_command(pRequest, &pCmdDone, mbim_default_timeout); + mbim_check_err(err, pRequest, pCmdDone); + + err = -1; + len = le32toh(pCmdDone->InformationBufferLength); + if (len) { + if (write(qmi_over_mbim_sk[0], pCmdDone->InformationBuffer, len) == (long)len) { + err = 0; + }; + } + + mbim_free(pRequest); mbim_free(pCmdDone); + return err; +} diff --git a/package/wwan/driver/quectel_cm_5G/src/qendian.h b/package/wwan/driver/quectel_cm_5G/src/qendian.h new file mode 100644 index 000000000..ba9b7668b --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/qendian.h @@ -0,0 +1,52 @@ +#ifndef __QUECTEL_ENDIAN_H__ +#define __QUECTEL_ENDIAN_H__ +#include + +#ifndef htole32 +#if __BYTE_ORDER == __LITTLE_ENDIAN +#define htole16(x) (uint16_t)(x) +#define le16toh(x) (uint16_t)(x) +#define letoh16(x) (uint16_t)(x) +#define htole32(x) (uint32_t)(x) +#define le32toh(x) (uint32_t)(x) +#define letoh32(x) (uint32_t)(x) +#define htole64(x) (uint64_t)(x) +#define le64toh(x) (uint64_t)(x) +#define letoh64(x) (uint64_t)(x) +#else +static __inline uint16_t __bswap16(uint16_t __x) { + return (__x<<8) | (__x>>8); +} + +static __inline uint32_t __bswap32(uint32_t __x) { + return (__x>>24) | (__x>>8&0xff00) | (__x<<8&0xff0000) | (__x<<24); +} + +static __inline uint64_t __bswap64(uint64_t __x) { + return (__bswap32(__x)+0ULL<<32) | (__bswap32(__x>>32)); +} + +#define htole16(x) __bswap16(x) +#define le16toh(x) __bswap16(x) +#define letoh16(x) __bswap16(x) +#define htole32(x) __bswap32(x) +#define le32toh(x) __bswap32(x) +#define letoh32(x) __bswap32(x) +#define htole64(x) __bswap64(x) +#define le64toh(x) __bswap64(x) +#define letoh64(x) __bswap64(x) +#endif 
+#endif + +#define le16_to_cpu(x) le16toh((uint16_t)(x)) +#define le32_to_cpu(x) le32toh((uint32_t)(x)) +#define le64_to_cpu(x) le64toh((uint64_t)(x)) +#define cpu_to_le16(x) htole16((uint16_t)(x)) +#define cpu_to_le32(x) htole32((uint32_t)(x)) +#define cpu_to_le64(x) htole64((uint64_t)(x)) + +static __inline uint32_t ql_swap32(uint32_t __x) { + return (__x>>24) | (__x>>8&0xff00) | (__x<<8&0xff0000) | (__x<<24); +} +#endif //__QUECTEL_ENDIAN_H__ + diff --git a/package/wwan/driver/quectel_cm_5G/src/qlist.h b/package/wwan/driver/quectel_cm_5G/src/qlist.h new file mode 100644 index 000000000..4fe86ba84 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/qlist.h @@ -0,0 +1,38 @@ +#ifndef __QUECTEL_LIST_H__ +#define __QUECTEL_LIST_H__ +struct qlistnode +{ + struct qlistnode *next; + struct qlistnode *prev; +}; + +#define qnode_to_item(node, container, member) \ + (container *) (((char*) (node)) - offsetof(container, member)) + +#define qlist_for_each(node, list) \ + for (node = (list)->next; node != (list); node = node->next) + +#define qlist_empty(list) ((list) == (list)->next) +#define qlist_head(list) ((list)->next) +#define qlist_tail(list) ((list)->prev) + +static void qlist_init(struct qlistnode *node) +{ + node->next = node; + node->prev = node; +} + +static void qlist_add_tail(struct qlistnode *head, struct qlistnode *item) +{ + item->next = head; + item->prev = head->prev; + head->prev->next = item; + head->prev = item; +} + +static void qlist_remove(struct qlistnode *item) +{ + item->next->prev = item->prev; + item->prev->next = item->next; +} +#endif \ No newline at end of file diff --git a/package/wwan/driver/quectel_cm_5G/src/qmap_bridge_mode.c b/package/wwan/driver/quectel_cm_5G/src/qmap_bridge_mode.c new file mode 100644 index 000000000..f6c81cb93 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/qmap_bridge_mode.c @@ -0,0 +1,402 @@ +/****************************************************************************** + @file qmap_bridge_mode.c + @brief Connectivity bridge manager. + + DESCRIPTION + Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules. + + INITIALIZATION AND SEQUENCING REQUIREMENTS + None. + + --------------------------------------------------------------------------- + Copyright (c) 2016 - 2020 Quectel Wireless Solution, Co., Ltd. All Rights Reserved. + Quectel Wireless Solution Proprietary and Confidential. + --------------------------------------------------------------------------- +******************************************************************************/ +#include "QMIThread.h" + +static size_t ql_fread(const char *filename, void *buf, size_t size) { + FILE *fp = fopen(filename , "r"); + size_t n = 0; + + memset(buf, 0x00, size); + + if (fp) { + n = fread(buf, 1, size, fp); + if (n <= 0 || n == size) { + dbg_time("warnning: fail to fread(%s), fread=%zu, buf_size=%zu: (%s)", filename, n, size, strerror(errno)); + } + fclose(fp); + } + + return n > 0 ? n : 0; +} + +static size_t ql_fwrite(const char *filename, const void *buf, size_t size) { + FILE *fp = fopen(filename , "w"); + size_t n = 0; + + if (fp) { + n = fwrite(buf, 1, size, fp); + if (n != size) { + dbg_time("warnning: fail to fwrite(%s), fwrite=%zu, buf_size=%zu: (%s)", filename, n, size, strerror(errno)); + } + fclose(fp); + } + + return n > 0 ? n : 0; +} + +int ql_bridge_mode_detect(PROFILE_T *profile) { + const char *ifname = profile->qmapnet_adapter[0] ? 
profile->qmapnet_adapter : profile->usbnet_adapter; + const char *driver; + char bridge_mode[128]; + char bridge_ipv4[128]; + char ipv4[128]; + char buf[64]; + size_t n; + int in_bridge = 0; + + driver = profile->driver_name; + snprintf(bridge_mode, sizeof(bridge_mode), "/sys/class/net/%s/bridge_mode", ifname); + snprintf(bridge_ipv4, sizeof(bridge_ipv4), "/sys/class/net/%s/bridge_ipv4", ifname); + + if (access(bridge_ipv4, R_OK)) { + if (errno != ENOENT) { + dbg_time("fail to access %s, errno: %d (%s)", bridge_mode, errno, strerror(errno)); + return 0; + } + + snprintf(bridge_mode, sizeof(bridge_mode), "/sys/module/%s/parameters/bridge_mode", driver); + snprintf(bridge_ipv4, sizeof(bridge_ipv4), "/sys/module/%s/parameters/bridge_ipv4", driver); + + if (access(bridge_mode, R_OK)) { + if (errno != ENOENT) { + dbg_time("fail to access %s, errno: %d (%s)", bridge_mode, errno, strerror(errno)); + } + return 0; + } + } + + n = ql_fread(bridge_mode, buf, sizeof(buf)); + if (n > 0) { + in_bridge = (buf[0] != '0'); + } + if (!in_bridge) + return 0; + + memset(ipv4, 0, sizeof(ipv4)); + + if (strstr(bridge_ipv4, "/sys/class/net/") || profile->qmap_mode == 0 || profile->qmap_mode == 1) { + snprintf(ipv4, sizeof(ipv4), "0x%x", profile->ipv4.Address); + dbg_time("echo '%s' > %s", ipv4, bridge_ipv4); + ql_fwrite(bridge_ipv4, ipv4, strlen(ipv4)); + } + else { + snprintf(ipv4, sizeof(ipv4), "0x%x:%d", profile->ipv4.Address, profile->muxid); + dbg_time("echo '%s' > %s", ipv4, bridge_ipv4); + ql_fwrite(bridge_ipv4, ipv4, strlen(ipv4)); + } + + return in_bridge; +} + +int ql_enable_qmi_wwan_rawip_mode(PROFILE_T *profile) { + char filename[256]; + char buf[4]; + size_t n; + FILE *fp; + + if (!qmidev_is_qmiwwan(profile->qmichannel)) + return 0; + + snprintf(filename, sizeof(filename), "/sys/class/net/%s/qmi/rawip", profile->usbnet_adapter); + n = ql_fread(filename, buf, sizeof(buf)); + + if (n == 0) + return 0; + + if (buf[0] == '1' || buf[0] == 'Y') + return 0; + + fp = fopen(filename , "w"); + if (fp == NULL) { + dbg_time("Fail to fopen(%s, \"w\"), errno: %d (%s)", filename, errno, strerror(errno)); + return 1; + } + + buf[0] = 'Y'; + n = fwrite(buf, 1, 1, fp); + if (n != 1) { + dbg_time("Fail to fwrite(%s), errno: %d (%s)", filename, errno, strerror(errno)); + fclose(fp); + return 1; + } + fclose(fp); + + return 0; +} + +int ql_driver_type_detect(PROFILE_T *profile) { + if (qmidev_is_gobinet(profile->qmichannel)) { + profile->qmi_ops = &gobi_qmidev_ops; + } + else { + profile->qmi_ops = &qmiwwan_qmidev_ops; + } + qmidev_send = profile->qmi_ops->send; + + return 0; +} + +void ql_set_driver_bridge_mode(PROFILE_T *profile) { + char enable[16]; + char filename[256]; + + if(profile->qmap_mode) + snprintf(filename, sizeof(filename), "/sys/class/net/%s/bridge_mode", profile->qmapnet_adapter); + else + snprintf(filename, sizeof(filename), "/sys/class/net/%s/bridge_mode", profile->usbnet_adapter); + snprintf(enable, sizeof(enable), "%02d\n", profile->enable_bridge); + ql_fwrite(filename, enable, sizeof(enable)); +} + +static int ql_qmi_qmap_mode_detect(PROFILE_T *profile) { + char buf[128]; + int n; + struct { + char filename[255 * 2]; + char linkname[255 * 2]; + } *pl; + + pl = (typeof(pl)) malloc(sizeof(*pl)); + + snprintf(pl->linkname, sizeof(pl->linkname), "/sys/class/net/%s/device/driver", profile->usbnet_adapter); + n = readlink(pl->linkname, pl->filename, sizeof(pl->filename)); + pl->filename[n] = '\0'; + while (pl->filename[n] != '/') + n--; + strncpy(profile->driver_name, &pl->filename[n+1], 
sizeof(profile->driver_name) - 1); + + ql_get_driver_rmnet_info(profile, &profile->rmnet_info); + if (profile->rmnet_info.size) { + profile->qmap_mode = profile->rmnet_info.qmap_mode; + if (profile->qmap_mode) { + int offset_id = (profile->muxid == 0)? profile->pdp - 1 : profile->muxid - 0x81; + + if (profile->qmap_mode == 1) + offset_id = 0; + profile->muxid = profile->rmnet_info.mux_id[offset_id]; + strncpy(profile->qmapnet_adapter, profile->rmnet_info.ifname[offset_id], sizeof(profile->qmapnet_adapter) - 1); + profile->qmap_size = profile->rmnet_info.rx_urb_size; + profile->qmap_version = profile->rmnet_info.qmap_version; + } + + goto _out; + } + + snprintf(pl->filename, sizeof(pl->filename), "/sys/class/net/%s/qmap_mode", profile->usbnet_adapter); + if (access(pl->filename, R_OK)) { + if (errno != ENOENT) { + dbg_time("fail to access %s, errno: %d (%s)", pl->filename, errno, strerror(errno)); + goto _out; + } + + snprintf(pl->filename, sizeof(pl->filename), "/sys/module/%s/parameters/qmap_mode", profile->driver_name); + if (access(pl->filename, R_OK)) { + if (errno != ENOENT) { + dbg_time("fail to access %s, errno: %d (%s)", pl->filename, errno, strerror(errno)); + goto _out; + } + + snprintf(pl->filename, sizeof(pl->filename), "/sys/class/net/%s/device/driver/module/parameters/qmap_mode", profile->usbnet_adapter); + if (access(pl->filename, R_OK)) { + if (errno != ENOENT) { + dbg_time("fail to access %s, errno: %d (%s)", pl->filename, errno, strerror(errno)); + goto _out; + } + } + } + } + + if (!access(pl->filename, R_OK)) { + n = ql_fread(pl->filename, buf, sizeof(buf)); + if (n > 0) { + profile->qmap_mode = atoi(buf); + + if (profile->qmap_mode > 1) { + if(!profile->muxid) + profile->muxid = profile->pdp + 0x80; //muxis is 0x8X for PDN-X + snprintf(profile->qmapnet_adapter, sizeof(profile->qmapnet_adapter), + "%.16s.%d", profile->usbnet_adapter, profile->muxid - 0x80); + } if (profile->qmap_mode == 1) { + profile->muxid = 0x81; + strncpy(profile->qmapnet_adapter, profile->usbnet_adapter, sizeof(profile->qmapnet_adapter)); + } + } + } + else if (qmidev_is_qmiwwan(profile->qmichannel)) { + snprintf(pl->filename, sizeof(pl->filename), "/sys/class/net/qmimux%d", profile->pdp - 1); + if (access(pl->filename, R_OK)) { + if (errno != ENOENT) { + dbg_time("fail to access %s, errno: %d (%s)", pl->filename, errno, strerror(errno)); + } + goto _out; + } + + //upstream Kernel Style QMAP qmi_wwan.c + snprintf(pl->filename, sizeof(pl->filename), "/sys/class/net/%s/qmi/add_mux", profile->usbnet_adapter); + n = ql_fread(pl->filename, buf, sizeof(buf)); + if (n >= 5) { + dbg_time("If use QMAP by /sys/class/net/%s/qmi/add_mux", profile->usbnet_adapter); + #if 1 + dbg_time("Please set mtu of wwan0 >= max dl qmap packet size"); + #else + dbg_time("File:%s Line:%d Please make sure add next patch to qmi_wwan.c", __func__, __LINE__); + /* + diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c + index 74bebbd..db8a777 100644 + --- a/drivers/net/usb/qmi_wwan.c + +++ b/drivers/net/usb/qmi_wwan.c + @@ -379,6 +379,24 @@ static ssize_t add_mux_store(struct device *d, struct device_attribute *attr, c + if (!ret) { + info->flags |= QMI_WWAN_FLAG_MUX; + ret = len; + +#if 1 //Add by Quectel + + if (le16_to_cpu(dev->udev->descriptor.idVendor) == 0x2c7c) { + + int idProduct = le16_to_cpu(dev->udev->descriptor.idProduct); + + + + if (idProduct == 0x0121 || idProduct == 0x0125 || idProduct == 0x0435) //MDM9x07 + + dev->rx_urb_size = 4*1024; + + else if (idProduct == 0x0306) //MDM9x40 + + 
dev->rx_urb_size = 16*1024; + + else if (idProduct == 0x0512) //SDX20 + + dev->rx_urb_size = 32*1024; + + else if (idProduct == 0x0620) //SDX24 + + dev->rx_urb_size = 32*1024; + + else if (idProduct == 0x0800) //SDX55 + + dev->rx_urb_size = 32*1024; + + else + + dev->rx_urb_size = 32*1024; + + } + +#endif + } + err: + rtnl_unlock(); + */ + #endif + profile->qmap_mode = n/5; //0x11\n0x22\n0x33\n + if (profile->qmap_mode > 1) { + //PDN-X map to qmimux-X + if(!profile->muxid) { + profile->muxid = (buf[5*(profile->pdp - 1) + 2] - '0')*16 + (buf[5*(profile->pdp - 1) + 3] - '0'); + snprintf(profile->qmapnet_adapter, sizeof(profile->qmapnet_adapter), "qmimux%d", profile->pdp - 1); + } else { + profile->muxid = (buf[5*(profile->muxid - 0x81) + 2] - '0')*16 + (buf[5*(profile->muxid - 0x81) + 3] - '0'); + snprintf(profile->qmapnet_adapter, sizeof(profile->qmapnet_adapter), "qmimux%d", profile->muxid - 0x81); + } + } else if (profile->qmap_mode == 1) { + profile->muxid = (buf[5*0 + 2] - '0')*16 + (buf[5*0 + 3] - '0'); + snprintf(profile->qmapnet_adapter, sizeof(profile->qmapnet_adapter), + "qmimux%d", 0); + } + } + } + +_out: + if (profile->qmap_mode) { + if (profile->qmap_size == 0) { + profile->qmap_size = 16*1024; + snprintf(pl->filename, sizeof(pl->filename), "/sys/class/net/%s/qmap_size", profile->usbnet_adapter); + if (!access(pl->filename, R_OK)) { + size_t n; + char buf[32]; + n = ql_fread(pl->filename, buf, sizeof(buf)); + if (n > 0) { + profile->qmap_size = atoi(buf); + } + } + } + + if (profile->qmap_version == 0) { + profile->qmap_version = WDA_DL_DATA_AGG_QMAP_ENABLED; + } + + dbg_time("qmap_mode = %d, qmap_version = %d, qmap_size = %d, muxid = 0x%02x, qmap_netcard = %s", + profile->qmap_mode, profile->qmap_version, profile->qmap_size, profile->muxid, profile->qmapnet_adapter); + } + ql_set_driver_bridge_mode(profile); + free(pl); + + return 0; +} + +static int ql_mbim_usb_vlan_mode_detect(PROFILE_T *profile) { + char tmp[128]; + + snprintf(tmp, sizeof(tmp), "/sys/class/net/%s.%d", profile->usbnet_adapter, profile->pdp); + if (!access(tmp, F_OK)) { + profile->qmap_mode = 4; + profile->muxid = profile->pdp; + no_trunc_strncpy(profile->qmapnet_adapter, tmp + strlen("/sys/class/net/"), sizeof(profile->qmapnet_adapter) - 1); + + dbg_time("mbim_qmap_mode = %d, vlan_id = 0x%02x, qmap_netcard = %s", + profile->qmap_mode, profile->muxid, profile->qmapnet_adapter); + } + + return 0; +} + +static int ql_mbim_mhi_qmap_mode_detect(PROFILE_T *profile) { + ql_get_driver_rmnet_info(profile, &profile->rmnet_info); + if (profile->rmnet_info.size) { + profile->qmap_mode = profile->rmnet_info.qmap_mode; + if (profile->qmap_mode) { + int offset_id = profile->pdp - 1; + + if (profile->qmap_mode == 1) + offset_id = 0; + profile->muxid = profile->pdp; + strcpy(profile->qmapnet_adapter, profile->rmnet_info.ifname[offset_id]); + profile->qmap_size = profile->rmnet_info.rx_urb_size; + profile->qmap_version = profile->rmnet_info.qmap_version; + + dbg_time("mbim_qmap_mode = %d, vlan_id = 0x%02x, qmap_netcard = %s", + profile->qmap_mode, profile->muxid, profile->qmapnet_adapter); + } + + goto _out; + } + +_out: + return 0; +} + +int ql_qmap_mode_detect(PROFILE_T *profile) { + if (profile->software_interface == SOFTWARE_MBIM) { + if (profile->hardware_interface == HARDWARE_USB) + return ql_mbim_usb_vlan_mode_detect(profile); + else if (profile->hardware_interface == HARDWARE_PCIE) + return ql_mbim_mhi_qmap_mode_detect(profile); + } else if (profile->software_interface == SOFTWARE_QMI) { + return 
ql_qmi_qmap_mode_detect(profile); + } +#ifdef CONFIG_QRTR + else if(profile->software_interface == SOFTWARE_QRTR) { + char tmp[128]; + + profile->qmap_mode = 4; + profile->qmap_version = WDA_DL_DATA_AGG_QMAP_V5_ENABLED; + profile->qmap_size = 31*1024; + profile->muxid = 0x80 | profile->pdp; + snprintf(profile->qmapnet_adapter, sizeof(profile->qmapnet_adapter), "rmnet_data%d", profile->muxid&0xF); + + snprintf(tmp, sizeof(tmp), "/sys/class/net/%s", profile->qmapnet_adapter); + if (access(tmp, F_OK)) { + rtrmnet_ctl_create_vnd(profile->usbnet_adapter, profile->qmapnet_adapter, + profile->muxid, profile->qmap_version, 11, 4096); + } + } +#endif + return 0; +} diff --git a/package/wwan/driver/quectel_cm_5G/src/qrtr.c b/package/wwan/driver/quectel_cm_5G/src/qrtr.c new file mode 100644 index 000000000..111a6b2ce --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/qrtr.c @@ -0,0 +1,657 @@ +//https://github.com/andersson/qrtr +/****************************************************************************** + @file QrtrCM.c + @brief GobiNet driver. + + DESCRIPTION + Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules. + + INITIALIZATION AND SEQUENCING REQUIREMENTS + None. + + --------------------------------------------------------------------------- + Copyright (c) 2016 - 2020 Quectel Wireless Solution, Co., Ltd. All Rights Reserved. + Quectel Wireless Solution Proprietary and Confidential. + --------------------------------------------------------------------------- +******************************************************************************/ +#include +#include +#include +#include +#include +#include "QMIThread.h" + +typedef struct { + uint32_t service; + uint32_t version; + uint32_t instance; + uint32_t node; + uint32_t port; +} QrtrService; + +#define QRTR_MAX (QMUX_TYPE_WDS_ADMIN + 1) +static QrtrService service_list[QRTR_MAX]; +static int qmiclientId[QRTR_MAX]; +static int get_client(UCHAR QMIType); +static uint32_t node_modem = 3; //IPQ ~ 3, QCM ~ 0 + +#ifdef USE_LINUX_MSM_IPC +#include + +struct xport_ipc_router_server_addr { + uint32_t service; + uint32_t instance; + uint32_t node_id; + uint32_t port_id; +}; + +union ctl_msg { + uint32_t cmd; + struct { + uint32_t cmd; + uint32_t service; + uint32_t instance; + uint32_t node_id; + uint32_t port_id; + } srv; + struct { + uint32_t cmd; + uint32_t node_id; + uint32_t port_id; + } cli; + }; +#define CTL_CMD_NEW_SERVER 4 +#define CTL_CMD_REMOVE_SERVER 5 + +#define VERSION_MASK 0xff +#define GET_VERSION(x) (x & 0xff) +#define GET_XPORT_SVC_INSTANCE(x) GET_VERSION(x) +#define GET_INSTANCE(x) ((x & 0xff00) >> 8) + +static int msm_ipc_socket(const char *name) +{ + int sock; + int flags; + + sock = socket(AF_MSM_IPC, SOCK_DGRAM, 0); + if (sock < 0) { + dbg_time("%s(%s) errno: %d (%s)\n", __func__, name, errno, strerror(errno)); + return -1; + } + + fcntl(sock, F_SETFD, FD_CLOEXEC); + flags = fcntl(sock, F_GETFL, 0); + fcntl(sock, F_SETFL, flags | O_NONBLOCK); + + return sock; +} + +static uint32_t xport_lookup +( + int lookup_sock_fd, + uint32_t service_id, + uint32_t version +) +{ + uint32_t num_servers_found = 0; + uint32_t num_entries_to_fill = 4; + struct server_lookup_args *lookup_arg; + int i; + + lookup_arg = (struct server_lookup_args *)malloc(sizeof(*lookup_arg) + + (num_entries_to_fill * sizeof(struct msm_ipc_server_info))); + if (!lookup_arg) + { + dbg_time("%s: Malloc failed\n", __func__); + return 0; + } + + lookup_arg->port_name.service = service_id; + lookup_arg->port_name.instance = 
GET_XPORT_SVC_INSTANCE(version); + lookup_arg->num_entries_in_array = num_entries_to_fill; + lookup_arg->lookup_mask = VERSION_MASK; + lookup_arg->num_entries_found = 0; + if (ioctl(lookup_sock_fd, IPC_ROUTER_IOCTL_LOOKUP_SERVER, lookup_arg) < 0) + { + dbg_time("%s: Lookup failed for %08x: %08x\n", __func__, service_id, version); + free(lookup_arg); + return 0; + } + + dbg_time("%s: num_entries_found %d for type=%d instance=%d", __func__, + lookup_arg->num_entries_found, service_id, version); + num_servers_found = 0; + for (i = 0; ((i < (int)num_entries_to_fill) && (i < lookup_arg->num_entries_found)); i++) + { + QrtrService service_info[1]; + + if (lookup_arg->srv_info[i].node_id != node_modem) + continue; + num_servers_found++; + + service_info[0].service = lookup_arg->srv_info[i].service; + service_info[0].version = GET_VERSION(lookup_arg->srv_info[i].instance); + service_info[0].instance = GET_INSTANCE(lookup_arg->srv_info[i].instance); + service_info[0].node = lookup_arg->srv_info[i].node_id; + service_info[0].port = lookup_arg->srv_info[i].port_id; + + service_list[service_id] = service_info[0]; + qmiclientId[service_id] = get_client(service_id); + } + + free(lookup_arg); + return num_servers_found; +} + +static int xport_send(int sock, uint32_t node, uint32_t port, const void *data, unsigned int sz) +{ + struct sockaddr_msm_ipc addr = {}; + int rc; + + addr.family = AF_MSM_IPC; + addr.address.addrtype = MSM_IPC_ADDR_ID; + addr.address.addr.port_addr.node_id = node; + addr.address.addr.port_addr.port_id = port; + + rc = sendto(sock, data, sz, MSG_DONTWAIT, (void *)&addr, sizeof(addr)); + if (rc < 0) { + dbg_time("xport_send errno: %d (%s)\n", errno, strerror(errno)); + return -1; + } + + return 0; +} + +static int xport_recv(int sock, void *data, unsigned int sz, uint32_t *node, uint32_t *port) +{ + struct sockaddr_msm_ipc addr = {}; + socklen_t addr_size = sizeof(struct sockaddr_msm_ipc); + int rc; + + rc = recvfrom(sock, data, sz, MSG_DONTWAIT, (void *)&addr, &addr_size); + if (rc < 0) { + dbg_time("xport_recv errno: %d (%s)\n", errno, strerror(errno)); + } + else if (addr.address.addrtype != MSM_IPC_ADDR_ID) { + dbg_time("xport_recv addrtype is NOT MSM_IPC_ADDR_ID\n"); + rc = -1; + } + + *node = addr.address.addr.port_addr.node_id; + *port = addr.address.addr.port_addr.port_id; + return rc; +} +#define qmi_recv xport_recv + +static int xport_ctrl_init(void) +{ + int ctrl_sock; + int rc; + uint32_t instance = 1; //modem + uint32_t version; + + ctrl_sock = msm_ipc_socket("ctrl_port"); + if (ctrl_sock == -1) + return -1; + + rc = ioctl(ctrl_sock, IPC_ROUTER_IOCTL_GET_VERSION, &version); + if (rc < 0) { + dbg_time("%s: failed to get ipc version\n", __func__); + goto init_close_ctrl_fd; + } + dbg_time("%s ipc_version = %d", __func__, version); + + rc = ioctl(ctrl_sock, IPC_ROUTER_IOCTL_BIND_CONTROL_PORT, NULL); + if (rc < 0) { + dbg_time("%s: failed to bind as control port\n", __func__); + goto init_close_ctrl_fd; + } + + //cat /sys/kernel/debug/msm_ipc_router/dump_servers + rc = 0; + rc += xport_lookup(ctrl_sock, QMUX_TYPE_WDS, instance); + if (service_list[QMUX_TYPE_WDS].port) { + qmiclientId[QMUX_TYPE_WDS_IPV6] = get_client(QMUX_TYPE_WDS); + } + rc += xport_lookup(ctrl_sock, QMUX_TYPE_NAS, instance); + rc += xport_lookup(ctrl_sock, QMUX_TYPE_UIM, instance); + rc += xport_lookup(ctrl_sock, QMUX_TYPE_DMS, instance); + rc += xport_lookup(ctrl_sock, QMUX_TYPE_WDS_ADMIN, instance); + + if (rc == 0) { + dbg_time("%s: failed to lookup qmi service\n", __func__); + goto init_close_ctrl_fd; + 
} + + return ctrl_sock; + +init_close_ctrl_fd: + close(ctrl_sock); + return -1; +} + +static void handle_ctrl_pkt(int sock) { + union ctl_msg pkt; + uint32_t type; + int rc; + + rc = recvfrom(sock, &pkt, sizeof(pkt), 0, NULL, NULL); + if (rc < 0) + return; + + type = le32toh(pkt.cmd); + if (CTL_CMD_NEW_SERVER == type || CTL_CMD_REMOVE_SERVER == type) { + QrtrService s; + + s.service = le32toh(pkt.srv.service); + s.version = le32toh(pkt.srv.instance) & 0xff; + s.instance = le32toh(pkt.srv.instance) >> 8; + s.node = le32toh(pkt.srv.node_id); + s.port = le32toh(pkt.srv.port_id); + + if (debug_qmi) + dbg_time ("[qrtr] %s server on %u:%u -> service %u, version %u, instance %u", + CTL_CMD_NEW_SERVER == type ? "add" : "remove", + s.node, s.port, s.service, s.version, s.instance); + + if (CTL_CMD_NEW_SERVER == type) { + if (s.service < QRTR_MAX) { + service_list[s.service] = s; + } + } + else if (CTL_CMD_REMOVE_SERVER == type) { + if (s.service < QRTR_MAX) { + memset(&service_list[s.service], 0, sizeof(QrtrService)); + } + } + } +} +#else +#include +#include "qrtr.h" +#endif + +static int qrtr_socket(void) +{ + struct sockaddr_qrtr sq; + socklen_t sl = sizeof(sq); + int sock; + int rc; + + sock = socket(AF_QIPCRTR, SOCK_DGRAM, 0); + if (sock < 0) { + dbg_time("qrtr_socket errno: %d (%s)\n", errno, strerror(errno)); + return -1; + } + + rc = getsockname(sock, (void *)&sq, &sl); + if (rc || sq.sq_family != AF_QIPCRTR || sl != sizeof(sq)) { + dbg_time("getsockname: %d (%s)\n", errno, strerror(errno)); + close(sock); + return -1; + } + + return sock; +} + +static int qrtr_send(int sock, uint32_t node, uint32_t port, const void *data, unsigned int sz) +{ + struct sockaddr_qrtr sq = {}; + int rc; + + sq.sq_family = AF_QIPCRTR; + sq.sq_node = node; + sq.sq_port = port; + + rc = sendto(sock, data, sz, MSG_DONTWAIT, (void *)&sq, sizeof(sq)); + if (rc < 0) { + dbg_time("sendto errno: %d (%s)\n", errno, strerror(errno)); + return -1; + } + + return 0; +} + +static int qrtr_recv(int sock, void *data, unsigned int sz, uint32_t *node, uint32_t *port) +{ + struct sockaddr_qrtr sq = {}; + socklen_t sl = sizeof(sq); + int rc; + + rc = recvfrom(sock, data, sz, MSG_DONTWAIT, (void *)&sq, &sl); + if (rc < 0) { + dbg_time("qrtr_recv errno: %d (%s)\n", errno, strerror(errno)); + } + + *node = sq.sq_node; + *port = sq.sq_port; + return rc; + } +#define qmi_recv qrtr_recv + +static int qrtr_ctrl_init(void) { + int sock; + int rc; + struct qrtr_ctrl_pkt pkt; + struct sockaddr_qrtr sq; + socklen_t sl = sizeof(sq); + + sock = qrtr_socket(); + if (sock == -1) + return -1; + + memset(&pkt, 0, sizeof(pkt)); + pkt.cmd = htole32(QRTR_TYPE_NEW_LOOKUP); + + getsockname(sock, (void *)&sq, &sl); + rc = qrtr_send(sock, sq.sq_node, QRTR_PORT_CTRL, &pkt, sizeof(pkt)); + if (rc == -1) { + dbg_time("qrtr_send errno: %d (%s)\n", errno, strerror(errno)); + close(sock); + return -1; + } + + return sock; +} + +static void handle_server_change(uint32_t type, struct qrtr_ctrl_pkt *ppkt) { + struct qrtr_ctrl_pkt pkt = *ppkt; + QrtrService s; + + s.service = le32toh(pkt.server.service); + s.version = le32toh(pkt.server.instance) & 0xff; + s.instance = le32toh(pkt.server.instance) >> 8; + s.node = le32toh(pkt.server.node); + s.port = le32toh(pkt.server.port); + + if (debug_qmi) + dbg_time ("[qrtr] %s server on %u:%u -> service %u, version %u, instance %u", + QRTR_TYPE_NEW_SERVER == type ? 
"add" : "remove", + s.node, s.port, s.service, s.version, s.instance); + + if (s.node != node_modem) + return; //we only care modem + + if (QRTR_TYPE_NEW_SERVER == type) { + if (s.service < QRTR_MAX) { + service_list[s.service] = s; + } + } + else if (QRTR_TYPE_DEL_SERVER == type) { + if (s.service < QRTR_MAX) { + memset(&service_list[s.service], 0, sizeof(QrtrService)); + } + } + } + +static void handle_ctrl_pkt(int sock) { + struct qrtr_ctrl_pkt pkt; + struct sockaddr_qrtr sq; + socklen_t sl = sizeof(sq); + uint32_t type; + int rc; + + rc = recvfrom(sock, &pkt, sizeof(pkt), 0, (void *)&sq, &sl); + if (rc < 0) + return; + + type = le32toh(pkt.cmd); + if (debug_qmi) + dbg_time("type %u, node %u, sq.port %x, len: %d", type, sq.sq_node, sq.sq_port, rc); + + if (sq.sq_port != QRTR_PORT_CTRL) + return; + + if (QRTR_TYPE_NEW_SERVER == type || QRTR_TYPE_DEL_SERVER == type) { + handle_server_change(type, &pkt); + } +} + +static int get_client(UCHAR QMIType) { + int ClientId; + QrtrService *s = &service_list[QMIType]; + + if (!s ->service) { + dbg_time("%s service: %d for QMIType: %d", __func__, s ->service, QMIType); + return -ENODEV; + } + +#ifdef USE_LINUX_MSM_IPC + ClientId = msm_ipc_socket("xport"); +#else + ClientId = qrtr_socket(); +#endif + if (ClientId == -1) { + return 0; + } + + switch (QMIType) { + case QMUX_TYPE_WDS: dbg_time("Get clientWDS = %d", ClientId); break; + case QMUX_TYPE_DMS: dbg_time("Get clientDMS = %d", ClientId); break; + case QMUX_TYPE_NAS: dbg_time("Get clientNAS = %d", ClientId); break; + case QMUX_TYPE_QOS: dbg_time("Get clientQOS = %d", ClientId); break; + case QMUX_TYPE_WMS: dbg_time("Get clientWMS = %d", ClientId); break; + case QMUX_TYPE_PDS: dbg_time("Get clientPDS = %d", ClientId); break; + case QMUX_TYPE_UIM: dbg_time("Get clientUIM = %d", ClientId); break; + case QMUX_TYPE_WDS_ADMIN: dbg_time("Get clientWDA = %d", ClientId); + break; + default: break; + } + + return ClientId; +} + +static void handle_alloc_client(PROFILE_T *profile) { + int srv_list[] = {QMUX_TYPE_WDS, QMUX_TYPE_NAS, QMUX_TYPE_UIM, QMUX_TYPE_DMS, QMUX_TYPE_WDS_ADMIN}; + size_t i = 0, srv_ready = 0; + static int report = -1; + + if (report != -1) + return; + + for(i = 0; i < sizeof(srv_list)/sizeof(srv_list[0]); i++) { + int srv = srv_list[i]; + + if (service_list[srv].service) + srv_ready++; + else + continue; + + if (qmiclientId[srv] == 0) { + qmiclientId[srv] = get_client(srv); + + if (qmiclientId[srv] != 0) { + if (srv == QMUX_TYPE_WDS) { + qmiclientId[QMUX_TYPE_WDS_IPV6] = get_client(QMUX_TYPE_WDS); + } + else if (srv == QMUX_TYPE_WDS_ADMIN) { + profile->wda_client = qmiclientId[QMUX_TYPE_WDS_ADMIN]; + } + } + } + } + + if (srv_ready == sizeof(srv_list)/sizeof(srv_list[0])) { + if (qmiclientId[QMUX_TYPE_WDS]) { + qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_CONNECTED); + } else { + qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_DISCONNECTED); + } + report = 1; + } +} + +static int qmi_send(PQCQMIMSG pRequest) { + uint8_t QMIType = pRequest->QMIHdr.QMIType; + int sock; + QrtrService *s = &service_list[QMIType == QMUX_TYPE_WDS_IPV6 ? 
QMUX_TYPE_WDS: QMIType]; + sock = qmiclientId[QMIType]; + + pRequest->QMIHdr.ClientId = 0xaa; + if (!s ->service || !sock) { + dbg_time("%s service: %d, sock: %d for QMIType: %d", __func__, s ->service, sock, QMIType); + return -ENODEV; + } + +#ifdef USE_LINUX_MSM_IPC + return xport_send(sock, s->node, s->port, &pRequest->MUXMsg, + le16_to_cpu(pRequest->QMIHdr.Length) + 1 - sizeof(QCQMI_HDR)); +#else + return qrtr_send(sock, s->node, s->port, &pRequest->MUXMsg, + le16_to_cpu(pRequest->QMIHdr.Length) + 1 - sizeof(QCQMI_HDR)); +#endif +} + +static int qmi_deinit(void) { + unsigned int i; + + for (i = 0; i < sizeof(qmiclientId)/sizeof(qmiclientId[0]); i++) + { + if (qmiclientId[i] != 0) + { + close(qmiclientId[i]); + qmiclientId[i] = 0; + } + } + + return 0; +} + +static void * qmi_read(void *pData) { + PROFILE_T *profile = (PROFILE_T *)pData; + int ctrl_sock; + int wait_for_request_quit = 0; + +#ifdef USE_LINUX_MSM_IPC + ctrl_sock = xport_ctrl_init(); + if (ctrl_sock != -1) + qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_CONNECTED); +#else + ctrl_sock = qrtr_ctrl_init(); +#endif + + if (ctrl_sock == -1) + goto _quit; + + while (1) { + struct pollfd pollfds[16] = {{qmidevice_control_fd[1], POLLIN, 0}, {ctrl_sock, POLLIN, 0}}; + int ne, ret, nevents = 2; + unsigned int i; + + for (i = 0; i < sizeof(qmiclientId)/sizeof(qmiclientId[0]); i++) + { + if (qmiclientId[i] != 0) + { + pollfds[nevents].fd = qmiclientId[i]; + pollfds[nevents].events = POLLIN; + pollfds[nevents].revents = 0; + nevents++; + } + } + + do { + ret = poll(pollfds, nevents, wait_for_request_quit ? 1000 : -1); + } while ((ret < 0) && (errno == EINTR)); + + if (ret == 0 && wait_for_request_quit) { + QmiThreadRecvQMI(NULL); //main thread may pending on QmiThreadSendQMI() + continue; + } + + if (ret <= 0) { + dbg_time("%s poll=%d, errno: %d (%s)", __func__, ret, errno, strerror(errno)); + break; + } + + for (ne = 0; ne < nevents; ne++) { + int fd = pollfds[ne].fd; + short revents = pollfds[ne].revents; + + if (revents & (POLLERR | POLLHUP | POLLNVAL)) { + dbg_time("%s poll err/hup/inval", __func__); + dbg_time("epoll fd = %d, events = 0x%04x", fd, revents); + if (fd == qmidevice_control_fd[1]) { + } else { + } + if (revents & (POLLERR | POLLHUP | POLLNVAL)) + goto _quit; + } + + if ((revents & POLLIN) == 0) + continue; + + if (fd == qmidevice_control_fd[1]) { + int triger_event; + if (read(fd, &triger_event, sizeof(triger_event)) == sizeof(triger_event)) { + //DBG("triger_event = 0x%x", triger_event); + switch (triger_event) { + case RIL_REQUEST_QUIT: + goto _quit; + break; + case SIG_EVENT_STOP: + wait_for_request_quit = 1; + break; + default: + break; + } + } + } + else if (fd == ctrl_sock) { + handle_ctrl_pkt(ctrl_sock); + handle_alloc_client(profile); + } + else + { + PQCQMIMSG pResponse = (PQCQMIMSG)cm_recv_buf; + int rc; + uint32_t sq_node = 0; + uint32_t sq_port = 0; + + rc = qmi_recv(fd, &pResponse->MUXMsg, sizeof(cm_recv_buf) - sizeof(QCQMI_HDR), &sq_node, &sq_port); + if (debug_qmi) + dbg_time("fd %d, node %u, port %x, len: %d", fd, sq_node, sq_port, rc); + + if (rc <= 0) + { + dbg_time("%s read=%d errno: %d (%s)", __func__, rc, errno, strerror(errno)); + break; + } + + for (i = 0; i < sizeof(qmiclientId)/sizeof(qmiclientId[0]); i++) + { + if (qmiclientId[i] == fd) + { + pResponse->QMIHdr.QMIType = i; + + if (service_list[i].node != sq_node || service_list[i].port != sq_port) { + continue; + } + } + } + + pResponse->QMIHdr.IFType = USB_CTL_MSG_TYPE_QMI; + pResponse->QMIHdr.Length = cpu_to_le16(rc + sizeof(QCQMI_HDR) - 
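+                    /* QCQMI Length counts every byte after the leading IFType byte, hence the -1 */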
1); + pResponse->QMIHdr.CtlFlags = 0x00; + pResponse->QMIHdr.ClientId = 0xaa; + + QmiThreadRecvQMI(pResponse); + } + } + } + +_quit: + qmi_deinit(); + close(ctrl_sock); + qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_DISCONNECTED); + QmiThreadRecvQMI(NULL); //main thread may pending on QmiThreadSendQMI() + dbg_time("%s exit", __func__); + pthread_exit(NULL); + return NULL; +} + +const struct qmi_device_ops qrtr_qmidev_ops = { + .deinit = qmi_deinit, + .send = qmi_send, + .read = qmi_read, +}; + diff --git a/package/wwan/driver/quectel_cm_5G/src/qrtr.h b/package/wwan/driver/quectel_cm_5G/src/qrtr.h new file mode 100644 index 000000000..d1727a819 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/qrtr.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _LINUX_QRTR_H +#define _LINUX_QRTR_H + +#include +#include + +#ifndef AF_QIPCRTR +#define AF_QIPCRTR 42 +#endif + +#define QRTR_NODE_BCAST 0xffffffffu +#define QRTR_PORT_CTRL 0xfffffffeu + +struct sockaddr_qrtr { + __kernel_sa_family_t sq_family; + __u32 sq_node; + __u32 sq_port; +}; + +enum qrtr_pkt_type { + QRTR_TYPE_DATA = 1, + QRTR_TYPE_HELLO = 2, + QRTR_TYPE_BYE = 3, + QRTR_TYPE_NEW_SERVER = 4, + QRTR_TYPE_DEL_SERVER = 5, + QRTR_TYPE_DEL_CLIENT = 6, + QRTR_TYPE_RESUME_TX = 7, + QRTR_TYPE_EXIT = 8, + QRTR_TYPE_PING = 9, + QRTR_TYPE_NEW_LOOKUP = 10, + QRTR_TYPE_DEL_LOOKUP = 11, +}; + +#define QRTR_TYPE_DEL_PROC 13 + +struct qrtr_ctrl_pkt { + __le32 cmd; + + union { + struct { + __le32 service; + __le32 instance; + __le32 node; + __le32 port; + } server; + + struct { + __le32 node; + __le32 port; + } client; + + struct { + __le32 rsvd; + __le32 node; + } proc; + + }; +} __attribute__ ((packed)); + +#define QRTR_PROTO_VER_1 1 + +struct qrtr_hdr_v1 { + __le32 version; + __le32 type; + __le32 src_node_id; + __le32 src_port_id; + __le32 confirm_rx; + __le32 size; + __le32 dst_node_id; + __le32 dst_port_id; +} __attribute__ ((packed)); + +#endif /* _LINUX_QRTR_H */ diff --git a/package/wwan/driver/quectel_cm_5G/src/quectel-atc-proxy.c b/package/wwan/driver/quectel_cm_5G/src/quectel-atc-proxy.c new file mode 100644 index 000000000..dc929ade6 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/quectel-atc-proxy.c @@ -0,0 +1,506 @@ +/****************************************************************************** + @file quectel-atc-proxy.c + @brief atc proxy. + + DESCRIPTION + Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules. + + INITIALIZATION AND SEQUENCING REQUIREMENTS + None. + + --------------------------------------------------------------------------- + Copyright (c) 2016 - 2020 Quectel Wireless Solution, Co., Ltd. All Rights Reserved. + Quectel Wireless Solution Proprietary and Confidential. + --------------------------------------------------------------------------- +******************************************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "qlist.h" +#include "QMIThread.h" +#include "atchannel.h" +#include "at_tok.h" + +#define dprintf(fmt, args...) 
do { fprintf(stdout, "%s " fmt, get_time(), ##args); } while(0); +#define SYSCHECK(c) do{if((c)<0) {dprintf("%s %d error: '%s' (code: %d)\n", __func__, __LINE__, strerror(errno), errno); return -1;}}while(0) +#define cfmakenoblock(fd) do{fcntl(fd, F_SETFL, fcntl(fd,F_GETFL) | O_NONBLOCK);}while(0) + +#define safe_free(__x) do { if (__x) { free((void *)__x); __x = NULL;}} while(0) +#define safe_at_response_free(__x) { if (__x) { at_response_free(__x); __x = NULL;}} + +#define at_response_error(err, p_response) \ + (err \ + || p_response == NULL \ + || p_response->finalResponse == NULL \ + || p_response->success == 0) + +typedef struct { + struct qlistnode qnode; + int ClientFd; + unsigned AccessTime; +} ATC_PROXY_CONNECTION; + +static int atc_proxy_quit = 0; +static pthread_t thread_id = 0; +static int atc_dev_fd = -1; +static int atc_proxy_server_fd = -1; +static struct qlistnode atc_proxy_connection; +static int verbose_debug = 0; +static int modem_reset_flag = 0; +static uint8_t atc_buf[4096]; +static int asr_style_atc = 0; +extern int asprintf(char **s, const char *fmt, ...); +static ATC_PROXY_CONNECTION *current_client_fd = NULL; + +static void dump_atc(uint8_t *pATC, int fd,int size, const char flag) +{ + if (verbose_debug) { + printf("%c %d:\n", flag, fd); + printf("%.*s\n", size, pATC); + } +} + +static int send_atc_to_client(int clientFd, uint8_t *pATC, int size) { + struct pollfd pollfds[]= {{clientFd, POLLOUT, 0}}; + ssize_t ret = 0; + + do { + ret = poll(pollfds, sizeof(pollfds)/sizeof(pollfds[0]), 5000); + } while (ret == -1 && errno == EINTR && atc_proxy_quit == 0); + + if (pollfds[0].revents & POLLOUT) { + ret = write(clientFd, pATC, size); + } + + return ret; +} + +static void onUnsolicited (const char *s, const char *sms_pdu) +{ + struct qlistnode *con_node; + int ret; + char buf[1024]; + + if(s) { + strcpy(buf, s); + strcat(buf, "\r\n"); + } + if(sms_pdu) { + strcat(buf, sms_pdu); + strcat(buf, "\r\n"); + } + + if(current_client_fd) { + ATC_PROXY_CONNECTION *atc_con = current_client_fd; + ret = send_atc_to_client(atc_con->ClientFd, (uint8_t *)buf, strlen(buf)); + if(ret < 0) { + close(atc_con->ClientFd); + qlist_remove(&atc_con->qnode); + free(atc_con); + } + return; + } + + qlist_for_each(con_node, &atc_proxy_connection) { + ATC_PROXY_CONNECTION *atc_con = qnode_to_item(con_node, ATC_PROXY_CONNECTION, qnode); + if(atc_con && atc_con->ClientFd > 0) { + ret = send_atc_to_client(atc_con->ClientFd, (uint8_t *)buf, strlen(buf)); + if(ret < 0) { + close(atc_con->ClientFd); + con_node = con_node->prev; + qlist_remove(&atc_con->qnode); + free(atc_con); + continue; + } + } + } +} + +static void onTimeout(void) { + dprintf("%s", __func__); + //TODO +} + +static void onClose(void) { + dprintf("%s", __func__); +} + +static int create_local_server(const char *name) { + int sockfd = -1; + int reuse_addr = 1; + struct sockaddr_un sockaddr; + socklen_t alen; + + /*Create server socket*/ + SYSCHECK(sockfd = socket(AF_LOCAL, SOCK_STREAM, 0)); + + memset(&sockaddr, 0, sizeof(sockaddr)); + sockaddr.sun_family = AF_LOCAL; + sockaddr.sun_path[0] = 0; + memcpy(sockaddr.sun_path + 1, name, strlen(name) ); + + alen = strlen(name) + offsetof(struct sockaddr_un, sun_path) + 1; + SYSCHECK(setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &reuse_addr,sizeof(reuse_addr))); + if(bind(sockfd, (struct sockaddr *)&sockaddr, alen) < 0) { + close(sockfd); + dprintf("bind %s errno: %d (%s)\n", name, errno, strerror(errno)); + return -1; + } + + dprintf("local server: %s sockfd = %d\n", name, sockfd); + 
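+    /*
+     * The listener lives in the Linux abstract unix-socket namespace
+     * (sun_path[0] == '\0'), so no filesystem node is created. A client must
+     * connect with the same leading NUL - a minimal sketch, assuming the proxy
+     * was started for /dev/ttyUSB2 (socket name "quectel-atc-proxy2"):
+     *
+     *   struct sockaddr_un addr = { .sun_family = AF_LOCAL };
+     *   const char *sock_name = "quectel-atc-proxy2";
+     *   int fd = socket(AF_LOCAL, SOCK_STREAM, 0);
+     *   memcpy(addr.sun_path + 1, sock_name, strlen(sock_name));
+     *   connect(fd, (struct sockaddr *)&addr,
+     *           strlen(sock_name) + offsetof(struct sockaddr_un, sun_path) + 1);
+     */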
cfmakenoblock(sockfd);
+    listen(sockfd, 1);
+
+    return sockfd;
+}
+
+static void accept_atc_connection(int serverfd) {
+    int clientfd = -1;
+    unsigned char addr[128];
+    socklen_t alen = sizeof(addr);
+    ATC_PROXY_CONNECTION *atc_con;
+
+    clientfd = accept(serverfd, (struct sockaddr *)addr, &alen);
+    if (clientfd < 0)
+        return; /* accept() can fail on a non-blocking listener; do not track an invalid fd */
+
+    atc_con = (ATC_PROXY_CONNECTION *)malloc(sizeof(ATC_PROXY_CONNECTION));
+    if (atc_con) {
+        qlist_init(&atc_con->qnode);
+        atc_con->ClientFd = clientfd;
+        atc_con->AccessTime = 0;
+        dprintf("+++ ClientFd=%d\n", atc_con->ClientFd);
+        qlist_add_tail(&atc_proxy_connection, &atc_con->qnode);
+    }
+
+    cfmakenoblock(clientfd);
+}
+
+static void cleanup_atc_connection(int clientfd) {
+    struct qlistnode *con_node;
+
+    qlist_for_each(con_node, &atc_proxy_connection) {
+        ATC_PROXY_CONNECTION *atc_con = qnode_to_item(con_node, ATC_PROXY_CONNECTION, qnode);
+        if (atc_con->ClientFd == clientfd) {
+            dprintf("--- ClientFd=%d\n", atc_con->ClientFd);
+            if (current_client_fd == atc_con) /* clear the dangling pointer before the node is freed */
+                current_client_fd = NULL;
+            close(atc_con->ClientFd);
+            qlist_remove(&atc_con->qnode);
+            free(atc_con);
+            break;
+        }
+    }
+}
+
+static int atc_proxy_init(void) {
+    int err;
+    char *cmd;
+    ATResponse *p_response = NULL;
+
+    err = at_handshake();
+    if (err) {
+        dprintf("AT handshake failed\n");
+        goto exit;
+    }
+
+    at_send_command_singleline("AT+QCFG=\"usbnet\"", "+QCFG:", NULL);
+    at_send_command_multiline("AT+QNETDEVCTL=?", "+QNETDEVCTL:", NULL);
+    at_send_command("AT+CGREG=2", NULL);  //GPRS Network Registration Status
+    at_send_command("AT+CEREG=2", NULL);  //EPS Network Registration Status
+    at_send_command("AT+C5GREG=2", NULL); //5GS Network Registration Status
+
+    err = at_send_command_singleline("AT+QNETDEVSTATUS=?", "+QNETDEVSTATUS:", &p_response);
+    if (at_response_error(err, p_response))
+        asr_style_atc = 1; //EC200T/EC100Y do not support this AT command, but RG801/RG500U do
+
+    safe_at_response_free(p_response);
+
+    err = at_send_command_singleline("AT+QCFG=\"NAT\"", "+QCFG:", &p_response);
+    if (!at_response_error(err, p_response)) {
+        int old_nat, new_nat = asr_style_atc ? 
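+            /* pick the NAT mode this proxy expects: 1 for ASR-style modules, 0 otherwise (inferred from the asr_style_atc probe above, not a documented rule) */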
1 : 0; + + err = at_tok_scanf(p_response->p_intermediates->line, "%s%d", NULL, &old_nat); + if (err == 2 && old_nat != new_nat) { + safe_at_response_free(p_response); + asprintf(&cmd, "AT+QCFG=\"NAT\",%d", new_nat); + err = at_send_command(cmd, &p_response); + safe_free(cmd); + if (!at_response_error(err, p_response)) { + err = at_send_command("at+cfun=1,1",NULL); + } + safe_at_response_free(p_response); + } + err = 0; + } + safe_at_response_free(p_response); + +exit: + return err; +} + +static void atc_start_server(const char* servername) { + atc_proxy_server_fd = create_local_server(servername); + dprintf("atc_proxy_server_fd = %d\n", atc_proxy_server_fd); + if (atc_proxy_server_fd == -1) { + dprintf("Failed to create %s, errno: %d (%s)\n", servername, errno, strerror(errno)); + } +} + +static void atc_close_server(const char* servername) { + if (atc_proxy_server_fd != -1) { + dprintf("%s %s close server\n", __func__, servername); + close(atc_proxy_server_fd); + atc_proxy_server_fd = -1; + } +} + +static void *atc_proxy_loop(void *param) +{ + uint8_t *pATC = atc_buf; + struct qlistnode *con_node; + ATC_PROXY_CONNECTION *atc_con; + + (void)param; + dprintf("%s enter thread_id %p\n", __func__, (void *)pthread_self()); + + qlist_init(&atc_proxy_connection); + while (atc_dev_fd > 0 && atc_proxy_quit == 0) { + struct pollfd pollfds[2+64]; + int ne, ret, nevents = 0; + ssize_t nreads; + + pollfds[nevents].fd = atc_dev_fd; + pollfds[nevents].events = POLLIN; + pollfds[nevents].revents= 0; + nevents++; + + if (atc_proxy_server_fd > 0) { + pollfds[nevents].fd = atc_proxy_server_fd; + pollfds[nevents].events = POLLIN; + pollfds[nevents].revents= 0; + nevents++; + } + + qlist_for_each(con_node, &atc_proxy_connection) { + atc_con = qnode_to_item(con_node, ATC_PROXY_CONNECTION, qnode); + + pollfds[nevents].fd = atc_con->ClientFd; + pollfds[nevents].events = POLLIN; + pollfds[nevents].revents= 0; + nevents++; + + if (nevents == (sizeof(pollfds)/sizeof(pollfds[0]))) + break; + } + + do { + ret = poll(pollfds, nevents, (atc_proxy_server_fd > 0) ? -1 : 200); + } while (ret == -1 && errno == EINTR && atc_proxy_quit == 0); + + if (ret < 0) { + dprintf("%s poll=%d, errno: %d (%s)\n", __func__, ret, errno, strerror(errno)); + goto atc_proxy_loop_exit; + } + + for (ne = 0; ne < nevents; ne++) { + int fd = pollfds[ne].fd; + short revents = pollfds[ne].revents; + + if (revents & (POLLERR | POLLHUP | POLLNVAL)) { + dprintf("%s poll fd = %d, revents = %04x\n", __func__, fd, revents); + if (fd == atc_dev_fd) { + goto atc_proxy_loop_exit; + } else if(fd == atc_proxy_server_fd) { + + } else { + cleanup_atc_connection(fd); + } + continue; + } + + if (!(pollfds[ne].revents & POLLIN)) { + continue; + } + + if (fd == atc_proxy_server_fd) { + accept_atc_connection(fd); + } + else if (fd == atc_dev_fd) { + usleep(10*1000); //let atchannel.c read at response. 
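+                /*
+                 * Data from the AT device is consumed by atchannel.c's reader thread,
+                 * so this branch only paces the loop and watches for a modem reset.
+                 * A client just writes raw AT over the local socket - a hypothetical
+                 * session, reusing the client connect() sketch near create_local_server():
+                 *
+                 *   write(fd, "AT+CSQ\r\n", 8);
+                 *   // lines the modem reports are pushed back through onUnsolicited()
+                 */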
+                if (modem_reset_flag)
+                    goto atc_proxy_loop_exit;
+            }
+            else {
+                memset(atc_buf, 0x0, sizeof(atc_buf));
+                nreads = read(fd, pATC, sizeof(atc_buf));
+                if (nreads <= 0) {
+                    dprintf("%s read=%d errno: %d (%s)", __func__, (int)nreads, errno, strerror(errno));
+                    cleanup_atc_connection(fd);
+                    break;
+                }
+
+                dump_atc(pATC, fd, nreads, 'r');
+                qlist_for_each(con_node, &atc_proxy_connection) {
+                    atc_con = qnode_to_item(con_node, ATC_PROXY_CONNECTION, qnode);
+                    if (atc_con->ClientFd == fd) { /* match the connection that produced this data, not pollfds[nevents] (one past the last entry) */
+                        current_client_fd = atc_con;
+                        break;
+                    }
+                }
+                at_send_command((const char *)pATC, NULL);
+                current_client_fd = NULL;
+            }
+        }
+    }
+
+atc_proxy_loop_exit:
+    at_close();
+    while (!qlist_empty(&atc_proxy_connection)) {
+        ATC_PROXY_CONNECTION *atc_con = qnode_to_item(qlist_head(&atc_proxy_connection), ATC_PROXY_CONNECTION, qnode);
+        cleanup_atc_connection(atc_con->ClientFd);
+    }
+    dprintf("%s exit, thread_id %p\n", __func__, (void *)pthread_self());
+
+    return NULL;
+}
+
+static void usage(void) {
+    dprintf(" -d <device>    AT command device (default /dev/ttyUSB2; the AT port may differ between modules)\n"
+            " -i             netcard name\n"
+            " -v             Verbose, show all details\n");
+}
+
+static void sig_action(int sig) {
+    if (atc_proxy_quit == 0) {
+        atc_proxy_quit = 1;
+        if (thread_id)
+            pthread_kill(thread_id, sig);
+    }
+}
+
+int main(int argc, char *argv[]) {
+    int opt;
+    char atc_dev[32+1] = "/dev/ttyUSB2";
+    int retry_times = 0;
+    char servername[64] = {0};
+
+    optind = 1;
+    signal(SIGINT, sig_action);
+
+    while (-1 != (opt = getopt(argc, argv, "d:i:vh"))) {
+        switch (opt) {
+        case 'd':
+            strncpy(atc_dev, optarg, sizeof(atc_dev) - 1); /* bounded copy; atc_dev is 33 bytes */
+            atc_dev[sizeof(atc_dev) - 1] = '\0';
+            break;
+        case 'i':
+            /* netcard name: accepted for command-line compatibility, unused by the AT proxy */
+            break;
+        case 'v':
+            verbose_debug = 1;
+            break;
+        default:
+            usage();
+            return 0;
+        }
+    }
+
+    if (access(atc_dev, R_OK | W_OK)) {
+        dprintf("Failed to access %s, errno: %d (%s). break\n", atc_dev, errno, strerror(errno));
+        return -1;
+    }
+
+    sprintf(servername, "quectel-atc-proxy%c", atc_dev[strlen(atc_dev) - 1]);
+    dprintf("Will use atc-dev='%s', proxy='%s'\n", atc_dev, servername);
+
+    while (atc_proxy_quit == 0) {
+        if (access(atc_dev, R_OK | W_OK)) {
+            dprintf("Failed to access %s, errno: %d (%s). continue\n", atc_dev, errno, strerror(errno));
+            // wait for the device to appear
+            sleep(3);
+            continue;
+        }
+
+        atc_dev_fd = open(atc_dev, O_RDWR | O_NONBLOCK | O_NOCTTY);
+        if (atc_dev_fd == -1) {
+            dprintf("Failed to open %s, errno: %d (%s). 
break\n", atc_dev, errno, strerror(errno)); + return -1; + } + cfmakenoblock(atc_dev_fd); + if (at_open(atc_dev_fd, onUnsolicited, 1)) { + close(atc_dev_fd); + atc_dev_fd = -1; + } + at_set_on_timeout(onTimeout); + at_set_on_reader_closed(onClose); + + /* no atc_proxy_loop lives, create one */ + pthread_create(&thread_id, NULL, atc_proxy_loop, NULL); + /* try to redo init if failed, init function must be successfully */ + while (atc_proxy_init() != 0) { + if (retry_times < 5) { + dprintf("fail to init proxy, try again in 2 seconds.\n"); + sleep(2); + retry_times++; + } else { + dprintf("has failed too much times, restart the modem and have a try...\n"); + break; + } + /* break loop if modem is detached */ + if (access(atc_dev, F_OK|R_OK|W_OK)) + break; + } + retry_times = 0; + atc_start_server(servername); + if (atc_proxy_server_fd == -1) + pthread_cancel(thread_id); + pthread_join(thread_id, NULL); + + /* close local server at last */ + atc_close_server(servername); + close(atc_dev_fd); + /* DO RESTART IN 20s IF MODEM RESET ITSELF */ + if (modem_reset_flag) { + unsigned int time_to_wait = 20; + while (time_to_wait) { + time_to_wait = sleep(time_to_wait); + } + modem_reset_flag = 0; + } + } + + return 0; +} \ No newline at end of file diff --git a/package/wwan/driver/quectel_cm_5G/src/quectel-mbim-proxy.c b/package/wwan/driver/quectel_cm_5G/src/quectel-mbim-proxy.c new file mode 100644 index 000000000..5cf6f41f9 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/quectel-mbim-proxy.c @@ -0,0 +1,453 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "qendian.h" + +#define QUECTEL_MBIM_PROXY "quectel-mbim-proxy" +#define safe_close(_fd) do { if (_fd > 0) { close(_fd); _fd = -1; } } while(0) + +#define CM_MAX_CLIENT 32 +#define TID_MASK (0xFFFFFF) +#define TID_SHIFT (24) + +typedef enum { + MBIM_OPEN_MSG = 1, + MBIM_CLOSE_MSG = 2, + MBIM_OPEN_DONE = 0x80000001, + MBIM_CLOSE_DONE = 0x80000002, +} MBIM_MSG; + +typedef struct { + unsigned int MessageType; + unsigned int MessageLength; + unsigned int TransactionId; +} MBIM_MESSAGE_HEADER; + +typedef struct { + MBIM_MESSAGE_HEADER MessageHeader; + unsigned int MaxControlTransfer; +} MBIM_OPEN_MSG_T; + +typedef struct { + MBIM_MESSAGE_HEADER MessageHeader; + unsigned int Status; +} MBIM_OPEN_DONE_T; + +typedef struct { + int client_fd; + int client_idx; +} CM_CLIENT_T; + +static unsigned char cm_recv_buffer[4096]; +static CM_CLIENT_T cm_clients[CM_MAX_CLIENT]; +static int verbose = 0; + +const char * get_time(void) { + static char time_buf[128]; + struct timeval tv; + time_t time; + suseconds_t millitm; + struct tm *ti; + + gettimeofday (&tv, NULL); + + time= tv.tv_sec; + millitm = (tv.tv_usec + 500) / 1000; + + if (millitm == 1000) { + ++time; + millitm = 0; + } + + ti = localtime(&time); + sprintf(time_buf, "[%02d-%02d_%02d:%02d:%02d:%03d]", ti->tm_mon+1, ti->tm_mday, ti->tm_hour, ti->tm_min, ti->tm_sec, (int)millitm); + return time_buf; +} + +#define mbim_debug(fmt, args...) 
do { fprintf(stdout, "%s " fmt, get_time(), ##args); } while(0); + +static int non_block_write(int fd, void *data, int len) +{ + int ret; + struct pollfd pollfd = {fd, POLLOUT, 0}; + ret = poll(&pollfd, 1, 3000); + + if (ret <= 0) { + mbim_debug("%s poll ret=%d, errno: %d(%s)\n", __func__, ret, errno, strerror(errno)); + } + + ret = write (fd, data, len); + if (ret != len) + mbim_debug("%s write ret=%d, errno: %d(%s)\n", __func__, ret, errno, strerror(errno)); + + return len; +} + +static int mbim_send_open_msg(int mbim_dev_fd, uint32_t MaxControlTransfer) { + MBIM_OPEN_MSG_T open_msg; + MBIM_OPEN_MSG_T *pRequest = &open_msg; + + pRequest->MessageHeader.MessageType = htole32(MBIM_OPEN_MSG); + pRequest->MessageHeader.MessageLength = htole32(sizeof(MBIM_OPEN_MSG_T)); + pRequest->MessageHeader.TransactionId = htole32(1); + pRequest->MaxControlTransfer = htole32(MaxControlTransfer); + + mbim_debug("%s()\n", __func__); + return non_block_write(mbim_dev_fd, pRequest, sizeof(MBIM_OPEN_MSG_T)); +} + +/* + * parameter: proxy name + * return: local proxy server fd or -1 +*/ +static int proxy_make_server(const char *proxy_name) +{ + int len, flag; + struct sockaddr_un sockaddr; + int mbim_server_fd; + + mbim_server_fd = socket(AF_LOCAL, SOCK_STREAM, 0); + if (mbim_server_fd < 0) { + mbim_debug("socket failed: %s\n", strerror(errno)); + return -1; + } + if (fcntl(mbim_server_fd, F_SETFL, fcntl(mbim_server_fd, F_GETFL) | O_NONBLOCK) < 0) + mbim_debug("fcntl set server(%d) NONBLOCK attribute failed: %s\n", mbim_server_fd, strerror(errno)); + + memset(&sockaddr, 0, sizeof(sockaddr)); + sockaddr.sun_family = AF_LOCAL; + sockaddr.sun_path[0] = 0; + snprintf(sockaddr.sun_path, UNIX_PATH_MAX, "0%s", proxy_name); + sockaddr.sun_path[0] = '\0'; // string starts with leading '\0' + flag = 1; + if (setsockopt(mbim_server_fd, SOL_SOCKET, SO_REUSEADDR, &flag, sizeof(flag)) < 0) { + safe_close(mbim_server_fd); + mbim_debug("setsockopt failed\n"); + } + + len = strlen(proxy_name) + offsetof(struct sockaddr_un, sun_path) + 1; + if (bind(mbim_server_fd, (struct sockaddr*)&sockaddr, len) < 0) { + safe_close(mbim_server_fd); + mbim_debug("bind failed: %s\n", strerror(errno)); + return -1; + } + + listen(mbim_server_fd, 4); + return mbim_server_fd; +} + +static int handle_client_connect(int server_fd) +{ + int i, client_fd; + struct sockaddr_in cli_addr; + socklen_t len = sizeof(cli_addr); + + client_fd = accept(server_fd, (struct sockaddr *)&cli_addr, &len); + if (client_fd < 0) { + mbim_debug("proxy accept failed: %s\n", strerror(errno)); + return -1; + } + + if (fcntl(client_fd, F_SETFL, fcntl(client_fd, F_GETFL) | O_NONBLOCK) < 0) + mbim_debug("fcntl set client(%d) NONBLOCK attribute failed: %s\n", client_fd, strerror(errno)); + + for (i = 0; i < CM_MAX_CLIENT; i++) { + if (cm_clients[i].client_fd <= 0) { + cm_clients[i].client_fd = client_fd; + cm_clients[i].client_idx= i+1; + mbim_debug("%s client_fd=%d, client_idx=%d\n", __func__, cm_clients[i].client_fd, cm_clients[i].client_idx); + return 0; + } + } + + close(client_fd); + return -1; +} + +static void handle_client_disconnect(int client_fd) +{ + int i; + + for (i = 0; i < CM_MAX_CLIENT; i++) { + if (cm_clients[i].client_fd == client_fd) { + mbim_debug("%s client_fd=%d, client_idx=%d\n", __func__, cm_clients[i].client_fd, cm_clients[i].client_idx); + safe_close(cm_clients[i].client_fd); + return; + } + } +} + +static int handle_client_request(int mbim_dev_fd, int client_fd, void *pdata, int len) +{ + int i; + int client_idx = -1; + int ret; + MBIM_MESSAGE_HEADER 
*pRequest = (MBIM_MESSAGE_HEADER *)pdata;
+    unsigned int TransactionId = le32toh(pRequest->TransactionId);
+
+    for (i = 0; i < CM_MAX_CLIENT; i++) {
+        if (cm_clients[i].client_fd == client_fd) {
+            client_idx = cm_clients[i].client_idx;
+            break;
+        }
+    }
+
+    if (client_idx == -1) {
+        goto error;
+    }
+
+    if (le32toh(pRequest->MessageType) == MBIM_OPEN_MSG
+        || le32toh(pRequest->MessageType) == MBIM_CLOSE_MSG) {
+        /* the proxy already opened the device; answer OPEN/CLOSE locally with success */
+        MBIM_OPEN_DONE_T OpenDone;
+        OpenDone.MessageHeader.MessageType = htole32(le32toh(pRequest->MessageType) | 0x80000000);
+        OpenDone.MessageHeader.MessageLength = htole32(sizeof(OpenDone));
+        OpenDone.MessageHeader.TransactionId = htole32(TransactionId);
+        OpenDone.Status = htole32(0);
+        non_block_write(client_fd, &OpenDone, sizeof(OpenDone));
+        return 0;
+    }
+
+    /* stamp the client index into the TransactionId so the response can be routed back to its sender */
+    pRequest->TransactionId = htole32(TransactionId | (client_idx << TID_SHIFT));
+    if (verbose) mbim_debug("REQ client_fd=%d, client_idx=%d, tid=%u\n",
+        cm_clients[client_idx].client_fd, cm_clients[client_idx].client_idx, TransactionId);
+    ret = non_block_write(mbim_dev_fd, pRequest, len);
+    if (ret == len)
+        return 0;
+
+error:
+    return -1;
+}
+
+/*
+ * Read a message from the device and forward it to the matching client(s).
+ * Notice:
+ *     unsolicited messages are broadcast to all clients
+ */
+static int handle_device_response(void *pdata, int len)
+{
+    int i;
+    MBIM_MESSAGE_HEADER *pResponse = (MBIM_MESSAGE_HEADER *)pdata;
+    unsigned int TransactionId = le32toh(pResponse->TransactionId);
+
+    /* unsolicited / function-error message: broadcast to every client */
+    if (TransactionId == 0) {
+        for (i = 0; i < CM_MAX_CLIENT; i++) {
+            if (cm_clients[i].client_fd > 0) {
+                non_block_write(cm_clients[i].client_fd, pResponse, len);
+            }
+        }
+    }
+    else {
+        /* try to find the sender */
+        int client_idx = (TransactionId >> TID_SHIFT);
+
+        for (i = 0; i < CM_MAX_CLIENT; i++) {
+            if (cm_clients[i].client_idx == client_idx && cm_clients[i].client_fd > 0) {
+                TransactionId &= TID_MASK;
+                pResponse->TransactionId = htole32(TransactionId);
+                if (verbose) mbim_debug("RSP client_fd=%d, client_idx=%d, tid=%u\n",
+                    cm_clients[i].client_fd, cm_clients[i].client_idx, TransactionId);
+                non_block_write(cm_clients[i].client_fd, pResponse, len);
+                break;
+            }
+        }
+
+        if (i == CM_MAX_CLIENT) {
+            mbim_debug("%s no client waiting for tid=%u\n", __func__, TransactionId);
+        }
+    }
+
+    return 0;
+}
+
+static int proxy_loop(int mbim_dev_fd)
+{
+    int i;
+    int mbim_server_fd = -1;
+
+    while (mbim_dev_fd > 0) {
+        struct pollfd pollfds[2+CM_MAX_CLIENT];
+        int ne, ret, nevents = 0;
+
+        pollfds[nevents].fd = mbim_dev_fd;
+        pollfds[nevents].events = POLLIN;
+        pollfds[nevents].revents = 0;
+        nevents++;
+
+        if (mbim_server_fd > 0) {
+            pollfds[nevents].fd = mbim_server_fd;
+            pollfds[nevents].events = POLLIN;
+            pollfds[nevents].revents = 0;
+            nevents++;
+
+            for (i = 0; i < CM_MAX_CLIENT; i++) {
+                if (cm_clients[i].client_fd > 0) {
+                    pollfds[nevents].fd = cm_clients[i].client_fd;
+                    pollfds[nevents].events = POLLIN;
+                    pollfds[nevents].revents = 0;
+                    nevents++;
+                }
+            }
+        }
+
+        ret = poll(pollfds, nevents, (mbim_server_fd > 0) ? 
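+        /* once the local server exists, block forever; before that the 10 s timeout is a deadline for MBIM_OPEN_DONE (timeout falls into the error path) */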
-1 : (10*1000)); + if (ret <= 0) { + goto error; + } + + for (ne = 0; ne < nevents; ne++) { + int fd = pollfds[ne].fd; + short revents = pollfds[ne].revents; + + if (revents & (POLLERR | POLLHUP | POLLNVAL)) { + mbim_debug("%s poll fd = %d, revents = %04x\n", __func__, fd, revents); + if (fd == mbim_dev_fd) { + goto error; + } else if(fd == mbim_server_fd) { + + } else { + handle_client_disconnect(fd); + } + continue; + } + + if (!(pollfds[ne].revents & POLLIN)) { + continue; + } + + if (fd == mbim_server_fd) { + handle_client_connect(fd); + } + else { + int len = read(fd, cm_recv_buffer, sizeof(cm_recv_buffer)); + + if (len <= 0) { + mbim_debug("%s read fd=%d, len=%d, errno: %d(%s)\n", __func__, fd, len, errno, strerror(errno)); + if (fd == mbim_dev_fd) + goto error; + else + handle_client_disconnect(fd); + + return len; + } + + if (fd == mbim_dev_fd) { + if (mbim_server_fd == -1) { + MBIM_OPEN_DONE_T *pOpenDone = (MBIM_OPEN_DONE_T *)cm_recv_buffer; + + if (le32toh(pOpenDone->MessageHeader.MessageType) == MBIM_OPEN_DONE) { + mbim_debug("receive MBIM_OPEN_DONE, status=%d\n", htole32(pOpenDone->Status)); + if (htole32(pOpenDone->Status)) + goto error; + mbim_server_fd = proxy_make_server(QUECTEL_MBIM_PROXY); + mbim_debug("mbim_server_fd=%d\n", mbim_server_fd); + } + } + else { + handle_device_response(cm_recv_buffer, len); + } + } + else { + handle_client_request(mbim_dev_fd, fd, cm_recv_buffer, len); + } + } + } + } + +error: + safe_close(mbim_server_fd); + for (i = 0; i < CM_MAX_CLIENT; i++) { + safe_close(cm_clients[i].client_fd); + } + + mbim_debug("%s exit\n", __func__); + return 0; +} + +/* + * How to use this proxy? + * 1. modprobe -a 8021q + * 2. Create network interface for channels: + * ip link add link wwan0 name wwan0.1 type vlan id 1 + * ip link add link wwan0 name wwan0.2 type vlan id 2 + * 3. Start './mbim-proxy' with -d 'device' + * 4. Start Clients: ./quectel-CM -n id1 + * 5. Start Clients: ./quectel-CM -n id2 + * ... + * Notice: + * mbim-proxy can work in backgroud as a daemon + * '-n' sessionID + * The modem may not support multi-PDN mode or how many PDN it supports is undefined. It depends!!! + * Besides, some modem also may not support some sessionID. For instance EC20 doesn't support SessionId 1... 
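+ *
+ * Implementation note: clients are multiplexed by stamping a client index into
+ * the top bits of each request's TransactionId (tid | (client_idx << TID_SHIFT)),
+ * e.g. client_idx 3, tid 7 -> 0x03000007 on the wire, masked back to 7 with
+ * TID_MASK before the matching response is handed to that client.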
+ */ +int main(int argc, char **argv) +{ + int optidx = 0; + int opt; + char *optstr = "d:vh"; + const char *device = "/dev/cdc-wdm0"; + + struct option options[] = { + {"verbose", no_argument, NULL, 'v'}, + {"device", required_argument, NULL, 'd'}, + {0, 0, 0, 0}, + }; + while ((opt = getopt_long(argc, argv, optstr, options, &optidx)) != -1) { + switch (opt) { + case 'v': + verbose = 1; + break; + case 'd': + device = optarg; + break; + case 'h': + mbim_debug("-h Show this message\n"); + mbim_debug("-v Verbose\n"); + mbim_debug("-d [device] MBIM device\n"); + return 0; + default: + mbim_debug("illegal argument\n"); + return -1; + } + } + + if (!device) { + mbim_debug("Missing parameter: device\n"); + return -1; + } + + while (1) { + int mbim_dev_fd = open(device, O_RDWR | O_NONBLOCK | O_NOCTTY); + if (mbim_dev_fd < 0) { + mbim_debug("cannot open mbim_device %s: %s\n", device, strerror(errno)); + sleep(2); + continue; + } + mbim_debug ("mbim_dev_fd=%d\n", mbim_dev_fd); + + memset(cm_clients, 0, sizeof(cm_clients)); + mbim_send_open_msg(mbim_dev_fd, sizeof(cm_recv_buffer)); + proxy_loop(mbim_dev_fd); + safe_close(mbim_dev_fd); + } + + return -1; +} diff --git a/package/wwan/driver/quectel_cm_5G/src/quectel-qmi-proxy.c b/package/wwan/driver/quectel_cm_5G/src/quectel-qmi-proxy.c new file mode 100644 index 000000000..e1d28bcaf --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/quectel-qmi-proxy.c @@ -0,0 +1,694 @@ +/****************************************************************************** + @file quectel-qmi-proxy.c + @brief The qmi proxy. + + DESCRIPTION + Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules. + + INITIALIZATION AND SEQUENCING REQUIREMENTS + None. + + --------------------------------------------------------------------------- + Copyright (c) 2016 - 2020 Quectel Wireless Solution, Co., Ltd. All Rights Reserved. + Quectel Wireless Solution Proprietary and Confidential. + --------------------------------------------------------------------------- +******************************************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "qendian.h" +#include "qlist.h" +#include "MPQMI.h" +#include "MPQCTL.h" +#include "MPQMUX.h" + +#ifndef MIN +#define MIN(a, b) ((a) < (b)? (a): (b)) +#endif + +const char * get_time(void) { + static char time_buf[128]; + struct timeval tv; + time_t time; + suseconds_t millitm; + struct tm *ti; + + gettimeofday (&tv, NULL); + + time= tv.tv_sec; + millitm = (tv.tv_usec + 500) / 1000; + + if (millitm == 1000) { + ++time; + millitm = 0; + } + + ti = localtime(&time); + sprintf(time_buf, "[%02d-%02d_%02d:%02d:%02d:%03d]", ti->tm_mon+1, ti->tm_mday, ti->tm_hour, ti->tm_min, ti->tm_sec, (int)millitm); + return time_buf; +} + +#define dprintf(fmt, args...) 
do { fprintf(stdout, "%s " fmt, get_time(), ##args); } while(0); +#define SYSCHECK(c) do{if((c)<0) {dprintf("%s %d error: '%s' (code: %d)\n", __func__, __LINE__, strerror(errno), errno); return -1;}}while(0) +#define cfmakenoblock(fd) do{fcntl(fd, F_SETFL, fcntl(fd,F_GETFL) | O_NONBLOCK);}while(0) + +typedef struct { + struct qlistnode qnode; + int ClientFd; + QCQMIMSG qmi[0]; +} QMI_PROXY_MSG; + +typedef struct { + struct qlistnode qnode; + uint8_t QMIType; + uint8_t ClientId; + unsigned AccessTime; +} QMI_PROXY_CLINET; + +typedef struct { + struct qlistnode qnode; + struct qlistnode client_qnode; + int ClientFd; + unsigned AccessTime; +} QMI_PROXY_CONNECTION; + +#ifdef QUECTEL_QMI_MERGE +#define MERGE_PACKET_IDENTITY 0x2c7c +#define MERGE_PACKET_VERSION 0x0001 +#define MERGE_PACKET_MAX_PAYLOAD_SIZE 56 +typedef struct __QMI_MSG_HEADER { + uint16_t idenity; + uint16_t version; + uint16_t cur_len; + uint16_t total_len; +} QMI_MSG_HEADER; + +typedef struct __QMI_MSG_PACKET { + QMI_MSG_HEADER header; + uint16_t len; + char buf[4096]; +} QMI_MSG_PACKET; +#endif + +static int qmi_proxy_quit = 0; +static pthread_t thread_id = 0; +static int cdc_wdm_fd = -1; +static int qmi_proxy_server_fd = -1; +static struct qlistnode qmi_proxy_connection; +static struct qlistnode qmi_proxy_ctl_msg; +static int verbose_debug = 0; +static int modem_reset_flag = 0; +static int qmi_sync_done = 0; +static uint8_t qmi_buf[4096]; + +#ifdef QUECTEL_QMI_MERGE +static int merge_qmi_rsp_packet(void *buf, ssize_t *src_size) { + static QMI_MSG_PACKET s_QMIPacket; + QMI_MSG_HEADER *header = NULL; + ssize_t size = *src_size; + + if((uint16_t)size < sizeof(QMI_MSG_HEADER)) + return -1; + + header = (QMI_MSG_HEADER *)buf; + if(le16toh(header->idenity) != MERGE_PACKET_IDENTITY || le16toh(header->version) != MERGE_PACKET_VERSION || le16toh(header->cur_len) > le16toh(header->total_len)) + return -1; + + if(le16toh(header->cur_len) == le16toh(header->total_len)) { + *src_size = le16toh(header->total_len); + memcpy(buf, buf + sizeof(QMI_MSG_HEADER), *src_size); + s_QMIPacket.len = 0; + return 0; + } + + memcpy(s_QMIPacket.buf + s_QMIPacket.len, buf + sizeof(QMI_MSG_HEADER), le16toh(header->cur_len)); + s_QMIPacket.len += le16toh(header->cur_len); + + if (le16toh(header->cur_len) < MERGE_PACKET_MAX_PAYLOAD_SIZE || s_QMIPacket.len >= le16toh(header->total_len)) { + memcpy(buf, s_QMIPacket.buf, s_QMIPacket.len); + *src_size = s_QMIPacket.len; + s_QMIPacket.len = 0; + return 0; + } + + return -1; +} +#endif + +static int create_local_server(const char *name) { + int sockfd = -1; + int reuse_addr = 1; + struct sockaddr_un sockaddr; + socklen_t alen; + + /*Create server socket*/ + SYSCHECK(sockfd = socket(AF_LOCAL, SOCK_STREAM, 0)); + + memset(&sockaddr, 0, sizeof(sockaddr)); + sockaddr.sun_family = AF_LOCAL; + sockaddr.sun_path[0] = 0; + memcpy(sockaddr.sun_path + 1, name, strlen(name) ); + + alen = strlen(name) + offsetof(struct sockaddr_un, sun_path) + 1; + SYSCHECK(setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &reuse_addr,sizeof(reuse_addr))); + if(bind(sockfd, (struct sockaddr *)&sockaddr, alen) < 0) { + close(sockfd); + dprintf("bind %s errno: %d (%s)\n", name, errno, strerror(errno)); + return -1; + } + + dprintf("local server: %s sockfd = %d\n", name, sockfd); + cfmakenoblock(sockfd); + listen(sockfd, 1); + + return sockfd; +} + +static void accept_qmi_connection(int serverfd) { + int clientfd = -1; + unsigned char addr[128]; + socklen_t alen = sizeof(addr); + QMI_PROXY_CONNECTION *qmi_con; + + clientfd = accept(serverfd, (struct 
sockaddr *)addr, &alen); + + qmi_con = (QMI_PROXY_CONNECTION *)malloc(sizeof(QMI_PROXY_CONNECTION)); + if (qmi_con) { + qlist_init(&qmi_con->qnode); + qlist_init(&qmi_con->client_qnode); + qmi_con->ClientFd= clientfd; + qmi_con->AccessTime = 0; + dprintf("+++ ClientFd=%d\n", qmi_con->ClientFd); + qlist_add_tail(&qmi_proxy_connection, &qmi_con->qnode); + } + + cfmakenoblock(clientfd); +} + +static void cleanup_qmi_connection(int clientfd) { + struct qlistnode *con_node, *qmi_node; + + qlist_for_each(con_node, &qmi_proxy_connection) { + QMI_PROXY_CONNECTION *qmi_con = qnode_to_item(con_node, QMI_PROXY_CONNECTION, qnode); + + if (qmi_con->ClientFd == clientfd) { + while (!qlist_empty(&qmi_con->client_qnode)) { + QMI_PROXY_CLINET *qmi_client = qnode_to_item(qlist_head(&qmi_con->client_qnode), QMI_PROXY_CLINET, qnode); + + dprintf("xxx ClientFd=%d QMIType=%d ClientId=%d\n", qmi_con->ClientFd, qmi_client->QMIType, qmi_client->ClientId); + + qlist_remove(&qmi_client->qnode); + free(qmi_client); + } + + qlist_for_each(qmi_node, &qmi_proxy_ctl_msg) { + QMI_PROXY_MSG *qmi_msg = qnode_to_item(qmi_node, QMI_PROXY_MSG, qnode); + + if (qmi_msg->ClientFd == qmi_con->ClientFd) { + qlist_remove(&qmi_msg->qnode); + free(qmi_msg); + break; + } + } + + dprintf("--- ClientFd=%d\n", qmi_con->ClientFd); + close(qmi_con->ClientFd); + qlist_remove(&qmi_con->qnode); + free(qmi_con); + break; + } + } +} + +static void get_client_id(QMI_PROXY_CONNECTION *qmi_con, PQMICTL_GET_CLIENT_ID_RESP_MSG pClient) { + if (pClient->QMIResult == 0 && pClient->QMIError == 0) { + QMI_PROXY_CLINET *qmi_client = (QMI_PROXY_CLINET *)malloc(sizeof(QMI_PROXY_CLINET)); + + qlist_init(&qmi_client->qnode); + qmi_client->QMIType = pClient->QMIType; + qmi_client->ClientId = pClient->ClientId; + qmi_client->AccessTime = 0; + + dprintf("+++ ClientFd=%d QMIType=%d ClientId=%d\n", qmi_con->ClientFd, qmi_client->QMIType, qmi_client->ClientId); + qlist_add_tail(&qmi_con->client_qnode, &qmi_client->qnode); + } +} + +static void release_client_id(QMI_PROXY_CONNECTION *qmi_con, PQMICTL_RELEASE_CLIENT_ID_RESP_MSG pClient) { + struct qlistnode *client_node; + + if (pClient->QMIResult == 0 && pClient->QMIError == 0) { + qlist_for_each (client_node, &qmi_con->client_qnode) { + QMI_PROXY_CLINET *qmi_client = qnode_to_item(client_node, QMI_PROXY_CLINET, qnode); + + if (pClient->QMIType == qmi_client->QMIType && pClient->ClientId == qmi_client->ClientId) { + dprintf("--- ClientFd=%d QMIType=%d ClientId=%d\n", qmi_con->ClientFd, qmi_client->QMIType, qmi_client->ClientId); + qlist_remove(&qmi_client->qnode); + free(qmi_client); + break; + } + } + } +} + +static void dump_qmi(PQCQMIMSG pQMI, int fd, const char flag) +{ + if (verbose_debug) + { + unsigned i; + unsigned size = le16toh(pQMI->QMIHdr.Length) + 1; + printf("%c %d %u: ", flag, fd, size); + if (size > 16) + size = 16; + for (i = 0; i < size; i++) + printf("%02x ", ((uint8_t *)pQMI)[i]); + printf("\n"); + } +} + +static int send_qmi_to_cdc_wdm(PQCQMIMSG pQMI) { + struct pollfd pollfds[]= {{cdc_wdm_fd, POLLOUT, 0}}; + ssize_t ret = 0; + + do { + ret = poll(pollfds, sizeof(pollfds)/sizeof(pollfds[0]), 5000); + } while (ret == -1 && errno == EINTR && qmi_proxy_quit == 0); + + if (pollfds[0].revents & POLLOUT) { + ssize_t size = le16toh(pQMI->QMIHdr.Length) + 1; + ret = write(cdc_wdm_fd, pQMI, size); + dump_qmi(pQMI, cdc_wdm_fd, 'w'); + } + + return ret; +} + +static int send_qmi_to_client(PQCQMIMSG pQMI, int clientFd) { + struct pollfd pollfds[]= {{clientFd, POLLOUT, 0}}; + ssize_t ret = 0; + + do { + ret 
= poll(pollfds, sizeof(pollfds)/sizeof(pollfds[0]), 5000); + } while (ret == -1 && errno == EINTR && qmi_proxy_quit == 0); + + if (pollfds[0].revents & POLLOUT) { + ssize_t size = le16toh(pQMI->QMIHdr.Length) + 1; + ret = write(clientFd, pQMI, size); + dump_qmi(pQMI, clientFd, 'w'); + } + + return ret; +} + +static void recv_qmi_from_dev(PQCQMIMSG pQMI) { + struct qlistnode *con_node, *client_node; + + if (qmi_proxy_server_fd == -1) { + qmi_sync_done = 1; + } + else if (pQMI->QMIHdr.QMIType == QMUX_TYPE_CTL) { + if (pQMI->CTLMsg.QMICTLMsgHdr.CtlFlags == QMICTL_CTL_FLAG_RSP) { + if (!qlist_empty(&qmi_proxy_ctl_msg)) { + QMI_PROXY_MSG *qmi_msg = qnode_to_item(qlist_head(&qmi_proxy_ctl_msg), QMI_PROXY_MSG, qnode); + + qlist_for_each(con_node, &qmi_proxy_connection) { + QMI_PROXY_CONNECTION *qmi_con = qnode_to_item(con_node, QMI_PROXY_CONNECTION, qnode); + + if (qmi_con->ClientFd == qmi_msg->ClientFd) { + send_qmi_to_client(pQMI, qmi_msg->ClientFd); + + if (le16toh(pQMI->CTLMsg.QMICTLMsgHdrRsp.QMICTLType) == QMICTL_GET_CLIENT_ID_RESP) + get_client_id(qmi_con, &pQMI->CTLMsg.GetClientIdRsp); + else if ((le16toh(pQMI->CTLMsg.QMICTLMsgHdrRsp.QMICTLType) == QMICTL_RELEASE_CLIENT_ID_RESP) || + (le16toh(pQMI->CTLMsg.QMICTLMsgHdrRsp.QMICTLType) == QMICTL_REVOKE_CLIENT_ID_IND)) { + release_client_id(qmi_con, &pQMI->CTLMsg.ReleaseClientIdRsp); + if (le16toh(pQMI->CTLMsg.QMICTLMsgHdrRsp.QMICTLType) == QMICTL_REVOKE_CLIENT_ID_IND) + modem_reset_flag = 1; + } + else { + } + } + } + + qlist_remove(&qmi_msg->qnode); + free(qmi_msg); + } + } + + if (!qlist_empty(&qmi_proxy_ctl_msg)) { + QMI_PROXY_MSG *qmi_msg = qnode_to_item(qlist_head(&qmi_proxy_ctl_msg), QMI_PROXY_MSG, qnode); + + qlist_for_each(con_node, &qmi_proxy_connection) { + QMI_PROXY_CONNECTION *qmi_con = qnode_to_item(con_node, QMI_PROXY_CONNECTION, qnode); + + if (qmi_con->ClientFd == qmi_msg->ClientFd) { + send_qmi_to_cdc_wdm(qmi_msg->qmi); + } + } + } + } + else { + qlist_for_each(con_node, &qmi_proxy_connection) { + QMI_PROXY_CONNECTION *qmi_con = qnode_to_item(con_node, QMI_PROXY_CONNECTION, qnode); + + qlist_for_each(client_node, &qmi_con->client_qnode) { + QMI_PROXY_CLINET *qmi_client = qnode_to_item(client_node, QMI_PROXY_CLINET, qnode); + if (pQMI->QMIHdr.QMIType == qmi_client->QMIType) { + if (pQMI->QMIHdr.ClientId == 0 || pQMI->QMIHdr.ClientId == qmi_client->ClientId) { + send_qmi_to_client(pQMI, qmi_con->ClientFd); + } + } + } + } + } +} + +static int recv_qmi_from_client(PQCQMIMSG pQMI, unsigned size, int clientfd) { + if (qmi_proxy_server_fd <= 0) { + send_qmi_to_cdc_wdm(pQMI); + } + else if (pQMI->QMIHdr.QMIType == QMUX_TYPE_CTL) { + QMI_PROXY_MSG *qmi_msg; + + if (pQMI->CTLMsg.QMICTLMsgHdr.QMICTLType == QMICTL_SYNC_REQ) { + dprintf("do not allow client send QMICTL_SYNC_REQ\n"); + return 0; + } + + if (qlist_empty(&qmi_proxy_ctl_msg)) + send_qmi_to_cdc_wdm(pQMI); + + qmi_msg = malloc(sizeof(QMI_PROXY_MSG) + size); + qlist_init(&qmi_msg->qnode); + qmi_msg->ClientFd = clientfd; + memcpy(qmi_msg->qmi, pQMI, size); + qlist_add_tail(&qmi_proxy_ctl_msg, &qmi_msg->qnode); + } + else { + send_qmi_to_cdc_wdm(pQMI); + } + + return 0; +} + +static int qmi_proxy_init(void) { + unsigned i; + QCQMIMSG _QMI; + PQCQMIMSG pQMI = &_QMI; + + dprintf("%s enter\n", __func__); + + pQMI->QMIHdr.IFType = USB_CTL_MSG_TYPE_QMI; + pQMI->QMIHdr.CtlFlags = 0x00; + pQMI->QMIHdr.QMIType = QMUX_TYPE_CTL; + pQMI->QMIHdr.ClientId= 0x00; + + pQMI->CTLMsg.QMICTLMsgHdr.CtlFlags = QMICTL_FLAG_REQUEST; + + qmi_sync_done = 0; + for (i = 0; i < 10; i++) { + 
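+        /*
+         * CTL sync handshake: QMICTL_SYNC_REQ is sent up to 10 times, one second
+         * apart, until recv_qmi_from_dev() observes a reply and sets qmi_sync_done.
+         * Assuming 6-byte QCQMI and QMICTL headers and QMICTL_SYNC_REQ == 0x0027
+         * (per MPQMI.h/MPQCTL.h), each request is a 12-byte frame, in hex:
+         *   01 0b 00 00 00 00  00 <tid> 27 00 00 00
+         */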
pQMI->CTLMsg.SyncReq.TransactionId = i+1; + pQMI->CTLMsg.SyncReq.QMICTLType = QMICTL_SYNC_REQ; + pQMI->CTLMsg.SyncReq.Length = 0; + + pQMI->QMIHdr.Length = + htole16(le16toh(pQMI->CTLMsg.QMICTLMsgHdr.Length) + sizeof(QCQMI_HDR) + sizeof(QCQMICTL_MSG_HDR) - 1); + + if (send_qmi_to_cdc_wdm(pQMI) <= 0) + break; + + sleep(1); + if (qmi_sync_done) + break; + } + + dprintf("%s %s\n", __func__, qmi_sync_done ? "succful" : "fail"); + return qmi_sync_done ? 0 : -1; +} + +static void qmi_start_server(const char* servername) { + qmi_proxy_server_fd = create_local_server(servername); + dprintf("qmi_proxy_server_fd = %d\n", qmi_proxy_server_fd); + if (qmi_proxy_server_fd == -1) { + dprintf("Failed to create %s, errno: %d (%s)\n", servername, errno, strerror(errno)); + } +} + +static void qmi_close_server(const char* servername) { + if (qmi_proxy_server_fd != -1) { + dprintf("%s %s close server\n", __func__, servername); + close(qmi_proxy_server_fd); + qmi_proxy_server_fd = -1; + } +} + +static void *qmi_proxy_loop(void *param) +{ + PQCQMIMSG pQMI = (PQCQMIMSG)qmi_buf; + struct qlistnode *con_node; + QMI_PROXY_CONNECTION *qmi_con; + + (void)param; + dprintf("%s enter thread_id %p\n", __func__, (void *)pthread_self()); + + qlist_init(&qmi_proxy_connection); + qlist_init(&qmi_proxy_ctl_msg); + + while (cdc_wdm_fd > 0 && qmi_proxy_quit == 0) { + struct pollfd pollfds[2+64]; + int ne, ret, nevents = 0; + ssize_t nreads; + + pollfds[nevents].fd = cdc_wdm_fd; + pollfds[nevents].events = POLLIN; + pollfds[nevents].revents= 0; + nevents++; + + if (qmi_proxy_server_fd > 0) { + pollfds[nevents].fd = qmi_proxy_server_fd; + pollfds[nevents].events = POLLIN; + pollfds[nevents].revents= 0; + nevents++; + } + + qlist_for_each(con_node, &qmi_proxy_connection) { + qmi_con = qnode_to_item(con_node, QMI_PROXY_CONNECTION, qnode); + + pollfds[nevents].fd = qmi_con->ClientFd; + pollfds[nevents].events = POLLIN; + pollfds[nevents].revents= 0; + nevents++; + + if (nevents == (sizeof(pollfds)/sizeof(pollfds[0]))) + break; + } + +#if 0 + dprintf("poll "); + for (ne = 0; ne < nevents; ne++) { + dprintf("%d ", pollfds[ne].fd); + } + dprintf("\n"); +#endif + + do { + //ret = poll(pollfds, nevents, -1); + ret = poll(pollfds, nevents, (qmi_proxy_server_fd > 0) ? 
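+            /* -1 = block while the local server is up; otherwise tick every 200 ms so the loop can re-arm until the initial CTL sync completes */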
-1 : 200); + } while (ret == -1 && errno == EINTR && qmi_proxy_quit == 0); + + if (ret < 0) { + dprintf("%s poll=%d, errno: %d (%s)\n", __func__, ret, errno, strerror(errno)); + goto qmi_proxy_loop_exit; + } + + for (ne = 0; ne < nevents; ne++) { + int fd = pollfds[ne].fd; + short revents = pollfds[ne].revents; + + if (revents & (POLLERR | POLLHUP | POLLNVAL)) { + dprintf("%s poll fd = %d, revents = %04x\n", __func__, fd, revents); + if (fd == cdc_wdm_fd) { + goto qmi_proxy_loop_exit; + } else if(fd == qmi_proxy_server_fd) { + + } else { + cleanup_qmi_connection(fd); + } + + continue; + } + + if (!(pollfds[ne].revents & POLLIN)) { + continue; + } + + if (fd == qmi_proxy_server_fd) { + accept_qmi_connection(fd); + } + else if (fd == cdc_wdm_fd) { + nreads = read(fd, pQMI, sizeof(qmi_buf)); + if (nreads <= 0) { + dprintf("%s read=%d errno: %d (%s)\n", __func__, (int)nreads, errno, strerror(errno)); + goto qmi_proxy_loop_exit; + } +#ifdef QUECTEL_QMI_MERGE + if(merge_qmi_rsp_packet(pQMI, &nreads)) + continue; +#endif + if (nreads != (le16toh(pQMI->QMIHdr.Length) + 1)) { + dprintf("%s nreads=%d, pQCQMI->QMIHdr.Length = %d\n", __func__, (int)nreads, le16toh(pQMI->QMIHdr.Length)); + continue; + } + + dump_qmi(pQMI, fd, 'r'); + recv_qmi_from_dev(pQMI); + if (modem_reset_flag) + goto qmi_proxy_loop_exit; + } + else { + nreads = read(fd, pQMI, sizeof(qmi_buf)); + + if (nreads <= 0) { + dprintf("%s read=%d errno: %d (%s)", __func__, (int)nreads, errno, strerror(errno)); + cleanup_qmi_connection(fd); + break; + } + + if (nreads != (le16toh(pQMI->QMIHdr.Length) + 1)) { + dprintf("%s nreads=%d, pQCQMI->QMIHdr.Length = %d\n", __func__, (int)nreads, le16toh(pQMI->QMIHdr.Length)); + continue; + } + + dump_qmi(pQMI, fd, 'r'); + recv_qmi_from_client(pQMI, nreads, fd); + } + } + } + +qmi_proxy_loop_exit: + while (!qlist_empty(&qmi_proxy_connection)) { + QMI_PROXY_CONNECTION *qmi_con = qnode_to_item(qlist_head(&qmi_proxy_connection), QMI_PROXY_CONNECTION, qnode); + + cleanup_qmi_connection(qmi_con->ClientFd); + } + + dprintf("%s exit, thread_id %p\n", __func__, (void *)pthread_self()); + + return NULL; +} + +static void usage(void) { + dprintf(" -d A valid qmi device\n" + " default /dev/cdc-wdm0, but cdc-wdm0 may be invalid\n" + " -i netcard name\n" + " -v Will show all details\n"); +} + +static void sig_action(int sig) { + if (qmi_proxy_quit == 0) { + qmi_proxy_quit = 1; + if (thread_id) + pthread_kill(thread_id, sig); + } +} + +int main(int argc, char *argv[]) { + int opt; + char cdc_wdm[32+1] = "/dev/cdc-wdm0"; + int retry_times = 0; + char servername[64] = {0}; + + optind = 1; + + signal(SIGINT, sig_action); + + while ( -1 != (opt = getopt(argc, argv, "d:i:vh"))) { + switch (opt) { + case 'd': + strcpy(cdc_wdm, optarg); + break; + case 'v': + verbose_debug = 1; + break; + default: + usage(); + return 0; + } + } + + if (access(cdc_wdm, R_OK | W_OK)) { + dprintf("Fail to access %s, errno: %d (%s). break\n", cdc_wdm, errno, strerror(errno)); + return -1; + } + + sprintf(servername, "quectel-qmi-proxy%c", cdc_wdm[strlen(cdc_wdm)-1]); + dprintf("Will use cdc-wdm='%s', proxy='%s'\n", cdc_wdm, servername); + + while (qmi_proxy_quit == 0) { + if (access(cdc_wdm, R_OK | W_OK)) { + dprintf("Fail to access %s, errno: %d (%s). continue\n", cdc_wdm, errno, strerror(errno)); + // wait device + sleep(3); + continue; + } + + cdc_wdm_fd = open(cdc_wdm, O_RDWR | O_NONBLOCK | O_NOCTTY); + if (cdc_wdm_fd == -1) { + dprintf("Failed to open %s, errno: %d (%s). 
break\n", cdc_wdm, errno, strerror(errno)); + return -1; + } + cfmakenoblock(cdc_wdm_fd); + + /* no qmi_proxy_loop lives, create one */ + pthread_create(&thread_id, NULL, qmi_proxy_loop, NULL); + /* try to redo init if failed, init function must be successfully */ + while (qmi_proxy_init() != 0) { + if (retry_times < 5) { + dprintf("fail to init proxy, try again in 2 seconds.\n"); + sleep(2); + retry_times++; + } else { + dprintf("has failed too much times, restart the modem and have a try...\n"); + break; + } + /* break loop if modem is detached */ + if (access(cdc_wdm, F_OK|R_OK|W_OK)) + break; + } + retry_times = 0; + qmi_start_server(servername); + if (qmi_proxy_server_fd == -1) + pthread_cancel(thread_id); + pthread_join(thread_id, NULL); + + /* close local server at last */ + qmi_close_server(servername); + close(cdc_wdm_fd); + /* DO RESTART IN 20s IF MODEM RESET ITSELF */ + if (modem_reset_flag) { + unsigned int time_to_wait = 20; + while (time_to_wait) { + time_to_wait = sleep(time_to_wait); + } + modem_reset_flag = 0; + } + } + + return 0; +} diff --git a/package/wwan/driver/quectel_cm_5G/src/quectel-qrtr-proxy.c b/package/wwan/driver/quectel_cm_5G/src/quectel-qrtr-proxy.c new file mode 100644 index 000000000..9404e963a --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/quectel-qrtr-proxy.c @@ -0,0 +1,894 @@ +/****************************************************************************** + @file quectel-qrtr-proxy.c + @brief The qrtr proxy. + + DESCRIPTION + Connectivity Management Tool for USB/PCIE network adapter of Quectel wireless cellular modules. + + INITIALIZATION AND SEQUENCING REQUIREMENTS + None. + + --------------------------------------------------------------------------- + Copyright (c) 2016 - 2020 Quectel Wireless Solution, Co., Ltd. All Rights Reserved. + Quectel Wireless Solution Proprietary and Confidential. + --------------------------------------------------------------------------- +******************************************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "qrtr.h" + +#include "qendian.h" +#include "qlist.h" +#include "MPQMI.h" +#include "MPQCTL.h" +#include "MPQMUX.h" + +static const char * get_time(void) { + static char time_buf[128]; + struct timeval tv; + time_t time; + suseconds_t millitm; + struct tm *ti; + + gettimeofday (&tv, NULL); + + time= tv.tv_sec; + millitm = (tv.tv_usec + 500) / 1000; + + if (millitm == 1000) { + ++time; + millitm = 0; + } + + ti = localtime(&time); + sprintf(time_buf, "[%02d-%02d_%02d:%02d:%02d:%03d]", ti->tm_mon+1, ti->tm_mday, ti->tm_hour, ti->tm_min, ti->tm_sec, (int)millitm); + return time_buf; +} + +#define dprintf(fmt, args...) 
do { fprintf(stdout, "%s " fmt, get_time(), ##args); } while(0); +#define SYSCHECK(c) do{if((c)<0) {dprintf("%s %d error: '%s' (code: %d)\n", __func__, __LINE__, strerror(errno), errno); return -1;}}while(0) +#define cfmakenoblock(fd) do{fcntl(fd, F_SETFL, fcntl(fd,F_GETFL) | O_NONBLOCK);}while(0) +#define align_4(_len) (((_len) + 3) & ~3) + +typedef struct { + struct qlistnode qnode; + int ClientFd; + QCQMIMSG qrtr[0]; +} QRTR_PROXY_MSG; + +typedef struct { + struct qlistnode qnode; + uint8_t QMIType; + uint8_t ClientId; + uint32_t node_id; + uint32_t port_id; + unsigned AccessTime; +} QRTR_PROXY_CLINET; + +typedef struct { + struct qlistnode qnode; + struct qlistnode client_qnode; + int ClientFd; + unsigned AccessTime; +} QRTR_PROXY_CONNECTION; + +typedef struct { + struct qlistnode qnode; + uint32_t service; + uint32_t version; + uint32_t instance; + uint32_t node; + uint32_t port; + + __le32 src_node_id; + __le32 src_port_id; +} QRTR_SERVICE; + +static int qrtr_proxy_quit = 0; +static pthread_t thread_id = 0; +static int cdc_wdm_fd = -1; +static int qrtr_proxy_server_fd = -1; +static struct qlistnode qrtr_proxy_connection; +static struct qlistnode qrtr_server_list; +static int verbose_debug = 0; +static uint32_t node_modem = 3; //IPQ ~ 3, QCM ~ 0 +static uint32_t node_myself = 1; + +static QRTR_SERVICE *find_qrtr_service(uint8_t QMIType) +{ + struct qlistnode *node; + + qlist_for_each (node, &qrtr_server_list) { + QRTR_SERVICE *srv = qnode_to_item(node, QRTR_SERVICE, qnode); + if (srv->service == QMIType) + return srv; + } + + return NULL; +} + +static uint8_t client_bitmap[0xf0]; +static uint8_t port_bitmap[0xff0]; +static int alloc_client_id(void) { + int id = 1; + + for (id = 1; id < (int)sizeof(client_bitmap); id++) { + if (client_bitmap[id] == 0) { + client_bitmap[id] = id; + return id; + } + } + + dprintf("NOT find %s()\n", __func__); + return 0; +} + +static void free_client_id(int id) { + if (id < (int)sizeof(client_bitmap) && client_bitmap[id] == id) { + client_bitmap[id] = 0; + return; + } + dprintf("NOT find %s(id=%d)\n", __func__, id); +} + +static int alloc_port_id(void) { + int id = 1; + + for (id = 1; id < (int)sizeof(port_bitmap); id++) { + if (port_bitmap[id] == 0) { + port_bitmap[id] = id; + return id; + } + } + + dprintf("NOT find %s()\n", __func__); + return 0; +} + +static void free_port_id(int id) { + if (id < (int)sizeof(port_bitmap) && port_bitmap[id] == id) { + port_bitmap[id] = 0; + return; + } + dprintf("NOT find %s(id=%d)\n", __func__, id); +} + +static void dump_qrtr(void *buf, size_t len, char flag) +{ + size_t i; + static char printf_buf[1024]; + int cnt = 0, limit=1024; + unsigned char *d = (unsigned char *)buf; + struct qrtr_hdr_v1 *hdr = (struct qrtr_hdr_v1 *)buf; + const char *ctrl_pkt_strings[] = { + [QRTR_TYPE_DATA] = "data", + [QRTR_TYPE_HELLO] = "hello", + [QRTR_TYPE_BYE] = "bye", + [QRTR_TYPE_NEW_SERVER] = "new-server", + [QRTR_TYPE_DEL_SERVER] = "del-server", + [QRTR_TYPE_DEL_CLIENT] = "del-client", + [QRTR_TYPE_RESUME_TX] = "resume-tx", + [QRTR_TYPE_EXIT] = "exit", + [QRTR_TYPE_PING] = "ping", + [QRTR_TYPE_NEW_LOOKUP] = "new-lookup", + [QRTR_TYPE_DEL_LOOKUP] = "del-lookup", + }; + + for (i = 0; i < len && i < 64; i++) { + if (i%4 == 0) + cnt += snprintf(printf_buf+cnt, limit-cnt, " "); + cnt += snprintf(printf_buf+cnt, limit-cnt, "%02x", d[i]); + } + dprintf("%s\n", printf_buf); + + dprintf("%c ver=%d, type=%d(%s), %x,%x -> %x,%x, confirm_rx=%d, size=%u\n", + flag, + le32toh(hdr->version), le32toh(hdr->type), 
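/* all qrtr_hdr_v1 fields are little-endian on the wire, hence the le32toh()
   conversions; note ctrl_pkt_strings[] is only defined up to
   QRTR_TYPE_DEL_LOOKUP, so an out-of-range type would index past the table */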
ctrl_pkt_strings[le32toh(hdr->type)], + le32toh(hdr->src_node_id), le32toh(hdr->src_port_id), le32toh(hdr->dst_node_id), le32toh(hdr->dst_port_id), + le32toh(hdr->confirm_rx), le32toh(hdr->size)); +} + +static int send_qmi_to_client(PQCQMIMSG pQMI, int fd) { + struct pollfd pollfds[]= {{fd, POLLOUT, 0}}; + ssize_t ret = 0; + ssize_t size = le16toh(pQMI->QMIHdr.Length) + 1; + + do { + ret = poll(pollfds, sizeof(pollfds)/sizeof(pollfds[0]), 5000); + } while (ret == -1 && errno == EINTR && qrtr_proxy_quit == 0); + + if (pollfds[0].revents & POLLOUT) { + ret = write(fd, pQMI, size); + } + + return ret == size ? 0 : -1; +} + +static int send_qrtr_to_dev(struct qrtr_hdr_v1 *hdr, int fd) { + struct pollfd pollfds[]= {{fd, POLLOUT, 0}}; + ssize_t ret = 0; + ssize_t size = align_4(le32toh(hdr->size) + sizeof(*hdr)); + + do { + ret = poll(pollfds, sizeof(pollfds)/sizeof(pollfds[0]), 5000); + } while (ret == -1 && errno == EINTR && qrtr_proxy_quit == 0); + + if (pollfds[0].revents & POLLOUT) { + ret = write(fd, hdr, size); + } + + return ret == size ? 0 : -1; +} + +static int qrtr_node_enqueue(const void *data, size_t len, + int type, struct sockaddr_qrtr *from, + struct sockaddr_qrtr *to, unsigned int confirm_rx) +{ + int rc = -1; + size_t size = sizeof(struct qrtr_hdr_v1) + len; + struct qrtr_hdr_v1 *hdr = (struct qrtr_hdr_v1 *)malloc(align_4(size)); + + if (hdr) { + hdr->version = htole32(QRTR_PROTO_VER_1); + hdr->type = htole32(type); + hdr->src_node_id = htole32(from->sq_node); + hdr->src_port_id = htole32(from->sq_port); + hdr->dst_node_id = htole32(to->sq_node); + hdr->dst_port_id = htole32(to->sq_port); + hdr->size = htole32(len); + hdr->confirm_rx = htole32(!!confirm_rx); + + memcpy(hdr + 1, data, len); + dump_qrtr(hdr, size, '>'); + send_qrtr_to_dev(hdr, cdc_wdm_fd); + free(hdr); + } + + return rc; +} + +static int send_ctrl_hello(__u32 sq_node, __u32 sq_port) +{ + struct qrtr_ctrl_pkt pkt; + int rc; + struct sockaddr_qrtr to = {AF_QIPCRTR, sq_node, sq_port}; + struct sockaddr_qrtr from = {AF_QIPCRTR, node_myself, QRTR_PORT_CTRL}; + + memset(&pkt, 0, sizeof(pkt)); + pkt.cmd = htole32(QRTR_TYPE_HELLO); + + rc = qrtr_node_enqueue(&pkt, sizeof(pkt), QRTR_TYPE_HELLO, &from, &to, 0); + if (rc < 0) + return rc; + + return 0; +} + +static int ctrl_cmd_del_client(__u32 sq_node, __u32 sq_port, uint8_t QMIType) +{ + struct qrtr_ctrl_pkt pkt; + int rc; + struct sockaddr_qrtr to = {AF_QIPCRTR, QRTR_NODE_BCAST, QRTR_PORT_CTRL}; + struct sockaddr_qrtr from = {AF_QIPCRTR, sq_node, sq_port}; + QRTR_SERVICE *srv = find_qrtr_service(QMIType); + + if (srv) { + to.sq_node = srv->src_node_id; + } + + memset(&pkt, 0, sizeof(pkt)); + pkt.cmd = htole32(QRTR_TYPE_DEL_CLIENT); + pkt.client.node = htole32(sq_node); + pkt.client.port = htole32(sq_port); + + rc = qrtr_node_enqueue(&pkt, sizeof(pkt), QRTR_TYPE_DATA, &from, &to, 0); + if (rc < 0) + return rc; + + return 0; +} + +static void handle_server_change(struct qrtr_hdr_v1 *hdr) { + struct qrtr_ctrl_pkt *pkt = (struct qrtr_ctrl_pkt *)(hdr + 1); + QRTR_SERVICE *s; + + dprintf ("[qrtr] %s server on %u:%u(%u:%u) -> service %u, instance %x\n", + QRTR_TYPE_NEW_SERVER == hdr->type ? 
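/* hdr->type is used in this function without le32toh(); harmless on the
   little-endian hosts this tool targets, but inconsistent with the
   conversions done elsewhere */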
"add" : "remove", + le32toh(pkt->server.node), le32toh(pkt->server.port), + le32toh(hdr->src_node_id), le32toh(hdr->src_port_id), + le32toh(pkt->server.service), le32toh(pkt->server.instance)); + + if (le32toh(pkt->server.node) != node_modem) { + return; //we only care modem + } + + s = (QRTR_SERVICE *)malloc(sizeof(QRTR_SERVICE)); + if (!s) + return; + + qlist_init(&s->qnode); + s->service = le32toh(pkt->server.service); + s->version = le32toh(pkt->server.instance) & 0xff; + s->instance = le32toh(pkt->server.instance) >> 8; + s->node = le32toh(pkt->server.node); + s->port = le32toh(pkt->server.port); + + s->src_node_id = le32toh(hdr->src_node_id); + s->src_port_id = le32toh(hdr->src_port_id); + + if (QRTR_TYPE_NEW_SERVER == hdr->type) { + qlist_add_tail(&qrtr_server_list, &s->qnode); + } + else if (QRTR_TYPE_DEL_SERVER == hdr->type) { + qlist_remove(&s->qnode); + } +} + +static int create_local_server(const char *name) { + int sockfd = -1; + int reuse_addr = 1; + struct sockaddr_un sockaddr; + socklen_t alen; + + /*Create server socket*/ + SYSCHECK(sockfd = socket(AF_LOCAL, SOCK_STREAM, 0)); + + memset(&sockaddr, 0, sizeof(sockaddr)); + sockaddr.sun_family = AF_LOCAL; + sockaddr.sun_path[0] = 0; + memcpy(sockaddr.sun_path + 1, name, strlen(name) ); + + alen = strlen(name) + offsetof(struct sockaddr_un, sun_path) + 1; + SYSCHECK(setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &reuse_addr,sizeof(reuse_addr))); + if(bind(sockfd, (struct sockaddr *)&sockaddr, alen) < 0) { + close(sockfd); + dprintf("bind %s errno: %d (%s)\n", name, errno, strerror(errno)); + return -1; + } + + dprintf("local server: %s sockfd = %d\n", name, sockfd); + cfmakenoblock(sockfd); + listen(sockfd, 1); + + return sockfd; +} + +static uint8_t alloc_qrtr_client_id(QRTR_PROXY_CONNECTION *qrtr_con, uint8_t QMIType) { + QRTR_PROXY_CLINET *qrtr_client = (QRTR_PROXY_CLINET *)malloc(sizeof(QRTR_PROXY_CLINET)); + + qlist_init(&qrtr_client->qnode); + qrtr_client->QMIType = QMIType; + qrtr_client->ClientId = alloc_client_id(); + qrtr_client->node_id = 1; + qrtr_client->port_id = alloc_port_id(); + qrtr_client->AccessTime = 0; + + dprintf("+++ ClientFd=%d QMIType=%d ClientId=%d, node_id=%d, port_id=%d\n", + qrtr_con->ClientFd, qrtr_client->QMIType, qrtr_client->ClientId, + qrtr_client->node_id, qrtr_client->port_id); + qlist_add_tail(&qrtr_con->client_qnode, &qrtr_client->qnode); + + return qrtr_client->ClientId; +} + +static void release_qrtr_client_id(QRTR_PROXY_CONNECTION *qrtr_con, uint8_t QMIType, uint8_t ClientId) { + struct qlistnode *client_node; + int find = 0; + + qlist_for_each (client_node, &qrtr_con->client_qnode) { + QRTR_PROXY_CLINET *qrtr_client = qnode_to_item(client_node, QRTR_PROXY_CLINET, qnode); + + if (QMIType == qrtr_client->QMIType && ClientId == qrtr_client->ClientId) { + dprintf("--- ClientFd=%d QMIType=%d ClientId=%d, node_id=%d, port_id=%d\n", + qrtr_con->ClientFd, qrtr_client->QMIType, qrtr_client->ClientId, + qrtr_client->node_id, qrtr_client->port_id); + ctrl_cmd_del_client(qrtr_client->node_id, qrtr_client->port_id, qrtr_client->QMIType); + free_client_id(qrtr_client->ClientId); + free_port_id(qrtr_client->port_id); + qlist_remove(&qrtr_client->qnode); + free(qrtr_client); + find++; + break; + } + } + + if (!find) { + dprintf("NOT find on %s(ClientFd=%d, QMIType=%d, ClientId=%d)\n", + __func__, qrtr_con->ClientFd, QMIType, ClientId); + } +} + +static void accept_qrtr_connection(int serverfd) { + int clientfd = -1; + unsigned char addr[128]; + socklen_t alen = sizeof(addr); + QRTR_PROXY_CONNECTION 
*qrtr_con; + + clientfd = accept(serverfd, (struct sockaddr *)addr, &alen); + + qrtr_con = (QRTR_PROXY_CONNECTION *)malloc(sizeof(QRTR_PROXY_CONNECTION)); + if (qrtr_con) { + qlist_init(&qrtr_con->qnode); + qlist_init(&qrtr_con->client_qnode); + qrtr_con->ClientFd= clientfd; + qrtr_con->AccessTime = 0; + dprintf("+++ ClientFd=%d\n", qrtr_con->ClientFd); + qlist_add_tail(&qrtr_proxy_connection, &qrtr_con->qnode); + } + + cfmakenoblock(clientfd); +} + +static void cleanup_qrtr_connection(int clientfd) { + struct qlistnode *con_node; + int find = 0; + + qlist_for_each(con_node, &qrtr_proxy_connection) { + QRTR_PROXY_CONNECTION *qrtr_con = qnode_to_item(con_node, QRTR_PROXY_CONNECTION, qnode); + + if (qrtr_con->ClientFd == clientfd) { + while (!qlist_empty(&qrtr_con->client_qnode)) { + QRTR_PROXY_CLINET *qrtr_client = qnode_to_item(qlist_head(&qrtr_con->client_qnode), QRTR_PROXY_CLINET, qnode); + + release_qrtr_client_id(qrtr_con, qrtr_client->QMIType, qrtr_client->ClientId); + } + + dprintf("--- ClientFd=%d\n", qrtr_con->ClientFd); + close(qrtr_con->ClientFd); + qlist_remove(&qrtr_con->qnode); + free(qrtr_con); + find = 1; + break; + } + } + + if (!find) { + dprintf("NOT find on %s(ClientFd=%d)\n", __func__, clientfd); + } +} + +static void recv_qrtr_from_dev(struct qrtr_hdr_v1 *hdr) { + int find = 0; + uint32_t type = le32toh(hdr->type); + + if (type == QRTR_TYPE_HELLO) { + send_ctrl_hello(le32toh(hdr->src_node_id), le32toh(hdr->src_port_id)); + find++; + } + else if (type == QRTR_TYPE_NEW_SERVER || type == QRTR_TYPE_DEL_SERVER) { + handle_server_change(hdr); + find++; + } + else if (type == QRTR_TYPE_DATA) { + struct qlistnode *con_node, *client_node; + + qlist_for_each(con_node, &qrtr_proxy_connection) { + QRTR_PROXY_CONNECTION *qrtr_con = qnode_to_item(con_node, QRTR_PROXY_CONNECTION, qnode); + + qlist_for_each(client_node, &qrtr_con->client_qnode) { + QRTR_PROXY_CLINET *qrtr_client = qnode_to_item(client_node, QRTR_PROXY_CLINET, qnode); + + if (qrtr_client->node_id == le32toh(hdr->dst_node_id) && qrtr_client->port_id == le32toh(hdr->dst_port_id)) { + PQCQMIMSG pQMI = (PQCQMIMSG)malloc(hdr->size + sizeof(QCQMI_HDR)); + + if (pQMI) { + pQMI->QMIHdr.IFType = USB_CTL_MSG_TYPE_QMI; + pQMI->QMIHdr.Length = htole16(hdr->size + sizeof(QCQMI_HDR) - 1); + pQMI->QMIHdr.CtlFlags = 0x00; + pQMI->QMIHdr.QMIType = qrtr_client->QMIType; + pQMI->QMIHdr.ClientId = qrtr_client->ClientId; + memcpy(&pQMI->MUXMsg, hdr + 1, hdr->size); + send_qmi_to_client(pQMI, qrtr_con->ClientFd); + free(pQMI); + find++; + } + } + } + } + + if (hdr->confirm_rx) { + struct qrtr_ctrl_pkt pkt; + struct sockaddr_qrtr from = {AF_QIPCRTR, le32toh(hdr->dst_node_id), le32toh(hdr->dst_port_id)}; + struct sockaddr_qrtr to = {AF_QIPCRTR, le32toh(hdr->src_node_id), le32toh(hdr->src_port_id)}; + + memset(&pkt, 0, sizeof(pkt)); + pkt.cmd = htole32(QRTR_TYPE_RESUME_TX); + pkt.client.node = hdr->dst_node_id; + pkt.client.port = hdr->dst_port_id; + + qrtr_node_enqueue(&pkt, sizeof(pkt), QRTR_TYPE_RESUME_TX, &from, &to, 0); + } + } + else if (type == QRTR_TYPE_RESUME_TX) { + } + + if (!find) { + dprintf("NOT find on %s()\n", __func__); + } +} + +static int recv_qmi_from_client(PQCQMIMSG pQMI, int clientfd) { + QRTR_PROXY_CONNECTION *qrtr_con; + struct qlistnode *con_node, *client_node; + int find = 0; + + qlist_for_each(con_node, &qrtr_proxy_connection) { + qrtr_con = qnode_to_item(con_node, QRTR_PROXY_CONNECTION, qnode); + if (qrtr_con->ClientFd == clientfd) + break; + qrtr_con = NULL; + } + + if (!qrtr_con) { + return -1; + } + + if 
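/* QMI_CTL traffic is terminated here rather than forwarded: QRTR has no CTL
   service, so the proxy emulates client-ID allocation/release locally and
   backs each allocated ClientId with a private QRTR port */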
(le16toh(pQMI->QMIHdr.QMIType) == QMUX_TYPE_CTL) { + if (pQMI->CTLMsg.QMICTLMsgHdr.QMICTLType == QMICTL_SYNC_REQ) { + dprintf("do not allow client send QMICTL_SYNC_REQ\n"); + return 0; + } + else if (le16toh(pQMI->CTLMsg.QMICTLMsgHdr.QMICTLType) == QMICTL_GET_CLIENT_ID_REQ) { + uint8_t QMIType = pQMI->CTLMsg.GetClientIdReq.QMIType; + PQCQMIMSG pRsp = (PQCQMIMSG)malloc(256); + + if (pRsp) { + uint8_t ClientId = 0; + + if (find_qrtr_service(QMIType)) { + ClientId = alloc_qrtr_client_id(qrtr_con, QMIType); + } + + pRsp->QMIHdr.IFType = USB_CTL_MSG_TYPE_QMI; + pRsp->QMIHdr.Length = htole16(sizeof(pRsp->CTLMsg.GetClientIdRsp) + sizeof(pRsp->QMIHdr) - 1); + pRsp->QMIHdr.CtlFlags = 0x00; + pRsp->QMIHdr.QMIType = QMUX_TYPE_CTL; + pRsp->QMIHdr.ClientId = 0; + + pRsp->CTLMsg.QMICTLMsgHdrRsp.CtlFlags = QMICTL_FLAG_RESPONSE; + pRsp->CTLMsg.QMICTLMsgHdrRsp.TransactionId = pQMI->CTLMsg.QMICTLMsgHdr.TransactionId; + pRsp->CTLMsg.QMICTLMsgHdrRsp.QMICTLType = pQMI->CTLMsg.QMICTLMsgHdr.QMICTLType; + pRsp->CTLMsg.QMICTLMsgHdrRsp.Length = htole16(sizeof(pRsp->CTLMsg.GetClientIdRsp) - sizeof(pRsp->CTLMsg.QMICTLMsgHdr)); + pRsp->CTLMsg.QMICTLMsgHdrRsp.TLVType = QCTLV_TYPE_RESULT_CODE; + pRsp->CTLMsg.QMICTLMsgHdrRsp.TLVLength = htole16(4); + pRsp->CTLMsg.QMICTLMsgHdrRsp.QMUXResult = htole16(ClientId ? 0 : QMI_RESULT_FAILURE); + pRsp->CTLMsg.QMICTLMsgHdrRsp.QMUXError = htole16(ClientId ? 0 : QMI_ERR_INTERNAL); + pRsp->CTLMsg.GetClientIdRsp.TLV2Type = QCTLV_TYPE_REQUIRED_PARAMETER; + pRsp->CTLMsg.GetClientIdRsp.TLV2Length = htole16(2); + pRsp->CTLMsg.GetClientIdRsp.QMIType = QMIType; + pRsp->CTLMsg.GetClientIdRsp.ClientId = ClientId; + + send_qmi_to_client(pRsp, clientfd); + free(pRsp); + find++; + } + } + else if (le16toh(pQMI->CTLMsg.QMICTLMsgHdr.QMICTLType) == QMICTL_RELEASE_CLIENT_ID_REQ) { + PQCQMIMSG pRsp = (PQCQMIMSG)malloc(256); + release_qrtr_client_id(qrtr_con, pQMI->CTLMsg.ReleaseClientIdReq.QMIType, pQMI->CTLMsg.ReleaseClientIdReq.ClientId); + + if (pRsp) { + pRsp->QMIHdr.IFType = USB_CTL_MSG_TYPE_QMI; + pRsp->QMIHdr.Length = htole16(sizeof(pRsp->CTLMsg.ReleaseClientIdRsp) + sizeof(pRsp->QMIHdr) - 1); + pRsp->QMIHdr.CtlFlags = 0x00; + pRsp->QMIHdr.QMIType = QMUX_TYPE_CTL; + pRsp->QMIHdr.ClientId = 0; + + pRsp->CTLMsg.QMICTLMsgHdrRsp.CtlFlags = QMICTL_FLAG_RESPONSE; + pRsp->CTLMsg.QMICTLMsgHdrRsp.TransactionId = pQMI->CTLMsg.QMICTLMsgHdr.TransactionId; + pRsp->CTLMsg.QMICTLMsgHdrRsp.QMICTLType = pQMI->CTLMsg.QMICTLMsgHdr.QMICTLType; + pRsp->CTLMsg.QMICTLMsgHdrRsp.Length = htole16(sizeof(pRsp->CTLMsg.ReleaseClientIdRsp) - sizeof(pRsp->CTLMsg.QMICTLMsgHdr)); + pRsp->CTLMsg.QMICTLMsgHdrRsp.TLVType = QCTLV_TYPE_RESULT_CODE; + pRsp->CTLMsg.QMICTLMsgHdrRsp.TLVLength = htole16(4); + pRsp->CTLMsg.QMICTLMsgHdrRsp.QMUXResult = htole16(0); + pRsp->CTLMsg.QMICTLMsgHdrRsp.QMUXError = htole16(0); + pRsp->CTLMsg.ReleaseClientIdRsp.TLV2Type = QCTLV_TYPE_REQUIRED_PARAMETER; + pRsp->CTLMsg.ReleaseClientIdRsp.TLV2Length = htole16(2); + pRsp->CTLMsg.ReleaseClientIdRsp.QMIType = pQMI->CTLMsg.ReleaseClientIdReq.QMIType; + pRsp->CTLMsg.ReleaseClientIdRsp.ClientId = pQMI->CTLMsg.ReleaseClientIdReq.ClientId; + + send_qmi_to_client(pRsp, clientfd); + free(pRsp); + find++; + } + } + else if (le16toh(pQMI->CTLMsg.QMICTLMsgHdr.QMICTLType) == QMICTL_GET_VERSION_REQ) { + PQCQMIMSG pRsp = (PQCQMIMSG)malloc(256); + + if (pRsp) { + pRsp->QMIHdr.IFType = USB_CTL_MSG_TYPE_QMI; + pRsp->QMIHdr.Length = htole16(sizeof(pRsp->CTLMsg.GetVersionRsp) + sizeof(pRsp->QMIHdr) - 1); + pRsp->QMIHdr.CtlFlags = 0x00; + pRsp->QMIHdr.QMIType = 
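/* synthesized CTL responses always carry QMUX type CTL with ClientId 0 */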
QMUX_TYPE_CTL; + pRsp->QMIHdr.ClientId = 0; + + pRsp->CTLMsg.QMICTLMsgHdrRsp.CtlFlags = QMICTL_FLAG_RESPONSE; + pRsp->CTLMsg.QMICTLMsgHdrRsp.TransactionId = pQMI->CTLMsg.QMICTLMsgHdr.TransactionId; + pRsp->CTLMsg.QMICTLMsgHdrRsp.QMICTLType = pQMI->CTLMsg.QMICTLMsgHdr.QMICTLType; + pRsp->CTLMsg.QMICTLMsgHdrRsp.Length = htole16(sizeof(pRsp->CTLMsg.GetVersionRsp) - sizeof(pRsp->CTLMsg.QMICTLMsgHdr)); + pRsp->CTLMsg.QMICTLMsgHdrRsp.TLVType = QCTLV_TYPE_RESULT_CODE; + pRsp->CTLMsg.QMICTLMsgHdrRsp.TLVLength = htole16(4); + pRsp->CTLMsg.QMICTLMsgHdrRsp.QMUXResult = htole16(0); + pRsp->CTLMsg.QMICTLMsgHdrRsp.QMUXError = htole16(0); + pRsp->CTLMsg.GetVersionRsp.TLV2Type = QCTLV_TYPE_REQUIRED_PARAMETER; + pRsp->CTLMsg.GetVersionRsp.TLV2Length = htole16(1); + pRsp->CTLMsg.GetVersionRsp.NumElements = 0; + + send_qmi_to_client(pRsp, clientfd); + free(pRsp); + find++; + } + } + } + else { + qlist_for_each (client_node, &qrtr_con->client_qnode) { + QRTR_PROXY_CLINET *qrtr_client = qnode_to_item(client_node, QRTR_PROXY_CLINET, qnode); + + if (pQMI->QMIHdr.QMIType == qrtr_client->QMIType && pQMI->QMIHdr.ClientId == qrtr_client->ClientId) { + QRTR_SERVICE *srv = find_qrtr_service(pQMI->QMIHdr.QMIType); + + if (srv && srv->service) { + struct sockaddr_qrtr from = {AF_QIPCRTR, qrtr_client->node_id, qrtr_client->port_id}; + struct sockaddr_qrtr to = {AF_QIPCRTR, srv->node, srv->port}; + + qrtr_node_enqueue(&pQMI->MUXMsg, le16toh(pQMI->QMIHdr.Length) + 1 - sizeof(QCQMI_HDR), + QRTR_TYPE_DATA, &from, &to, 0); + find++; + } + break; + } + } + } + + if (!find) { + dprintf("NOT find on %s()\n", __func__); + } + + return 0; +} + +static int qrtr_proxy_init(void) { + unsigned i; + int qrtr_sync_done = 0; + + dprintf("%s enter\n", __func__); + send_ctrl_hello(QRTR_NODE_BCAST, QRTR_PORT_CTRL); + + for (i = 0; i < 10; i++) { + sleep(1); + qrtr_sync_done = !qlist_empty(&qrtr_server_list); + if (qrtr_sync_done) + break; + } + + dprintf("%s %s\n", __func__, qrtr_sync_done ? "succful" : "fail"); + return qrtr_sync_done ? 
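/* sync is complete once at least one NEW_SERVER advertisement from the modem
   node has populated qrtr_server_list; we poll once per second for up to
   10 seconds after broadcasting HELLO */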
0 : -1; +} + +static void qrtr_start_server(const char* servername) { + qrtr_proxy_server_fd = create_local_server(servername); + dprintf("qrtr_proxy_server_fd = %d\n", qrtr_proxy_server_fd); + if (qrtr_proxy_server_fd == -1) { + dprintf("Failed to create %s, errno: %d (%s)\n", servername, errno, strerror(errno)); + } +} + +static void qrtr_close_server(const char* servername) { + if (qrtr_proxy_server_fd != -1) { + dprintf("%s %s\n", __func__, servername); + close(qrtr_proxy_server_fd); + qrtr_proxy_server_fd = -1; + } +} + +static void *qrtr_proxy_loop(void *param) +{ + void *rx_buf; + struct qlistnode *con_node; + QRTR_PROXY_CONNECTION *qrtr_con; + + (void)param; + dprintf("%s enter thread_id %p\n", __func__, (void *)pthread_self()); + + rx_buf = malloc(8192); + if (!rx_buf) + return NULL; + + while (cdc_wdm_fd > 0 && qrtr_proxy_quit == 0) { + struct pollfd pollfds[32]; + int ne, ret, nevents = 0; + ssize_t nreads; + + pollfds[nevents].fd = cdc_wdm_fd; + pollfds[nevents].events = POLLIN; + pollfds[nevents].revents= 0; + nevents++; + + if (qrtr_proxy_server_fd > 0) { + pollfds[nevents].fd = qrtr_proxy_server_fd; + pollfds[nevents].events = POLLIN; + pollfds[nevents].revents= 0; + nevents++; + } + + qlist_for_each(con_node, &qrtr_proxy_connection) { + qrtr_con = qnode_to_item(con_node, QRTR_PROXY_CONNECTION, qnode); + + pollfds[nevents].fd = qrtr_con->ClientFd; + pollfds[nevents].events = POLLIN; + pollfds[nevents].revents= 0; + nevents++; + + if (nevents == (sizeof(pollfds)/sizeof(pollfds[0]))) + break; + } + + do { + //ret = poll(pollfds, nevents, -1); + ret = poll(pollfds, nevents, (qrtr_proxy_server_fd > 0) ? -1 : 200); + } while (ret == -1 && errno == EINTR && qrtr_proxy_quit == 0); + + if (ret < 0) { + dprintf("%s poll=%d, errno: %d (%s)\n", __func__, ret, errno, strerror(errno)); + goto qrtr_proxy_loop_exit; + } + + for (ne = 0; ne < nevents; ne++) { + int fd = pollfds[ne].fd; + short revents = pollfds[ne].revents; + + if (revents & (POLLERR | POLLHUP | POLLNVAL)) { + dprintf("%s poll fd = %d, revents = %04x\n", __func__, fd, revents); + if (fd == cdc_wdm_fd) { + goto qrtr_proxy_loop_exit; + } + else if (fd == qrtr_proxy_server_fd) { + + } + else { + cleanup_qrtr_connection(fd); + } + + continue; + } + + if (!(pollfds[ne].revents & POLLIN)) { + continue; + } + + if (fd == qrtr_proxy_server_fd) { + accept_qrtr_connection(fd); + } + else if (fd == cdc_wdm_fd) { + struct qrtr_hdr_v1 *hdr = (struct qrtr_hdr_v1 *)rx_buf; + + nreads = read(fd, rx_buf, 8192); + if (nreads <= 0) { + dprintf("%s read=%d errno: %d (%s)\n", __func__, (int)nreads, errno, strerror(errno)); + goto qrtr_proxy_loop_exit; + } + else if (nreads != (int)align_4(le32toh(hdr->size) + sizeof(*hdr))) { + dprintf("%s nreads=%d, hdr->size = %d\n", __func__, (int)nreads, le32toh(hdr->size)); + continue; + } + + dump_qrtr(hdr, nreads, '<'); + recv_qrtr_from_dev(hdr); + } + else { + PQCQMIMSG pQMI = (PQCQMIMSG)rx_buf; + + nreads = read(fd, rx_buf, 8192); + if (nreads <= 0) { + dprintf("%s read=%d errno: %d (%s)", __func__, (int)nreads, errno, strerror(errno)); + cleanup_qrtr_connection(fd); + break; + } + else if (nreads != (le16toh(pQMI->QMIHdr.Length) + 1)) { + dprintf("%s nreads=%d, pQCQMI->QMIHdr.Length = %d\n", __func__, (int)nreads, le16toh(pQMI->QMIHdr.Length)); + continue; + } + + recv_qmi_from_client(pQMI, fd); + } + } + } + +qrtr_proxy_loop_exit: + while (!qlist_empty(&qrtr_proxy_connection)) { + QRTR_PROXY_CONNECTION *qrtr_con = qnode_to_item(qlist_head(&qrtr_proxy_connection), QRTR_PROXY_CONNECTION, qnode); + + 
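/* cleanup closes the fd, releases every client ID owned by this connection
   and sends DEL_CLIENT for each, so the modem keeps no stale QRTR ports */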
cleanup_qrtr_connection(qrtr_con->ClientFd); + } + + dprintf("%s exit, thread_id %p\n", __func__, (void *)pthread_self()); + free(rx_buf); + + return NULL; +} + +static void usage(void) { + dprintf(" -d A valid qrtr device\n" + " default /dev/mhi_IPCR, but mhi_IPCR may be invalid\n" + " -i netcard name\n" + " -v Will show all details\n"); +} + +static void sig_action(int sig) { + if (qrtr_proxy_quit == 0) { + qrtr_proxy_quit = 1; + if (thread_id) + pthread_kill(thread_id, sig); + } +} + +int main(int argc, char *argv[]) { + int opt; + char cdc_wdm[32+1] = "/dev/mhi_IPCR"; + char servername[64] = {0}; + + signal(SIGINT, sig_action); + signal(SIGTERM, sig_action); + + optind = 1; + while ( -1 != (opt = getopt(argc, argv, "d:i:vh"))) { + switch (opt) { + case 'd': + strcpy(cdc_wdm, optarg); + break; + case 'v': + verbose_debug = 1; + break; + default: + usage(); + return 0; + } + } + + sprintf(servername, "quectel-qrtr-proxy%c", cdc_wdm[strlen(cdc_wdm)-1]); + dprintf("Will use cdc-wdm='%s', proxy='%s'\n", cdc_wdm, servername); + + while (qrtr_proxy_quit == 0) { + cdc_wdm_fd = open(cdc_wdm, O_RDWR | O_NONBLOCK | O_NOCTTY); + if (cdc_wdm_fd == -1) { + dprintf("Failed to open %s, errno: %d (%s)\n", cdc_wdm, errno, strerror(errno)); + sleep(5); + continue; + } + cfmakenoblock(cdc_wdm_fd); + qlist_init(&qrtr_proxy_connection); + qlist_init(&qrtr_server_list); + pthread_create(&thread_id, NULL, qrtr_proxy_loop, NULL); + + if (qrtr_proxy_init() == 0) { + qrtr_start_server(servername); + pthread_join(thread_id, NULL); + qrtr_close_server(servername); + } + else { + pthread_cancel(thread_id); + pthread_join(thread_id, NULL); + } + + close(cdc_wdm_fd); + } + + return 0; +} diff --git a/package/wwan/driver/quectel_cm_5G/src/rmnetctl.c b/package/wwan/driver/quectel_cm_5G/src/rmnetctl.c new file mode 100644 index 000000000..3a9aae9e9 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/rmnetctl.c @@ -0,0 +1,342 @@ +//https://source.codeaurora.org/quic/la/platform/vendor/qcom-opensource/dataservices/tree/rmnetctl +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define RMNETCTL_SUCCESS 0 +#define RMNETCTL_LIB_ERR 1 +#define RMNETCTL_KERNEL_ERR 2 +#define RMNETCTL_INVALID_ARG 3 + +enum rmnetctl_error_codes_e { + RMNETCTL_API_SUCCESS = 0, + + RMNETCTL_API_FIRST_ERR = 1, + RMNETCTL_API_ERR_MESSAGE_SEND = 3, + RMNETCTL_API_ERR_MESSAGE_RECEIVE = 4, + + RMNETCTL_INIT_FIRST_ERR = 5, + RMNETCTL_INIT_ERR_PROCESS_ID = RMNETCTL_INIT_FIRST_ERR, + RMNETCTL_INIT_ERR_NETLINK_FD = 6, + RMNETCTL_INIT_ERR_BIND = 7, + + RMNETCTL_API_SECOND_ERR = 9, + RMNETCTL_API_ERR_HNDL_INVALID = RMNETCTL_API_SECOND_ERR, + RMNETCTL_API_ERR_RETURN_TYPE = 13, +}; + +struct rmnetctl_hndl_s { + uint32_t pid; + uint32_t transaction_id; + int netlink_fd; + struct sockaddr_nl src_addr, dest_addr; +}; +typedef struct rmnetctl_hndl_s rmnetctl_hndl_t; + +#define NLMSG_TAIL(nmsg) \ + ((struct rtattr *) (((char *)(nmsg)) + NLMSG_ALIGN((nmsg)->nlmsg_len))) + +struct nlmsg { + struct nlmsghdr nl_addr; + struct ifinfomsg ifmsg; + char data[500]; +}; + +#define MIN_VALID_PROCESS_ID 0 +#define MIN_VALID_SOCKET_FD 0 +#define KERNEL_PROCESS_ID 0 +#define UNICAST 0 + +enum { + IFLA_RMNET_UL_AGG_PARAMS = __IFLA_RMNET_MAX, + __IFLA_RMNET_EXT_MAX, +}; + +struct rmnet_egress_agg_params { + uint16_t agg_size; + uint16_t agg_count; + uint32_t agg_time; +}; + +static int rmnet_get_ack(rmnetctl_hndl_t *hndl, uint16_t *error_code) +{ + struct nlack { + struct nlmsghdr ackheader; + 
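/* the kernel answers NLM_F_ACK requests with an NLMSG_ERROR message:
   an nlmsghdr, then nlmsgerr (error code plus an echo of the request);
   error == 0 is the success ACK */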
struct nlmsgerr ackdata; + char data[256]; + + } ack; + int i; + + if (!hndl || !error_code) + return RMNETCTL_INVALID_ARG; + + if ((i = recv(hndl->netlink_fd, &ack, sizeof(ack), 0)) < 0) { + *error_code = errno; + return RMNETCTL_API_ERR_MESSAGE_RECEIVE; + } + + /*Ack should always be NLMSG_ERROR type*/ + if (ack.ackheader.nlmsg_type == NLMSG_ERROR) { + if (ack.ackdata.error == 0) { + *error_code = RMNETCTL_API_SUCCESS; + return RMNETCTL_SUCCESS; + } else { + *error_code = -ack.ackdata.error; + return RMNETCTL_KERNEL_ERR; + } + } + + *error_code = RMNETCTL_API_ERR_RETURN_TYPE; + return RMNETCTL_API_FIRST_ERR; +} + +static int rtrmnet_ctl_init(rmnetctl_hndl_t **hndl, uint16_t *error_code) +{ + struct sockaddr_nl __attribute__((__may_alias__)) *saddr_ptr; + int netlink_fd = -1; + pid_t pid = 0; + + if (!hndl || !error_code) + return RMNETCTL_INVALID_ARG; + + *hndl = (rmnetctl_hndl_t *)malloc(sizeof(rmnetctl_hndl_t)); + if (!*hndl) { + *error_code = RMNETCTL_API_ERR_HNDL_INVALID; + return RMNETCTL_LIB_ERR; + } + + memset(*hndl, 0, sizeof(rmnetctl_hndl_t)); + + pid = getpid(); + if (pid < MIN_VALID_PROCESS_ID) { + free(*hndl); + *error_code = RMNETCTL_INIT_ERR_PROCESS_ID; + return RMNETCTL_LIB_ERR; + } + (*hndl)->pid = KERNEL_PROCESS_ID; + netlink_fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE); + if (netlink_fd < MIN_VALID_SOCKET_FD) { + free(*hndl); + *error_code = RMNETCTL_INIT_ERR_NETLINK_FD; + return RMNETCTL_LIB_ERR; + } + + (*hndl)->netlink_fd = netlink_fd; + + memset(&(*hndl)->src_addr, 0, sizeof(struct sockaddr_nl)); + + (*hndl)->src_addr.nl_family = AF_NETLINK; + (*hndl)->src_addr.nl_pid = (*hndl)->pid; + + saddr_ptr = &(*hndl)->src_addr; + if (bind((*hndl)->netlink_fd, + (struct sockaddr *)saddr_ptr, + sizeof(struct sockaddr_nl)) < 0) { + close((*hndl)->netlink_fd); + free(*hndl); + *error_code = RMNETCTL_INIT_ERR_BIND; + return RMNETCTL_LIB_ERR; + } + + memset(&(*hndl)->dest_addr, 0, sizeof(struct sockaddr_nl)); + + (*hndl)->dest_addr.nl_family = AF_NETLINK; + (*hndl)->dest_addr.nl_pid = KERNEL_PROCESS_ID; + (*hndl)->dest_addr.nl_groups = UNICAST; + + return RMNETCTL_SUCCESS; +} + +static int rtrmnet_ctl_deinit(rmnetctl_hndl_t *hndl) +{ + if (!hndl) + return RMNETCTL_SUCCESS; + + close(hndl->netlink_fd); + free(hndl); + + return RMNETCTL_SUCCESS; +} + +static int rtrmnet_ctl_newvnd(rmnetctl_hndl_t *hndl, char *devname, char *vndname, + uint16_t *error_code, uint8_t index, + uint32_t flagconfig, uint32_t ul_agg_cnt, uint32_t ul_agg_size) +{ + struct rtattr *attrinfo, *datainfo, *linkinfo; + struct ifla_vlan_flags flags; + int devindex = 0, val = 0; + char *kind = "rmnet"; + struct nlmsg req; + short id; + + if (!hndl || !devname || !vndname || !error_code) + return RMNETCTL_INVALID_ARG; + + memset(&req, 0, sizeof(req)); + req.nl_addr.nlmsg_type = RTM_NEWLINK; + req.nl_addr.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)); + req.nl_addr.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL | + NLM_F_ACK; + req.nl_addr.nlmsg_seq = hndl->transaction_id; + hndl->transaction_id++; + + /* Get index of devname*/ + devindex = if_nametoindex(devname); + if (devindex < 0) { + *error_code = errno; + return RMNETCTL_KERNEL_ERR; + } + + /* Setup link attr with devindex as data */ + val = devindex; + attrinfo = (struct rtattr *)(((char *)&req) + + NLMSG_ALIGN(req.nl_addr.nlmsg_len)); + attrinfo->rta_type = IFLA_LINK; + attrinfo->rta_len = RTA_ALIGN(RTA_LENGTH(sizeof(val))); + memcpy(RTA_DATA(attrinfo), &val, sizeof(val)); + req.nl_addr.nlmsg_len = NLMSG_ALIGN(req.nl_addr.nlmsg_len) + + 
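/* every attribute append below repeats this pattern: place the rtattr at
   NLMSG_ALIGN(nlmsg_len), fill rta_type/rta_len and the payload, then grow
   nlmsg_len by the aligned attribute length */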
RTA_ALIGN(RTA_LENGTH(sizeof(val))); + + /* Set up IFLA info kind RMNET that has linkinfo and type */ + attrinfo = (struct rtattr *)(((char *)&req) + + NLMSG_ALIGN(req.nl_addr.nlmsg_len)); + attrinfo->rta_type = IFLA_IFNAME; + attrinfo->rta_len = RTA_ALIGN(RTA_LENGTH(strlen(vndname) + 1)); + memcpy(RTA_DATA(attrinfo), vndname, strlen(vndname) + 1); + req.nl_addr.nlmsg_len = NLMSG_ALIGN(req.nl_addr.nlmsg_len) + + RTA_ALIGN(RTA_LENGTH(strlen(vndname) + 1)); + + linkinfo = (struct rtattr *)(((char *)&req) + + NLMSG_ALIGN(req.nl_addr.nlmsg_len)); + linkinfo->rta_type = IFLA_LINKINFO; + linkinfo->rta_len = RTA_ALIGN(RTA_LENGTH(0)); + req.nl_addr.nlmsg_len = NLMSG_ALIGN(req.nl_addr.nlmsg_len) + + RTA_ALIGN(RTA_LENGTH(0)); + + attrinfo = (struct rtattr *)(((char *)&req) + + NLMSG_ALIGN(req.nl_addr.nlmsg_len)); + attrinfo->rta_type = IFLA_INFO_KIND; + attrinfo->rta_len = RTA_ALIGN(RTA_LENGTH(strlen(kind))); + memcpy(RTA_DATA(attrinfo), kind, strlen(kind)); + req.nl_addr.nlmsg_len = NLMSG_ALIGN(req.nl_addr.nlmsg_len) + + RTA_ALIGN(RTA_LENGTH(strlen(kind))); + + datainfo = (struct rtattr *)(((char *)&req) + + NLMSG_ALIGN(req.nl_addr.nlmsg_len)); + datainfo->rta_type = IFLA_INFO_DATA; + datainfo->rta_len = RTA_ALIGN(RTA_LENGTH(0)); + req.nl_addr.nlmsg_len = NLMSG_ALIGN(req.nl_addr.nlmsg_len) + + RTA_ALIGN(RTA_LENGTH(0)); + + id = index; + attrinfo = (struct rtattr *)(((char *)&req) + + NLMSG_ALIGN(req.nl_addr.nlmsg_len)); + attrinfo->rta_type = IFLA_VLAN_ID; + attrinfo->rta_len = RTA_LENGTH(sizeof(id)); + memcpy(RTA_DATA(attrinfo), &id, sizeof(id)); + req.nl_addr.nlmsg_len = NLMSG_ALIGN(req.nl_addr.nlmsg_len) + + RTA_ALIGN(RTA_LENGTH(sizeof(id))); + + if (flagconfig != 0) { + flags.mask = flagconfig; + flags.flags = flagconfig; + + attrinfo = (struct rtattr *)(((char *)&req) + + NLMSG_ALIGN(req.nl_addr.nlmsg_len)); + attrinfo->rta_type = IFLA_VLAN_FLAGS; + attrinfo->rta_len = RTA_LENGTH(sizeof(flags)); + memcpy(RTA_DATA(attrinfo), &flags, sizeof(flags)); + req.nl_addr.nlmsg_len = NLMSG_ALIGN(req.nl_addr.nlmsg_len) + + RTA_ALIGN(RTA_LENGTH(sizeof(flags))); + } + + if (ul_agg_cnt > 1) { + struct rmnet_egress_agg_params agg_params; + + agg_params.agg_size = ul_agg_size; + agg_params.agg_count = ul_agg_cnt; + agg_params.agg_time = 3000000; + + attrinfo = (struct rtattr *)(((char *)&req) + + NLMSG_ALIGN(req.nl_addr.nlmsg_len)); + attrinfo->rta_type = IFLA_RMNET_UL_AGG_PARAMS; + attrinfo->rta_len = RTA_LENGTH(sizeof(agg_params)); + memcpy(RTA_DATA(attrinfo), &agg_params, sizeof(agg_params)); + req.nl_addr.nlmsg_len = NLMSG_ALIGN(req.nl_addr.nlmsg_len) + + RTA_ALIGN(RTA_LENGTH(sizeof(agg_params))); + } + + datainfo->rta_len = (char *)NLMSG_TAIL(&req.nl_addr) - (char *)datainfo; + + linkinfo->rta_len = (char *)NLMSG_TAIL(&req.nl_addr) - (char *)linkinfo; + + if (send(hndl->netlink_fd, &req, req.nl_addr.nlmsg_len, 0) < 0) { + *error_code = RMNETCTL_API_ERR_MESSAGE_SEND; + return RMNETCTL_LIB_ERR; + } + + return rmnet_get_ack(hndl, error_code); +} + +int rtrmnet_ctl_create_vnd(char *devname, char *vndname, uint8_t muxid, + uint32_t qmap_version, uint32_t ul_agg_cnt, uint32_t ul_agg_size) +{ + struct rmnetctl_hndl_s *handle; + uint16_t error_code; + int return_code; + uint32_t flagconfig = RMNET_FLAGS_INGRESS_DEAGGREGATION; + + printf("%s devname: %s, vndname: %s, muxid: %d, qmap_version: %d\n", + __func__, devname, vndname, muxid, qmap_version); + + ul_agg_cnt = 0; //TODO + + if (ul_agg_cnt > 1) + flagconfig |= RMNET_EGRESS_FORMAT_AGGREGATION; + + if (qmap_version == 9) { //QMAPV5 +#ifdef 
RMNET_FLAGS_INGRESS_MAP_CKSUMV5 + flagconfig |= RMNET_FLAGS_INGRESS_MAP_CKSUMV5; + flagconfig |= RMNET_FLAGS_EGRESS_MAP_CKSUMV5; +#else + return -1001; +#endif + } + else if (qmap_version == 8) { //QMAPV4 + flagconfig |= RMNET_FLAGS_INGRESS_MAP_CKSUMV4; + flagconfig |= RMNET_FLAGS_EGRESS_MAP_CKSUMV4; + } + else if (qmap_version == 5) { //QMAPV1 + } + else { + flagconfig = 0; + } + + return_code = rtrmnet_ctl_init(&handle, &error_code); + if (return_code) { + printf("rtrmnet_ctl_init error_code: %d, return_code: %d, errno: %d (%s)\n", + error_code, return_code, errno, strerror(errno)); + } + if (return_code == RMNETCTL_SUCCESS) { + return_code = rtrmnet_ctl_newvnd(handle, devname, vndname, &error_code, + muxid, flagconfig, ul_agg_cnt, ul_agg_size); + if (return_code) { + printf("rtrmnet_ctl_newvnd error_code: %d, return_code: %d, errno: %d (%s)\n", + error_code, return_code, errno, strerror(errno)); + } + rtrmnet_ctl_deinit(handle); + } + + return return_code; +} diff --git a/package/wwan/driver/quectel_cm_5G/src/udhcpc.c b/package/wwan/driver/quectel_cm_5G/src/udhcpc.c new file mode 100644 index 000000000..c439229e9 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/udhcpc.c @@ -0,0 +1,733 @@ +/****************************************************************************** + @file udhcpc.c + @brief call DHCP tools to obtain IP address. + + DESCRIPTION + Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules. + + INITIALIZATION AND SEQUENCING REQUIREMENTS + None. + + --------------------------------------------------------------------------- + Copyright (c) 2016 - 2020 Quectel Wireless Solution, Co., Ltd. All Rights Reserved. + Quectel Wireless Solution Proprietary and Confidential. + --------------------------------------------------------------------------- +******************************************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "util.h" +#include "QMIThread.h" +extern int ql_get_netcard_carrier_state(const char *devname); + +static __inline in_addr_t qmi2addr(uint32_t __x) { + return (__x>>24) | (__x>>8&0xff00) | (__x<<8&0xff0000) | (__x<<24); +} + +static int ql_system(const char *shell_cmd) { + dbg_time("%s", shell_cmd); + return system(shell_cmd); +} + +static void ifc_init_ifr(const char *name, struct ifreq *ifr) +{ + memset(ifr, 0, sizeof(struct ifreq)); + no_trunc_strncpy(ifr->ifr_name, name, IFNAMSIZ); + ifr->ifr_name[IFNAMSIZ - 1] = 0; +} + +static void ql_set_mtu(const char *ifname, int ifru_mtu) { + int inet_sock; + struct ifreq ifr; + + inet_sock = socket(AF_INET, SOCK_DGRAM, 0); + + if (inet_sock > 0) { + ifc_init_ifr(ifname, &ifr); + + if (!ioctl(inet_sock, SIOCGIFMTU, &ifr)) { + if (ifr.ifr_ifru.ifru_mtu != ifru_mtu) { + dbg_time("change mtu %d -> %d", ifr.ifr_ifru.ifru_mtu , ifru_mtu); + ifr.ifr_ifru.ifru_mtu = ifru_mtu; + ioctl(inet_sock, SIOCSIFMTU, &ifr); + } + } + + close(inet_sock); + } +} + +static int ifc_get_addr(const char *name, in_addr_t *addr) +{ + int inet_sock; + struct ifreq ifr; + int ret = 0; + + inet_sock = socket(AF_INET, SOCK_DGRAM, 0); + + ifc_init_ifr(name, &ifr); + if (addr != NULL) { + ret = ioctl(inet_sock, SIOCGIFADDR, &ifr); + if (ret < 0) { + *addr = 0; + } else { + *addr = ((struct sockaddr_in*) &ifr.ifr_addr)->sin_addr.s_addr; + } + } + close(inet_sock); + return ret; +} + +static short ifc_get_flags(const char *ifname) +{ + int inet_sock; + struct ifreq ifr; + int ret = 0; + + inet_sock = socket(AF_INET, 
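/* the socket carries no traffic; it is only an ioctl handle for SIOCGIFFLAGS */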
SOCK_DGRAM, 0); + + if (inet_sock > 0) { + ifc_init_ifr(ifname, &ifr); + + if (!ioctl(inet_sock, SIOCGIFFLAGS, &ifr)) { + ret = ifr.ifr_ifru.ifru_flags; + } + + close(inet_sock); + } + + return ret; +} + +static int ql_netcard_ipv4_address_check(const char *ifname, in_addr_t ip) { + in_addr_t addr = 0; + + ifc_get_addr(ifname, &addr); + return addr == ip; +} + +static int ql_raw_ip_mode_check(const char *ifname, uint32_t ip) { + int fd; + char raw_ip[128]; + char shell_cmd[128]; + char mode[2] = "X"; + int mode_change = 0; + + if (ql_netcard_ipv4_address_check(ifname, qmi2addr(ip))) + return 0; + + snprintf(raw_ip, sizeof(raw_ip), "/sys/class/net/%s/qmi/raw_ip", ifname); + if (access(raw_ip, F_OK)) + return 0; + + fd = open(raw_ip, O_RDWR | O_NONBLOCK | O_NOCTTY); + if (fd < 0) { + dbg_time("%s %d fail to open(%s), errno:%d (%s)", __FILE__, __LINE__, raw_ip, errno, strerror(errno)); + return 0; + } + + if (read(fd, mode, 2) == -1) {}; + if (mode[0] == '0' || mode[0] == 'N') { + dbg_time("File:%s Line:%d udhcpc fail to get ip address, try next:", __func__, __LINE__); + snprintf(shell_cmd, sizeof(shell_cmd), "ifconfig %s down", ifname); + ql_system(shell_cmd); + dbg_time("echo Y > /sys/class/net/%s/qmi/raw_ip", ifname); + mode[0] = 'Y'; + if (write(fd, mode, 2) == -1) {}; + mode_change = 1; + snprintf(shell_cmd, sizeof(shell_cmd), "ifconfig %s up", ifname); + ql_system(shell_cmd); + } + + close(fd); + return mode_change; +} + +static void* udhcpc_thread_function(void* arg) { + FILE * udhcpc_fp; + char *udhcpc_cmd = (char *)arg; + + if (udhcpc_cmd == NULL) + return NULL; + + dbg_time("%s", udhcpc_cmd); + udhcpc_fp = popen(udhcpc_cmd, "r"); + free(udhcpc_cmd); + if (udhcpc_fp) { + char buf[0xff]; + + buf[sizeof(buf)-1] = '\0'; + while((fgets(buf, sizeof(buf)-1, udhcpc_fp)) != NULL) { + if ((strlen(buf) > 1) && (buf[strlen(buf) - 1] == '\n')) + buf[strlen(buf) - 1] = '\0'; + dbg_time("%s", buf); + } + + pclose(udhcpc_fp); + } + + return NULL; +} + +//#define USE_DHCLIENT +#ifdef USE_DHCLIENT +static int dhclient_alive = 0; +#endif +static int dibbler_client_alive = 0; + +void ql_set_driver_link_state(PROFILE_T *profile, int link_state) { + char link_file[128]; + int fd; + int new_state = 0; + + snprintf(link_file, sizeof(link_file), "/sys/class/net/%s/link_state", profile->usbnet_adapter); + fd = open(link_file, O_RDWR | O_NONBLOCK | O_NOCTTY); + if (fd == -1) { + if (errno != ENOENT) + dbg_time("Fail to access %s, errno: %d (%s)", link_file, errno, strerror(errno)); + return; + } + + if (profile->qmap_mode <= 1) + new_state = !!link_state; + else { + //0x80 means link off this pdp + new_state = (link_state ? 
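/* per-PDP link_state encoding used by the Quectel qmi_wwan driver: bit 7 set
   means "link down for this PDP", the low 7 bits carry the QMAP mux id; e.g.
   with muxid 0x81 this writes "1" on link-up and "129" (0x81) on link-down */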
0x00 : 0x80) + (profile->muxid & 0x7F); + } + + snprintf(link_file, sizeof(link_file), "%d\n", new_state); + if (write(fd, link_file, sizeof(link_file)) == -1) {}; + + if (link_state == 0 && profile->qmapnet_adapter[0] + && strcmp(profile->qmapnet_adapter, profile->usbnet_adapter)) { + size_t rc; + + lseek(fd, 0, SEEK_SET); + rc = read(fd, link_file, sizeof(link_file)); + if (rc > 1 && (!strncasecmp(link_file, "0\n", 2) || !strncasecmp(link_file, "0x0\n", 4))) { + snprintf(link_file, sizeof(link_file), "ifconfig %s down", profile->usbnet_adapter); + ql_system(link_file); + } + } + + close(fd); +} + +static const char *ipv4Str(const uint32_t Address) { + static char str[] = {"255.225.255.255"}; + uint8_t *ip = (uint8_t *)&Address; + + snprintf(str, sizeof(str), "%d.%d.%d.%d", ip[3], ip[2], ip[1], ip[0]); + return str; +} + +static const char *ipv6Str(const UCHAR Address[16]) { + static char str[64]; + uint16_t ip[8]; + int i; + for (i = 0; i < 8; i++) { + ip[i] = (Address[i*2]<<8) + Address[i*2+1]; + } + + snprintf(str, sizeof(str), "%x:%x:%x:%x:%x:%x:%x:%x", + ip[0], ip[1], ip[2], ip[3], ip[4], ip[5], ip[6], ip[7]); + + return str; +} + +void update_ipv4_address(const char *ifname, const char *ip, const char *gw, unsigned prefix) +{ + char shell_cmd[128]; + + if (!ifname) + return; + + if (!access("/sbin/ip", X_OK)) { + snprintf(shell_cmd, sizeof(shell_cmd), "ip -%d address flush dev %s", 4, ifname); + ql_system(shell_cmd); + + snprintf(shell_cmd, sizeof(shell_cmd), "ip -%d address add %s/%u dev %s", 4, ip, prefix, ifname); + ql_system(shell_cmd); + + //ping6 www.qq.com + snprintf(shell_cmd, sizeof(shell_cmd), "ip -%d route add default via %s dev %s", 4, gw, ifname); + ql_system(shell_cmd); + } else { + unsigned n = (0xFFFFFFFF >> (32 - prefix)) << (32 - prefix); + // n = (n>>24) | (n>>8&0xff00) | (n<<8&0xff0000) | (n<<24); + + snprintf(shell_cmd, sizeof(shell_cmd), "ifconfig %s %s netmask %s", ifname, ip, ipv4Str(n)); + ql_system(shell_cmd); + + //Resetting default routes + snprintf(shell_cmd, sizeof(shell_cmd), "route del default dev %s", ifname); + while(!system(shell_cmd)); + + snprintf(shell_cmd, sizeof(shell_cmd), "route add default gw %s dev %s", gw, ifname); + ql_system(shell_cmd); + } +} + +void update_ipv6_address(const char *ifname, const char *ip, const char *gw, unsigned prefix) { + char shell_cmd[128]; + + (void)gw; + if (!access("/sbin/ip", X_OK)) { + snprintf(shell_cmd, sizeof(shell_cmd), "ip -%d address flush dev %s", 6, ifname); + ql_system(shell_cmd); + + snprintf(shell_cmd, sizeof(shell_cmd), "ip -%d address add %s/%u dev %s", 6, ip, prefix, ifname); + ql_system(shell_cmd); + + //ping6 www.qq.com + snprintf(shell_cmd, sizeof(shell_cmd), "ip -%d route add default dev %s", 6, ifname); + ql_system(shell_cmd); + } else { + snprintf(shell_cmd, sizeof(shell_cmd), "ifconfig %s %s/%d", ifname, ip, prefix); + ql_system(shell_cmd); + + snprintf(shell_cmd, sizeof(shell_cmd), "route -A inet6 add default dev %s", ifname); + ql_system(shell_cmd); + } +} + +static void update_ip_address_by_qmi(const char *ifname, const IPV4_T *ipv4, const IPV6_T *ipv6) { + char *d1, *d2; + + if (ipv4 && ipv4->Address) { + d1 = strdup(ipv4Str(ipv4->Address)); + d2 = strdup(ipv4Str(ipv4->Gateway)); + unsigned prefix = 0; + unsigned n = 0; + + for (n = 0; n < 32; n++) { + if (ipv4->SubnetMask&((unsigned)1<DnsPrimary) { + d1 = strdup(ipv4Str(ipv4->DnsPrimary)); + d2 = strdup(ipv4Str(ipv4->DnsSecondary ? 
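/* fall back to the primary DNS when no secondary is provided, so
   update_resolv_conf() always receives two server strings */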
ipv4->DnsSecondary : ipv4->DnsPrimary)); + update_resolv_conf(4, ifname, d1, d2); + free(d1); free(d2); + } + } + + if (ipv6 && ipv6->Address[0] && ipv6->PrefixLengthIPAddr) { + d1 = strdup(ipv6Str(ipv6->Address)); + d2 = strdup(ipv6Str(ipv6->Gateway)); + + update_ipv6_address(ifname, d1, d2, ipv6->PrefixLengthIPAddr); + free(d1); free(d2); + + //Adding DNS + if (ipv6->DnsPrimary[0]) { + d1 = strdup(ipv6Str(ipv6->DnsPrimary)); + d2 = strdup(ipv6Str(ipv6->DnsSecondary[0] ? ipv6->DnsSecondary : ipv6->DnsPrimary)); + update_resolv_conf(6, ifname, d1, d2); + free(d1); free(d2); + } + } +} + +//#define QL_OPENWER_NETWORK_SETUP +#ifdef QL_OPENWER_NETWORK_SETUP +static const char *openwrt_lan = "br-lan"; +static const char *openwrt_wan = "wwan0"; + +static int ql_openwrt_system(const char *cmd) { + int i; + int ret = 1; + char shell_cmd[128]; + + snprintf(shell_cmd, sizeof(shell_cmd), "%s 2>1 > /dev/null", cmd); + + for (i = 0; i < 15; i++) { + dbg_time("%s", cmd); + ret = system(shell_cmd); + if (!ret) + break; + sleep(1); + } + + return ret; +} + +static int ql_openwrt_is_wan(const char *ifname) { + if (openwrt_lan == NULL) { + system("uci show network.wan.ifname"); + } + + if (strcmp(ifname, openwrt_wan)) + return 0; + + return 1; +} + +static void ql_openwrt_setup_wan(const char *ifname, const IPV4_T *ipv4) { + FILE *fp = NULL; + char config[64]; + + snprintf(config, sizeof(config), "/tmp/rmnet_%s_ipv4config", ifname); + + if (ipv4 == NULL) { + if (ql_openwrt_is_wan(ifname)) + ql_openwrt_system("ifdown wan"); + return; + } + + fp = fopen(config, "w"); + if (fp == NULL) + return; + + fprintf(fp, "IFNAME=\"%s\"\n", ifname); + fprintf(fp, "PUBLIC_IP=\"%s\"\n", ipv4Str(ipv4->Address)); + fprintf(fp, "NETMASK=\"%s\"\n", ipv4Str(ipv4->SubnetMask)); + fprintf(fp, "GATEWAY=\"%s\"\n", ipv4Str(ipv4->Gateway)); + fprintf(fp, "DNSSERVERS=\"%s", ipv4Str(ipv4->DnsPrimary)); + if (ipv4->DnsSecondary != 0) + fprintf(fp, " %s", ipv4Str(ipv4->DnsSecondary)); + fprintf(fp, "\"\n"); + + fclose(fp); + + if (!ql_openwrt_is_wan(ifname)) + return; + + ql_openwrt_system("ifup wan"); +} + +static void ql_openwrt_setup_wan6(const char *ifname, const IPV6_T *ipv6) { + FILE *fp = NULL; + char config[64]; + int first_ifup; + + snprintf(config, sizeof(config), "/tmp/rmnet_%s_ipv6config", ifname); + + if (ipv6 == NULL) { + if (ql_openwrt_is_wan(ifname)) + ql_openwrt_system("ifdown wan6"); + return; + } + + first_ifup = (access(config, F_OK) != 0); + + fp = fopen(config, "w"); + if (fp == NULL) + return; + + fprintf(fp, "IFNAME=\"%s\"\n", ifname); + fprintf(fp, "PUBLIC_IP=\"%s\"\n", ipv6Str(ipv6->Address)); + fprintf(fp, "NETMASK=\"%s\"\n", ipv6Str(ipv6->SubnetMask)); + fprintf(fp, "GATEWAY=\"%s\"\n", ipv6Str(ipv6->Gateway)); + fprintf(fp, "PrefixLength=\"%d\"\n", ipv6->PrefixLengthIPAddr); + fprintf(fp, "DNSSERVERS=\"%s", ipv6Str(ipv6->DnsPrimary)); + if (ipv6->DnsSecondary[0]) + fprintf(fp, " %s", ipv6Str(ipv6->DnsSecondary)); + fprintf(fp, "\"\n"); + + fclose(fp); + + if (!ql_openwrt_is_wan(ifname)) + return; + + if (first_ifup) + ql_openwrt_system("ifup wan6"); + else + ql_openwrt_system("/etc/init.d/network restart"); //make PC to release old IPV6 address, and RS new IPV6 address + +#if 1 //TODO? why need this? 
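+    /* A plausible reading of this block (the author left it as a TODO): once
+       the delegated IPv6 prefix is bridged to the LAN, the on-link prefix
+       route must live on br-lan instead of the wwan interface, otherwise the
+       router would forward LAN neighbours' traffic back to the modem. Hence:
+       delete the prefix route on ifname, then re-add it on openwrt_lan. */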
+ if (openwrt_lan) { + int i; + char shell_cmd[128]; + UCHAR Address[16] = {0}; + + ql_openwrt_system(("ifstatus lan")); + + for (i = 0; i < (ipv6->PrefixLengthIPAddr/8); i++) + Address[i] = ipv6->Address[i]; + + snprintf(shell_cmd, sizeof(shell_cmd), "ip route del %s/%u dev %s", ipv6Str(Address), ipv6->PrefixLengthIPAddr, ifname); + ql_openwrt_system(shell_cmd); + + snprintf(shell_cmd, sizeof(shell_cmd), "ip route add %s/%u dev %s", ipv6Str(Address), ipv6->PrefixLengthIPAddr, openwrt_lan); + ql_system(shell_cmd); + } +#endif +} +#endif + +void udhcpc_start(PROFILE_T *profile) { + char *ifname = profile->usbnet_adapter; + char shell_cmd[128]; + + ql_set_driver_link_state(profile, 1); + + if (profile->qmapnet_adapter[0]) { + ifname = profile->qmapnet_adapter; + } + + if (profile->rawIP && profile->ipv4.Address && profile->ipv4.Mtu) { + ql_set_mtu(ifname, (profile->ipv4.Mtu)); + } + + if (strcmp(ifname, profile->usbnet_adapter)) { + snprintf(shell_cmd, sizeof(shell_cmd), "ifconfig %s up", profile->usbnet_adapter); + ql_system(shell_cmd); + if (ifc_get_flags(ifname)&IFF_UP) { + snprintf(shell_cmd, sizeof(shell_cmd), "ifconfig %s down", ifname); + ql_system(shell_cmd); + } + } + + snprintf(shell_cmd, sizeof(shell_cmd), "ifconfig %s up", ifname); + ql_system(shell_cmd); + + if (profile->ipv4.Address) { + if (profile->PCSCFIpv4Addr1) + dbg_time("pcscf1: %s", ipv4Str(profile->PCSCFIpv4Addr1)); + if (profile->PCSCFIpv4Addr2) + dbg_time("pcscf2: %s", ipv4Str(profile->PCSCFIpv4Addr2)); + } + + if (profile->ipv6.Address[0] && profile->ipv6.PrefixLengthIPAddr) { + if (profile->PCSCFIpv6Addr1[0]) + dbg_time("pcscf1: %s", ipv6Str(profile->PCSCFIpv6Addr1)); + if (profile->PCSCFIpv6Addr2[0]) + dbg_time("pcscf2: %s", ipv6Str(profile->PCSCFIpv6Addr2)); + } + +#if 1 //for bridge mode, only one public IP, so do udhcpc manually + if (ql_bridge_mode_detect(profile)) { + return; + } +#endif + +//because must use udhcpc to obtain IP when working on ETH mode, +//so it is better also use udhcpc to obtain IP when working on IP mode. 
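+//(raw-IP QMI interfaces could in principle be configured straight from the
+//QMI response -- see the disabled update_ip_address_by_qmi() block below --
+//but running udhcpc everywhere keeps a single code path)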
+//use the same policy for all modules +#if 0 + if (profile->rawIP != 0) //mdm9x07/ec25,ec20 R2.0 + { + update_ip_address_by_qmi(ifname, &profile->ipv4, &profile->ipv6); + return; + } +#endif + + if (profile->ipv4.Address == 0) + goto set_ipv6; + + if (profile->request_ops == &mbim_request_ops) { //lots of mbim modem do not support DHCP + update_ip_address_by_qmi(ifname, &profile->ipv4, NULL); + } + else +/* Do DHCP using busybox tools */ + { + char udhcpc_cmd[128]; + pthread_attr_t udhcpc_thread_attr; + pthread_t udhcpc_thread_id; + + pthread_attr_init(&udhcpc_thread_attr); + pthread_attr_setdetachstate(&udhcpc_thread_attr, PTHREAD_CREATE_DETACHED); + +#ifdef USE_DHCLIENT + snprintf(udhcpc_cmd, sizeof(udhcpc_cmd), "dhclient -4 -d --no-pid %s", ifname); + dhclient_alive++; +#else + if (access("/usr/share/udhcpc/default.script", X_OK) + && access("/etc//udhcpc/default.script", X_OK)) { + dbg_time("No default.script found, it should be in '/usr/share/udhcpc/' or '/etc//udhcpc' depend on your udhcpc version!"); + } + + //-f,--foreground Run in foreground + //-b,--background Background if lease is not obtained + //-n,--now Exit if lease is not obtained + //-q,--quit Exit after obtaining lease + //-t,--retries N Send up to N discover packets (default 3) + snprintf(udhcpc_cmd, sizeof(udhcpc_cmd), "busybox udhcpc -f -n -q -t 5 -i %s", ifname); +#endif + +#if 1 //for OpenWrt + if (!access("/lib/netifd/dhcp.script", X_OK) && !access("/sbin/ifup", X_OK) && !access("/sbin/ifstatus", X_OK)) { +#if 0 //20210415 do not promot these message + dbg_time("you are use OpenWrt?"); + dbg_time("should not calling udhcpc manually?"); + dbg_time("should modify /etc/config/network as below?"); + dbg_time("config interface wan"); + dbg_time("\toption ifname %s", ifname); + dbg_time("\toption proto dhcp"); + dbg_time("should use \"/sbin/ifstaus wan\" to check %s 's status?", ifname); +#endif + } +#endif + +#ifdef USE_DHCLIENT + pthread_create(&udhcpc_thread_id, &udhcpc_thread_attr, udhcpc_thread_function, (void*)strdup(udhcpc_cmd)); + sleep(1); +#else + pthread_create(&udhcpc_thread_id, NULL, udhcpc_thread_function, (void*)strdup(udhcpc_cmd)); + pthread_join(udhcpc_thread_id, NULL); + + if (profile->request_ops == &atc_request_ops + && !ql_netcard_ipv4_address_check(ifname, qmi2addr(profile->ipv4.Address))) { + ql_get_netcard_carrier_state(ifname); + } + + if (profile->request_ops != &qmi_request_ops) { //only QMI modem support next fixup! + goto set_ipv6; + } + + if (ql_raw_ip_mode_check(ifname, profile->ipv4.Address)) { + pthread_create(&udhcpc_thread_id, NULL, udhcpc_thread_function, (void*)strdup(udhcpc_cmd)); + pthread_join(udhcpc_thread_id, NULL); + } + + if (!ql_netcard_ipv4_address_check(ifname, qmi2addr(profile->ipv4.Address))) { + //no udhcpc's default.script exist, directly set ip and dns + update_ip_address_by_qmi(ifname, &profile->ipv4, NULL); + } + //Add by Demon. check default route + FILE *rt_fp = NULL; + char rt_cmd[128] = {0}; + + //Check if there is a default route. + snprintf(rt_cmd, sizeof(rt_cmd), "route -n | grep %s | awk '{print $1}' | grep 0.0.0.0", ifname); + rt_fp = popen((const char *)rt_cmd, "r"); + if (rt_fp != NULL) { + char buf[20] = {0}; + int found_default_rt = 0; + + if (fgets(buf, sizeof(buf), rt_fp) != NULL) { + //Find the specified interface + found_default_rt = 1; + } + + if (1 == found_default_rt) { + //dbg_time("Route items found for %s", ifname); + } + else { + dbg_time("Warning: No route items found for %s", ifname); + } + + pclose(rt_fp); + } + //End by Demon. 
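+    /* Hypothetical fixup, not part of the original flow: if no default route
+       was found above, it could be restored from the QMI-reported gateway:
+
+           snprintf(rt_cmd, sizeof(rt_cmd), "route add default gw %s dev %s",
+                    ipv4Str(profile->ipv4.Gateway), ifname);
+           ql_system(rt_cmd);
+
+       as written, the code only logs a warning and leaves routing untouched. */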
+#endif + } + +#ifdef QL_OPENWER_NETWORK_SETUP + ql_openwrt_setup_wan(ifname, &profile->ipv4); +#endif + +set_ipv6: + if (profile->ipv6.Address[0] && profile->ipv6.PrefixLengthIPAddr) { +#if 1 + //module do not support DHCPv6, only support 'Router Solicit' + //and it seem if enable /proc/sys/net/ipv6/conf/all/forwarding, Kernel do not send RS + const char *forward_file = "/proc/sys/net/ipv6/conf/all/forwarding"; + int forward_fd = open(forward_file, O_RDONLY); + if (forward_fd > 0) { + char forward_state[2]; + if (read(forward_fd, forward_state, 2) == -1) {}; + if (forward_state[0] == '1') { + //dbg_time("%s enabled, kernel maybe donot send 'Router Solicit'", forward_file); + } + close(forward_fd); + } + + update_ip_address_by_qmi(ifname, NULL, &profile->ipv6); + + if (profile->ipv6.DnsPrimary[0] || profile->ipv6.DnsSecondary[0]) { + char dns1str[64], dns2str[64]; + + if (profile->ipv6.DnsPrimary[0]) { + strcpy(dns1str, ipv6Str(profile->ipv6.DnsPrimary)); + } + + if (profile->ipv6.DnsSecondary[0]) { + strcpy(dns2str, ipv6Str(profile->ipv6.DnsSecondary)); + } + + update_resolv_conf(6, ifname, profile->ipv6.DnsPrimary[0] ? dns1str : NULL, + profile->ipv6.DnsSecondary[0] != '\0' ? dns2str : NULL); + } + +#ifdef QL_OPENWER_NETWORK_SETUP + ql_openwrt_setup_wan6(ifname, &profile->ipv6); +#endif +#else +#ifdef USE_DHCLIENT + snprintf(udhcpc_cmd, sizeof(udhcpc_cmd), "dhclient -6 -d --no-pid %s", ifname); + dhclient_alive++; +#else + /* + DHCPv6: Dibbler - a portable DHCPv6 + 1. download from http://klub.com.pl/dhcpv6/ + 2. cross-compile + 2.1 ./configure --host=arm-linux-gnueabihf + 2.2 copy dibbler-client to your board + 3. mkdir -p /var/log/dibbler/ /var/lib/ on your board + 4. create /etc/dibbler/client.conf on your board, the content is + log-mode short + log-level 7 + iface wwan0 { + ia + option dns-server + } + 5. run "dibbler-client start" to get ipV6 address + 6. 
run "route -A inet6 add default dev wwan0" to add default route + */ + snprintf(shell_cmd, sizeof(shell_cmd), "route -A inet6 add default %s", ifname); + ql_system(shell_cmd); + snprintf(udhcpc_cmd, sizeof(udhcpc_cmd), "dibbler-client run"); + dibbler_client_alive++; +#endif + + pthread_create(&udhcpc_thread_id, &udhcpc_thread_attr, udhcpc_thread_function, (void*)strdup(udhcpc_cmd)); +#endif + } +} + +void udhcpc_stop(PROFILE_T *profile) { + char *ifname = profile->usbnet_adapter; + char shell_cmd[128]; + + ql_set_driver_link_state(profile, 0); + + if (profile->qmapnet_adapter[0]) { + ifname = profile->qmapnet_adapter; + } + +#ifdef USE_DHCLIENT + if (dhclient_alive) { + system("killall dhclient"); + dhclient_alive = 0; + } +#endif + if (dibbler_client_alive) { + if (system("killall dibbler-client")) {}; + dibbler_client_alive = 0; + } + +//it seems when call netif_carrier_on(), and netcard 's IP is "0.0.0.0", will cause netif_queue_stopped() + snprintf(shell_cmd, sizeof(shell_cmd), "ifconfig %s 0.0.0.0", ifname); + ql_system(shell_cmd); + snprintf(shell_cmd, sizeof(shell_cmd), "ifconfig %s down", ifname); + ql_system(shell_cmd); + +#ifdef QL_OPENWER_NETWORK_SETUP + ql_openwrt_setup_wan(ifname, NULL); + ql_openwrt_setup_wan6(ifname, NULL); +#endif +} diff --git a/package/wwan/driver/quectel_cm_5G/src/udhcpc_netlink.c b/package/wwan/driver/quectel_cm_5G/src/udhcpc_netlink.c new file mode 100644 index 000000000..5e0522368 --- /dev/null +++ b/package/wwan/driver/quectel_cm_5G/src/udhcpc_netlink.c @@ -0,0 +1,179 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "libmnl/ifutils.h" +#include "libmnl/dhcp/dhcp.h" +#include "util.h" +#include "QMIThread.h" + +static int ql_raw_ip_mode_check(const char *ifname) +{ + int fd; + char raw_ip[128]; + char mode[2] = "X"; + int mode_change = 0; + + snprintf(raw_ip, sizeof(raw_ip), "/sys/class/net/%s/qmi/raw_ip", ifname); + if (access(raw_ip, F_OK)) + return 0; + + fd = open(raw_ip, O_RDWR | O_NONBLOCK | O_NOCTTY); + if (fd < 0) + { + dbg_time("%s %d fail to open(%s), errno:%d (%s)", __FILE__, __LINE__, raw_ip, errno, strerror(errno)); + return 0; + } + + read(fd, mode, 2); + if (mode[0] == '0' || mode[0] == 'N') + { + if_link_down(ifname); + dbg_time("echo Y > /sys/class/net/%s/qmi/raw_ip", ifname); + mode[0] = 'Y'; + write(fd, mode, 2); + mode_change = 1; + if_link_up(ifname); + } + + close(fd); + return mode_change; +} + +void ql_set_driver_link_state(PROFILE_T *profile, int link_state) +{ + char link_file[128]; + int fd; + int new_state = 0; + + snprintf(link_file, sizeof(link_file), "/sys/class/net/%s/link_state", profile->usbnet_adapter); + fd = open(link_file, O_RDWR | O_NONBLOCK | O_NOCTTY); + if (fd == -1) + { + if (errno != ENOENT) + dbg_time("Fail to access %s, errno: %d (%s)", link_file, errno, strerror(errno)); + return; + } + + if (profile->qmap_mode <= 1) + new_state = !!link_state; + else + { + //0x80 means link off this pdp + new_state = (link_state ? 
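/* same driver encoding as in udhcpc.c, except the PDP index is used directly
   instead of the masked QMAP mux id; 0x80 still marks the link down */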
+
+void udhcpc_start(PROFILE_T *profile)
+{
+    char *ifname = profile->usbnet_adapter;
+
+    ql_set_driver_link_state(profile, 1);
+    ql_raw_ip_mode_check(ifname);
+
+    if (profile->qmapnet_adapter[0])
+    {
+        ifname = profile->qmapnet_adapter;
+    }
+    if (profile->rawIP && profile->ipv4.Address && profile->ipv4.Mtu)
+    {
+        if_set_mtu(ifname, profile->ipv4.Mtu);
+    }
+
+    if (strcmp(ifname, profile->usbnet_adapter))
+    {
+        if_link_up(profile->usbnet_adapter);
+    }
+
+    if_link_up(ifname);
+
+#if 1 //in bridge mode there is only one public IP, so the host behind the bridge runs DHCP itself
+    if (ql_bridge_mode_detect(profile))
+    {
+        return;
+    }
+#endif
+    // if DHCP is wanted instead (build with the ${DHCP} src files):
+    // do_dhcp(ifname);
+    // return;
+
+    /* IPv4 Addr Info */
+    if (profile->ipv4.Address)
+    {
+        dbg_time("IPv4 MTU: %d", profile->ipv4.Mtu);
+        dbg_time("IPv4 Address: %s", ipaddr_to_string_v4(ntohl(profile->ipv4.Address)));
+        dbg_time("IPv4 Netmask: %d", mask_to_prefix_v4(ntohl(profile->ipv4.SubnetMask)));
+        dbg_time("IPv4 Gateway: %s", ipaddr_to_string_v4(ntohl(profile->ipv4.Gateway)));
+        dbg_time("IPv4 DNS1: %s", ipaddr_to_string_v4(ntohl(profile->ipv4.DnsPrimary)));
+        dbg_time("IPv4 DNS2: %s", ipaddr_to_string_v4(ntohl(profile->ipv4.DnsSecondary)));
+        if_set_network_v4(ifname, ntohl(profile->ipv4.Address),
+                          mask_to_prefix_v4(profile->ipv4.SubnetMask),
+                          ntohl(profile->ipv4.Gateway),
+                          ntohl(profile->ipv4.DnsPrimary),
+                          ntohl(profile->ipv4.DnsSecondary));
+    }
+
+    if (profile->ipv6.Address[0] && profile->ipv6.PrefixLengthIPAddr)
+    {
+        //the module does not support DHCPv6, it only supports 'Router Solicitation'
+        //and it seems that if /proc/sys/net/ipv6/conf/all/forwarding is enabled, the kernel does not send RS
+        const char *forward_file = "/proc/sys/net/ipv6/conf/all/forwarding";
+        int forward_fd = open(forward_file, O_RDONLY);
+        if (forward_fd > 0)
+        {
+            char forward_state[2];
+            if (read(forward_fd, forward_state, 2) == -1) {};
+            if (forward_state[0] == '1')
+            {
+                dbg_time("%s enabled, kernel may not send 'Router Solicitation'", forward_file);
+            }
+            close(forward_fd);
+        }
+
+        dbg_time("IPv6 MTU: %d", profile->ipv6.Mtu);
+        dbg_time("IPv6 Address: %s", ipaddr_to_string_v6(profile->ipv6.Address));
+        dbg_time("IPv6 Netmask: %d", profile->ipv6.PrefixLengthIPAddr);
+        dbg_time("IPv6 Gateway: %s", ipaddr_to_string_v6(profile->ipv6.Gateway));
+        dbg_time("IPv6 DNS1: %s", ipaddr_to_string_v6(profile->ipv6.DnsPrimary));
+        dbg_time("IPv6 DNS2: %s", ipaddr_to_string_v6(profile->ipv6.DnsSecondary));
+        if_set_network_v6(ifname, profile->ipv6.Address, profile->ipv6.PrefixLengthIPAddr,
+                          profile->ipv6.Gateway, profile->ipv6.DnsPrimary, profile->ipv6.DnsSecondary);
+    }
+}
+
+void udhcpc_stop(PROFILE_T *profile)
+{
+    char *ifname = profile->usbnet_adapter;
+
+    ql_set_driver_link_state(profile, 0);
+
+    if (profile->qmapnet_adapter[0])
+    {
+        ifname = profile->qmapnet_adapter;
+    }
+
+    if_link_down(ifname);
+    if_flush_v4_addr(ifname);
+    if_flush_v6_addr(ifname);
+}
diff --git a/package/wwan/driver/quectel_cm_5G/src/udhcpc_script.c b/package/wwan/driver/quectel_cm_5G/src/udhcpc_script.c
new file mode 100644
index 000000000..032f8cfa8
--- /dev/null
+++ b/package/wwan/driver/quectel_cm_5G/src/udhcpc_script.c
@@ -0,0 +1,132 @@
+/******************************************************************************
+  @file    udhcpc_script.c
+  @brief   call external scripts to configure the IP address.
+
+  DESCRIPTION
+  Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules.
+
+  INITIALIZATION AND SEQUENCING REQUIREMENTS
+  None.
+
+  ---------------------------------------------------------------------------
+  Copyright (c) 2016 - 2020 Quectel Wireless Solution, Co., Ltd.  All Rights Reserved.
+  Quectel Wireless Solution Proprietary and Confidential.
+  ---------------------------------------------------------------------------
+******************************************************************************/
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <errno.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+#include "util.h"
+#include "QMIThread.h"
+
+/* these macros name environment variables; their values point at the scripts to run */
+#define IFDOWN_SCRIPT "/etc/quectel/ifdown.sh"
+#define IFUP_SCRIPT "/etc/quectel/ifup.sh"
+
+static int ql_system(const char *shell_cmd)
+{
+    dbg_time("%s", shell_cmd);
+    return system(shell_cmd);
+}
+
+uint32_t mask_to_prefix_v4(uint32_t mask)
+{
+    uint32_t prefix = 0;
+    while (mask)
+    {
+        mask = mask & (mask - 1); //clear the lowest set bit
+        prefix++;
+    }
+    return prefix;
+}
+
+uint32_t mask_from_prefix_v4(uint32_t prefix)
+{
+    if (prefix == 0)
+        return 0;
+    return ~((1u << (32 - prefix)) - 1);
+}
+
+/* ip and mask given as plain 32-bit integers */
+uint32_t broadcast_from_mask(uint32_t ip, uint32_t mask)
+{
+    return (ip & mask) | (~mask);
+}
+
+const char *ipaddr_to_string_v4(in_addr_t ipaddr, char *buf, size_t size)
+{
+    uint32_t addr = ipaddr;
+
+    buf[0] = '\0';
+    return inet_ntop(AF_INET, &addr, buf, size);
+}
+
+const char *ipaddr_to_string_v6(uint8_t *ipaddr, char *buf, size_t size)
+{
+    buf[0] = '\0';
+    return inet_ntop(AF_INET6, ipaddr, buf, size);
+}
+
+/**
+ * For more details see default.script.
+ *
+ * The main aim of this function is to offload IP management to an external script;
+ * the CM has no interest in managing IP addresses itself, it just hands the script
+ * everything it knows: IP, mask, router, DNS, ...
+ */
+void udhcpc_start(PROFILE_T *profile)
+{
+    char shell_cmd[1024];
+    char ip[128];
+    char subnet[128];
+    char broadcast[128];
+    char router[128];
+    char domain1[128];
+    char domain2[128];
+
+    if (NULL == getenv(IFUP_SCRIPT))
+        return;
+
+    // TODO: check raw IP mode?
+    snprintf(shell_cmd, sizeof(shell_cmd),
+             " netiface=%s interface=%s mtu=%u ip=%s subnet=%s broadcast=%s router=%s"
+             " domain=\"%s %s\" %s",
+             profile->usbnet_adapter,
+             profile->qmapnet_adapter[0] ? profile->qmapnet_adapter : profile->usbnet_adapter,
+             profile->ipv4.Mtu,
+             ipaddr_to_string_v4(ntohl(profile->ipv4.Address), ip, sizeof(ip)),
+             ipaddr_to_string_v4(ntohl(profile->ipv4.SubnetMask), subnet, sizeof(subnet)),
+             ipaddr_to_string_v4(ntohl(broadcast_from_mask(profile->ipv4.Address, profile->ipv4.SubnetMask)),
+                                 broadcast, sizeof(broadcast)),
+             ipaddr_to_string_v4(ntohl(profile->ipv4.Gateway), router, sizeof(router)),
+             ipaddr_to_string_v4(ntohl(profile->ipv4.DnsPrimary), domain1, sizeof(domain1)),
+             ipaddr_to_string_v4(ntohl(profile->ipv4.DnsSecondary), domain2, sizeof(domain2)),
+             getenv(IFUP_SCRIPT));
+    ql_system(shell_cmd);
+
+    // TODO: manage IPv6
+}
+
+/**
+ * For more details see default.script.
+ *
+ * Mirror of udhcpc_start(): tell the script which interfaces to deconfigure.
+ */
+void udhcpc_stop(PROFILE_T *profile)
+{
+    char shell_cmd[1024];
+
+    if (NULL == getenv(IFDOWN_SCRIPT))
+        return;
+
+    snprintf(shell_cmd, sizeof(shell_cmd),
+             "netiface=%s interface=%s %s",
+             profile->usbnet_adapter,
+             profile->qmapnet_adapter[0] ? profile->qmapnet_adapter : profile->usbnet_adapter,
+             getenv(IFDOWN_SCRIPT));
+    ql_system(shell_cmd);
+}
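The byte-order juggling in the helpers above is easy to get wrong, so here is a self-contained, illustration-only example that reproduces the prefix and broadcast math for a /24 network (not part of the patch):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* same logic as mask_to_prefix_v4()/broadcast_from_mask() above */
static uint32_t mask_to_prefix(uint32_t mask)
{
    uint32_t prefix = 0;
    while (mask) { mask &= mask - 1; prefix++; }
    return prefix;
}

int main(void)
{
    uint32_t ip    = ntohl(inet_addr("192.168.1.10"));   /* host byte order */
    uint32_t mask  = ntohl(inet_addr("255.255.255.0"));
    uint32_t bcast = (ip & mask) | ~mask;
    struct in_addr a = { .s_addr = htonl(bcast) };

    printf("/%u, broadcast %s\n", mask_to_prefix(mask), inet_ntoa(a)); /* /24, 192.168.1.255 */
    return 0;
}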
diff --git a/package/wwan/driver/quectel_cm_5G/src/util.c b/package/wwan/driver/quectel_cm_5G/src/util.c
new file mode 100644
index 000000000..53b60f415
--- /dev/null
+++ b/package/wwan/driver/quectel_cm_5G/src/util.c
@@ -0,0 +1,361 @@
+/******************************************************************************
+  @file    util.c
+  @brief   some utilities for this QCM tool.
+
+  DESCRIPTION
+  Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules.
+
+  INITIALIZATION AND SEQUENCING REQUIREMENTS
+  None.
+
+  ---------------------------------------------------------------------------
+  Copyright (c) 2016 - 2020 Quectel Wireless Solution, Co., Ltd.  All Rights Reserved.
+  Quectel Wireless Solution Proprietary and Confidential.
+  ---------------------------------------------------------------------------
+******************************************************************************/
+
+#include <sys/time.h>
+#include <sys/types.h>
+typedef unsigned short sa_family_t;
+#include <linux/un.h>
+
+#if defined(__STDC__)
+#include <stdarg.h>
+#define __V(x) x
+#else
+#include <varargs.h>
+#define __V(x) (va_alist) va_dcl
+#define const
+#define volatile
+#endif
+
+#include <pthread.h>
+
+#include "QMIThread.h"
+
+pthread_mutex_t cm_command_mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_cond_t cm_command_cond = PTHREAD_COND_INITIALIZER;
+unsigned int cm_recv_buf[1024];
+
+int cm_open_dev(const char *dev) {
+    int fd;
+
+    fd = open(dev, O_RDWR | O_NONBLOCK | O_NOCTTY);
+    if (fd != -1) {
+        fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_NONBLOCK);
+        fcntl(fd, F_SETFD, FD_CLOEXEC);
+
+        if (!strncmp(dev, "/dev/tty", strlen("/dev/tty")))
+        {
+            //disable echo on serial ports
+            struct termios ios;
+
+            memset(&ios, 0, sizeof(ios));
+            tcgetattr(fd, &ios);
+            cfmakeraw(&ios);
+            cfsetispeed(&ios, B115200);
+            cfsetospeed(&ios, B115200);
+            tcsetattr(fd, TCSANOW, &ios);
+            tcflush(fd, TCIOFLUSH);
+        }
+    } else {
+        dbg_time("Failed to open %s, errno: %d (%s)", dev, errno, strerror(errno));
+    }
+
+    return fd;
+}
+
+int cm_open_proxy(const char *name) {
+    int sockfd = -1;
+    int reuse_addr = 1;
+    struct sockaddr_un sockaddr;
+    socklen_t alen;
+
+    /* create client socket in the abstract unix-domain namespace */
+    sockfd = socket(AF_LOCAL, SOCK_STREAM, 0);
+    if (sockfd < 0)
+        return sockfd;
+
+    memset(&sockaddr, 0, sizeof(sockaddr));
+    sockaddr.sun_family = AF_LOCAL;
+    sockaddr.sun_path[0] = 0;
+    memcpy(sockaddr.sun_path + 1, name, strlen(name));
+
+    alen = strlen(name) + offsetof(struct sockaddr_un, sun_path) + 1;
+    if (connect(sockfd, (struct sockaddr *)&sockaddr, alen) < 0) {
+        close(sockfd);
+        dbg_time("connect %s errno: %d (%s)", name, errno, strerror(errno));
+        return -1;
+    }
+    setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &reuse_addr, sizeof(reuse_addr));
+    fcntl(sockfd, F_SETFL, fcntl(sockfd, F_GETFL) | O_NONBLOCK);
+    fcntl(sockfd, F_SETFD, FD_CLOEXEC);
+
+    dbg_time("connect to %s sockfd = %d", name, sockfd);
+
+    return sockfd;
+}
+
+static void setTimespecRelative(struct timespec *p_ts, long long msec)
+{
+    struct timeval tv;
+
+    gettimeofday(&tv, (struct timezone *) NULL);
+
+    /* what's really funny about this is that I know
+       pthread_cond_timedwait just turns around and makes this
+       a relative time again */
+    p_ts->tv_sec = tv.tv_sec + (msec / 1000);
+    p_ts->tv_nsec = (tv.tv_usec + (msec % 1000) * 1000L) * 1000L;
+    if ((unsigned long)p_ts->tv_nsec >= 1000000000UL) {
+        p_ts->tv_sec += 1;
+        p_ts->tv_nsec -= 1000000000UL;
+    }
+}
+
+int pthread_cond_timeout_np(pthread_cond_t *cond, pthread_mutex_t *mutex, unsigned msecs) {
+    if (msecs != 0) {
+        unsigned i;
+        unsigned t = msecs / 4;
+        int ret = 0;
+
+        if (t == 0)
+            t = 1;
+
+        for (i = 0; i < msecs; i += t) {
+            struct timespec ts;
+            setTimespecRelative(&ts, t);
+            //very old uclibc does not support pthread_condattr_setclock(CLOCK_MONOTONIC),
+            //so wait in short slices to avoid hanging when the system time changes
+            ret = pthread_cond_timedwait(cond, mutex, &ts);
+            if (ret != ETIMEDOUT) {
+                if (ret) dbg_time("ret=%d, msecs=%u, t=%u", ret, msecs, t);
+                break;
+            }
+        }
+
+        return ret;
+    } else {
+        return pthread_cond_wait(cond, mutex);
+    }
+}
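Typical call pattern for pthread_cond_timeout_np(), as an illustration-only sketch: wait_for_response() and the surrounding logic are hypothetical; only the mutex/cond pair and the helper itself come from this file.

#include <pthread.h>
#include <errno.h>

/* declared at the top of util.c */
extern pthread_mutex_t cm_command_mutex;
extern pthread_cond_t cm_command_cond;
extern int pthread_cond_timeout_np(pthread_cond_t *cond, pthread_mutex_t *mutex, unsigned msecs);

/* hypothetical caller: block until another thread signals the cond, or time out */
static int wait_for_response(unsigned timeout_msec)
{
    int ret;

    pthread_mutex_lock(&cm_command_mutex);
    /* ... queue a request here, then wait for the reader thread to signal ... */
    ret = pthread_cond_timeout_np(&cm_command_cond, &cm_command_mutex, timeout_msec);
    pthread_mutex_unlock(&cm_command_mutex);

    return ret == ETIMEDOUT ? -1 : 0;
}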
+
+const char * get_time(void) {
+    static char time_buf[128]; //not thread-safe: callers must consume the string immediately
+    struct timeval tv;
+    time_t time;
+    suseconds_t millitm;
+    struct tm *ti;
+
+    gettimeofday(&tv, NULL);
+
+    time = tv.tv_sec;
+    millitm = (tv.tv_usec + 500) / 1000;
+
+    if (millitm == 1000) {
+        ++time;
+        millitm = 0;
+    }
+
+    ti = localtime(&time);
+    sprintf(time_buf, "%02d-%02d_%02d:%02d:%02d:%03d", ti->tm_mon+1, ti->tm_mday, ti->tm_hour, ti->tm_min, ti->tm_sec, (int)millitm);
+    return time_buf;
+}
+
+unsigned long clock_msec(void)
+{
+    struct timespec tm;
+    clock_gettime(CLOCK_MONOTONIC, &tm);
+    return (unsigned long)(tm.tv_sec * 1000 + (tm.tv_nsec / 1000000));
+}
+
+FILE *logfilefp = NULL;
+
+void update_resolv_conf(int iptype, const char *ifname, const char *dns1, const char *dns2) {
+    const char *dns_file = "/etc/resolv.conf";
+    FILE *dns_fp;
+    char dns_line[256];
+#define MAX_DNS 16
+    char *dns_info[MAX_DNS];
+    char dns_tag[64];
+    int dns_match = 0;
+    int i;
+
+    snprintf(dns_tag, sizeof(dns_tag), "# IPV%d %s", iptype, ifname);
+
+    for (i = 0; i < MAX_DNS; i++)
+        dns_info[i] = NULL;
+
+    dns_fp = fopen(dns_file, "r");
+    if (dns_fp) {
+        i = 0;
+        dns_line[sizeof(dns_line)-1] = '\0';
+
+        while ((fgets(dns_line, sizeof(dns_line)-1, dns_fp)) != NULL) {
+            if ((strlen(dns_line) > 1) && (dns_line[strlen(dns_line) - 1] == '\n'))
+                dns_line[strlen(dns_line) - 1] = '\0';
+            //keep foreign entries, drop the lines carrying our tag
+            if (strstr(dns_line, dns_tag)) {
+                dns_match++;
+                continue;
+            }
+            dns_info[i++] = strdup(dns_line);
+            if (i == MAX_DNS)
+                break;
+        }
+
+        fclose(dns_fp);
+    }
+    else if (errno != ENOENT) {
+        dbg_time("fopen %s fail, errno:%d (%s)", dns_file, errno, strerror(errno));
+        return;
+    }
+
+    if (dns1 == NULL && dns_match == 0)
+        goto cleanup; //nothing to add and nothing of ours to remove
+
+    dns_fp = fopen(dns_file, "w");
+    if (dns_fp) {
+        if (dns1)
+            fprintf(dns_fp, "nameserver %s %s\n", dns1, dns_tag);
+        if (dns2)
+            fprintf(dns_fp, "nameserver %s %s\n", dns2, dns_tag);
+
+        for (i = 0; i < MAX_DNS && dns_info[i]; i++)
+            fprintf(dns_fp, "%s\n", dns_info[i]);
+        fclose(dns_fp);
+    }
+    else {
+        dbg_time("fopen %s fail, errno:%d (%s)", dns_file, errno, strerror(errno));
+    }
+
+cleanup:
+    for (i = 0; i < MAX_DNS && dns_info[i]; i++)
+        free(dns_info[i]);
+}
+
+pid_t getpid_by_pdp(int pdp, const char *program_name)
+{
+    glob_t gt;
+    int ret;
+    char filter[16];
+    pid_t pid;
+
+    snprintf(filter, sizeof(filter), "-n %d", pdp);
+    ret = glob("/proc/*/cmdline", GLOB_NOSORT, NULL, &gt);
+    if (ret != 0) {
+        dbg_time("glob error, errno = %d(%s)", errno, strerror(errno));
+        return -1;
+    } else {
+        int i = 0, fd = -1;
+        ssize_t nreads;
+
+        for (i = 0; i < (int)gt.gl_pathc; i++) {
+            char cmdline[512] = {0};
+
+            fd = open(gt.gl_pathv[i], O_RDONLY);
+            if (fd == -1) {
+                dbg_time("open %s failed, errno = %d(%s)", gt.gl_pathv[i], errno, strerror(errno));
+                globfree(&gt);
+                return -1;
+            }
+
+            nreads = read(fd, cmdline, sizeof(cmdline) - 1);
+            close(fd); //do not leak one fd per scanned process
+            if (nreads > 0) {
+                int pos = 0;
+                //arguments in /proc/<pid>/cmdline are '\0'-separated, join them with spaces
+                while (pos < nreads - 1) {
+                    if (cmdline[pos] == '\0')
+                        cmdline[pos] = ' ';
+                    pos++;
+                }
+            }
+
+            if (strstr(cmdline, program_name) && strstr(cmdline, filter)) {
+                char pidstr[64] = {0};
+                char *p;
+
+                dbg_time("%s: %s", gt.gl_pathv[i], cmdline);
+                p = strstr(gt.gl_pathv[i], "/cmdline");
+                *p = '\0';
+                while (*(--p) != '/')
+                    ;
+
+                strcpy(pidstr, p + 1);
+                pid = atoi(pidstr);
+                globfree(&gt);
+
+                return pid;
+            }
+        }
+    }
+
+    globfree(&gt);
+    return -1;
+}
+
+void ql_get_driver_rmnet_info(PROFILE_T *profile, RMNET_INFO *rmnet_info) {
+    int ifc_ctl_sock;
+    struct ifreq ifr;
+    int rc;
+    int request = 0x89F3; //SIOCDEVPRIVATE + 3
+    unsigned char data[512];
+
+    memset(rmnet_info, 0x00, sizeof(*rmnet_info));
+
+    ifc_ctl_sock = socket(AF_INET, SOCK_DGRAM, 0);
+    if (ifc_ctl_sock <= 0) {
+        dbg_time("socket() failed: %s\n", strerror(errno));
+        return;
+    }
+
+    memset(&ifr, 0, sizeof(struct ifreq));
+    strncpy(ifr.ifr_name, profile->usbnet_adapter, IFNAMSIZ);
+    ifr.ifr_name[IFNAMSIZ - 1] = 0;
+    ifr.ifr_ifru.ifru_data = (void *)data;
+
+    rc = ioctl(ifc_ctl_sock, request, &ifr);
+    if (rc < 0) {
+        if (errno != ENOTSUP)
+            dbg_time("ioctl(0x%x, qmap_settings) errno:%d (%s), rc=%d", request, errno, strerror(errno), rc);
+    }
+    else {
+        memcpy(rmnet_info, data, sizeof(*rmnet_info));
+    }
+
+    close(ifc_ctl_sock);
+}
+
+void ql_set_driver_qmap_setting(PROFILE_T *profile, QMAP_SETTING *qmap_settings) {
+    int ifc_ctl_sock;
+    struct ifreq ifr;
+    int rc;
+    int request = 0x89F2; //SIOCDEVPRIVATE + 2
+
+    ifc_ctl_sock = socket(AF_INET, SOCK_DGRAM, 0);
+    if (ifc_ctl_sock <= 0) {
+        dbg_time("socket() failed: %s\n", strerror(errno));
+        return;
+    }
+
+    memset(&ifr, 0, sizeof(struct ifreq));
+    strncpy(ifr.ifr_name, profile->usbnet_adapter, IFNAMSIZ);
+    ifr.ifr_name[IFNAMSIZ - 1] = 0;
+    ifr.ifr_ifru.ifru_data = (void *)qmap_settings;
+
+    rc = ioctl(ifc_ctl_sock, request, &ifr);
+    if (rc < 0) {
+        dbg_time("ioctl(0x%x, qmap_settings) failed: %s, rc=%d", request, strerror(errno), rc);
+    }
+
+    close(ifc_ctl_sock);
+}
+
+/* copy at most dest_size - 1 characters and always '\0'-terminate */
+void no_trunc_strncpy(char *dest, const char *src, size_t dest_size)
+{
+    size_t i = 0;
+
+    for (i = 0; i + 1 < dest_size && *src; i++) {
+        *dest++ = *src++;
+    }
+
+    if (dest_size)
+        *dest = 0;
+}
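Usage sketch for getpid_by_pdp(): it scans /proc/*/cmdline for a process whose command line contains both the program name and the "-n <pdp>" argument, which lets a second CM instance detect one already bound to the same PDP context. check_duplicate_instance() below is hypothetical, illustration only:

#include "util.h"

/* illustration only: refuse to start if another quectel-CM already owns PDP 1 */
static int check_duplicate_instance(void)
{
    pid_t other = getpid_by_pdp(1, "quectel-CM");

    if (other > 0)
        return -1; /* someone is already driving this PDP context */
    return 0;
}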
diff --git a/package/wwan/driver/quectel_cm_5G/src/util.h b/package/wwan/driver/quectel_cm_5G/src/util.h
new file mode 100644
index 000000000..392d4014e
--- /dev/null
+++ b/package/wwan/driver/quectel_cm_5G/src/util.h
@@ -0,0 +1,52 @@
+/**
+  @file
+  util.h
+
+  @brief
+  This file provides the definitions, and declares some common APIs, for the intrusive list and other helpers.
+ */
+
+#ifndef _UTILS_H_
+#define _UTILS_H_
+
+#include <stddef.h>    /* offsetof */
+#include <sys/types.h> /* pid_t */
+
+struct listnode
+{
+    struct listnode *next;
+    struct listnode *prev;
+};
+
+#define node_to_item(node, container, member) \
+    (container *) (((char*) (node)) - offsetof(container, member))
+
+#define list_declare(name) \
+    struct listnode name = { \
+        .next = &name, \
+        .prev = &name, \
+    }
+
+#define list_for_each(node, list) \
+    for (node = (list)->next; node != (list); node = node->next)
+
+#define list_for_each_reverse(node, list) \
+    for (node = (list)->prev; node != (list); node = node->prev)
+
+void list_init(struct listnode *list);
+void list_add_tail(struct listnode *list, struct listnode *item);
+void list_add_head(struct listnode *head, struct listnode *item);
+void list_remove(struct listnode *item);
+
+#define list_empty(list) ((list) == (list)->next)
+#define list_head(list) ((list)->next)
+#define list_tail(list) ((list)->prev)
+
+int epoll_register(int epoll_fd, int fd, unsigned int events);
+int epoll_deregister(int epoll_fd, int fd);
+const char * get_time(void);
+unsigned long clock_msec(void);
+pid_t getpid_by_pdp(int, const char *);
+
+#endif
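The list in util.h is intrusive: the links are embedded in the user's own struct, and node_to_item() recovers the container from a node pointer. An illustration-only sketch (struct request is hypothetical; linking needs the list_* function definitions from the accompanying util sources):

#include <stdio.h>
#include "util.h"

struct request {
    int id;
    struct listnode node; /* the links live inside the item itself */
};

int main(void)
{
    list_declare(pending);
    struct request a = { .id = 1 }, b = { .id = 2 };
    struct listnode *n;

    list_add_tail(&pending, &a.node);
    list_add_tail(&pending, &b.node);

    list_for_each(n, &pending) {
        struct request *r = node_to_item(n, struct request, node);
        printf("request %d\n", r->id);
    }
    return 0;
}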