ipq806x: add quantenna 5G wifi support for E8350/R7500v1

coolsnowwolf 2021-07-23 16:59:44 +08:00
parent 0551b74b0a
commit 6a18e18416
127 changed files with 114468 additions and 88 deletions

View File

@ -1,53 +0,0 @@
# Known Quantenna firmware bugs #
This is an attempt to document Quantenna firmware bugs, explaining strange
implementation details in this OpenWrt package and also hopefully helping
anyone else trying to get this mess to work...
## Quantenna switch and VLANs ##
Mapping WiFi VIFs to VLANs has a horrible side effect: Traffic from the
Quantenna SoC, like the RPC responses, is tagged with the last used VLAN.
The tag depends on wireless client activity, i.e. it is unpredictable.
And even if it were predictable: The module does not accept tagged RPC
calls or other management traffic. The RPC requests must be sent
untagged even if the module responds with an arbitrary tag...
The switch state is not reset by a Quantenna module reboot via RPC
call, nor by a reset via GPIO. The VLAN issue affects the bootloader,
making the module fail to load the second-stage bootloader and OS over
TFTP because of the VLAN tag mismatch.
Full power cycling is necessary to reset the Quantenna switch to
non-tagging. There is no known software-controllable reset method.
### Workaround ###
Playing with static and gratuitous ARPs, in combination with VLAN
interfaces accepting local traffic, works pretty well.
But this does not solve the reboot issue, caused by the switch bug
being persistent over reset. The two U-Boot stages will switch MAC
addresses up to 3 times during module reboot. The final address is
often dynamically allocated, changing for each boot. This makes it
hard to manage a static ARP entry.
Using "tc" to pop the VLAN tag of any tagged IP and ARP packets for
our management address works perfectly.
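A minimal sketch of that "tc" workaround (the interface name and the 1.1.1.x
management address are placeholders rather than values from this README, and it
assumes a kernel and tc with the flower classifier):

    # pop the VLAN tag from tagged IP and ARP traffic sourced by the module's
    # management address, so RPC replies reach the untagged host interface
    tc qdisc add dev eth1 handle ffff: ingress
    tc filter add dev eth1 parent ffff: protocol 802.1Q flower \
            vlan_ethtype ipv4 src_ip 1.1.1.2 action vlan pop
    tc filter add dev eth1 parent ffff: protocol 802.1Q flower \
            vlan_ethtype arp arp_sip 1.1.1.2 action vlan pop
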
## Using bootloader environment ##
The Quantenna bootloader environment is ignored by the firmware unless
/scripts/build_config contains STATELESS=Y. This is a build-time
setting, which is not set by the Netgear firmware we use.
The implication is that the module will use different MAC addresses
etc. compared to an OEM firmware with STATELESS=Y.
This should not be considered a bug, but is an issue caused by our
choice of a common Quantenna firmware image for all hosts with such a
module.
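As a quick check, grepping the build_config mentioned above (inside an unpacked
firmware/SDK tree; treating it as a plain shell-style config file is an
assumption) shows whether a given build is stateless:

    grep '^STATELESS=' scripts/build_config \
        || echo "STATELESS not set: the bootloader environment will be ignored"
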

View File

@ -9,30 +9,9 @@ include $(TOPDIR)/rules.mk
PKG_NAME:=quantenna
PKG_VERSION:=37.3.2.44
PKG_RELEASE:=1
PKG_RELEASE:=6
PKG_MAINTAINER:=Bjørn Mork <bjorn@mork.no>
# available versions, ref https://kb.netgear.com/2649/NETGEAR-Open-Source-Code-for-Programmers-GPL
# qtn sdk v36.6.0.23: 1.0.0.46,
# qtn sdk v36.6.0.28: 1.0.0.52,
# qtn sdk v36.6.0.30: 1.0.0.68, 1.0.0.70,
# qtn sdk v36.7.3.23: 1.0.0.76, 1.0.0.82,
# qtn sdk v37.3.1.25: 1.0.0.94, 1.0.0.108, 1.0.0.110, 1.0.0.112, 1.0.0.116, 1.0.0.122,
# qtn sdk v37.3.2.44: 1.0.0.124
NETGEAR_R7500V1_SRC-37.3.2.44=R7500-and_qtn_gpl_src_v1.0.0.124.tar.bz2.zip
NETGEAR_R7500V1_HASH-37.3.2.44=6592e3286c66a8b0ba689e6cff2d10cac5aea3ddef8ca8746d7771319e47112e
PKG_SOURCE:=$(NETGEAR_R7500V1_SRC-$(strip $(PKG_VERSION)))
PKG_SOURCE_URL:=https://www.downloads.netgear.com/files/GPL
PKG_HASH:=$(NETGEAR_R7500V1_HASH-$(strip $(PKG_VERSION)))
QCSAPI_ARCHIVE-37.3.2.44=quantenna_gpl_v$(PKG_VERSION).tar.bz2
UNPACK_CMD-37.3.2.44=unzip -q -p $(DL_DIR)/$(PKG_SOURCE) | bzcat | $(HOST_TAR) -C $(PKG_BUILD_DIR) $(TAR_OPTIONS)
UNPACK_CMD=$(UNPACK_CMD-$(strip $(PKG_VERSION)))
PKG_MAINTAINER:=lean
PKG_BUILD_DEPENDS:=libtirpc
@ -40,14 +19,7 @@ include $(INCLUDE_DIR)/kernel.mk
include $(INCLUDE_DIR)/package.mk
define Build/Prepare
$(PKG_UNPACK)
bzcat $(PKG_BUILD_DIR)/$(QCSAPI_ARCHIVE-$(strip $(PKG_VERSION))) | \
$(HOST_TAR) -C $(PKG_BUILD_DIR) $(TAR_OPTIONS) --strip-components=1 \
--wildcards */libqcsapi_client_src.zip --wildcards *drivers/pcie2*
bzcat $(PKG_BUILD_DIR)/R7500-*.tar.bz2 | \
$(HOST_TAR) -C $(PKG_BUILD_DIR) $(TAR_OPTIONS) --strip-components=4 \
--wildcards */package/qt-wireless/binary/{topaz-linux.lzma.img,u-boot.bin}
unzip -q -d $(PKG_BUILD_DIR) $(PKG_BUILD_DIR)/libqcsapi_client_src.zip
$(CP) ./src/* $(PKG_BUILD_DIR)/
# tuning to make the same patch apply to versions with or without -fstrict-aliasing
sed -i -e 's/ -fstrict-aliasing//' $(PKG_BUILD_DIR)/Makefile
$(Build/Patch)
@ -82,6 +54,7 @@ define Package/qtn-utils/install
$(INSTALL_BIN) $(PKG_BUILD_DIR)/qcsapi_sockrpc $(1)/usr/sbin/
ln -s /tmp/qcsapi_target_ip.conf $(1)/etc
$(INSTALL_BIN) ./files/qtn-utils.init $(1)/etc/init.d/qtn-utils
$(INSTALL_BIN) ./files/qtn-wifi.init $(1)/etc/init.d/qtn-wifi
endef
define Package/qtn-firmware
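For reference, the unpack chain performed by PKG_UNPACK and the Build/Prepare
recipe above, spelled out as plain shell (archive names are taken from the
variables above; running this in a scratch directory containing the downloaded
Netgear GPL drop is an assumption):

    unzip -q -p R7500-and_qtn_gpl_src_v1.0.0.124.tar.bz2.zip | bzcat | tar -xf -
    bzcat quantenna_gpl_v37.3.2.44.tar.bz2 | tar -xf - --strip-components=1 \
        --wildcards '*/libqcsapi_client_src.zip' --wildcards '*drivers/pcie2*'
    bzcat R7500-*.tar.bz2 | tar -xf - --strip-components=4 \
        --wildcards '*/package/qt-wireless/binary/topaz-linux.lzma.img' \
        --wildcards '*/package/qt-wireless/binary/u-boot.bin'
    unzip -q libqcsapi_client_src.zip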

View File

@ -0,0 +1,70 @@
#!/bin/sh /etc/rc.common
START=99
STOP=10
NAME=qtn-wifi
start()
{
###########################################
### 5G Quantenna wifi ###
### INITIALIZATION part - do not edit ###
###########################################
# modprobe qdpc-host is run automatically on start-up
COUNT=0
WIFI_INIT=`dmesg |grep -c "Connection established with Target BBIC4 board"`
while [ "$WIFI_INIT" = "0" ] ;do
sleep 1
WIFI_INIT=`dmesg |grep -c "Connection established with Target BBIC4 board"`
let COUNT+=1
if [ "$COUNT" -gt 60 ];then
logger "Error 1 in 5G wifi pcie2 firmware initialization"
exit 1
break
fi
done
# start wifi configuration and start wifi
ifconfig host0 1.1.1.1 netmask 255.255.255.252 up
echo "1.1.1.2" > /tmp/qcsapi_target_ip.conf
wifi config
uci set wireless.radio1.disabled=0
uci commit
wifi
sleep 5
#######################################
### END 5g wifi initialization part ###
#######################################
###########################################
### WIFI CONFIGURATION ###
### edit below this line on your taste ###
###########################################
SSID="OpenWrt-5G"
WPAPASS="12345678"
CHANNEL="149"
TXPOWER=22
WIFIDEV=`qcsapi_sockrpc get_primary_interface`
qcsapi_sockrpc get_primary_interface
qcsapi_sockrpc get_regulatory_region "$WIFIDEV"
qcsapi_sockrpc set_regulatory_region "$WIFIDEV" us
qcsapi_sockrpc get_scs_report "$WIFIDEV" all
qcsapi_sockrpc get_channel "$WIFIDEV"
qcsapi_sockrpc set_channel "$WIFIDEV" "$CHANNEL"
qcsapi_sockrpc set_bw "$WIFIDEV" 80
qcsapi_sockrpc get_tx_power "$WIFIDEV" "$CHANNEL"
qcsapi_sockrpc set_tx_power "$WIFIDEV" "$CHANNEL" "$TXPOWER"
qcsapi_sockrpc -u -q set_option "$WIFIDEV" 802_11h 1
qcsapi_sockrpc set_ssid "$WIFIDEV" "$SSID"
qcsapi_sockrpc set_beacon "$WIFIDEV" 11i # WPA2 AES only
qcsapi_sockrpc set_option "$WIFIDEV" SSID_broadcast 1
qcsapi_sockrpc set_WPA_authentication_mode "$WIFIDEV" PSKAuthentication
qcsapi_sockrpc set_WPA_encryption_modes "$WIFIDEV" AESEncryption
qcsapi_sockrpc set_passphrase "$WIFIDEV" 0 "$WPAPASS"
}
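Once the script has run, the RPC link can be checked by hand with the same calls
it already uses (addresses as configured above; that the module's primary
interface is reported as wifi0 is an assumption):

    cat /tmp/qcsapi_target_ip.conf          # should contain 1.1.1.2
    ping -c 3 1.1.1.2                       # module reachable over host0?
    WIFIDEV=$(qcsapi_sockrpc get_primary_interface)
    qcsapi_sockrpc get_channel "$WIFIDEV"
    qcsapi_sockrpc get_tx_power "$WIFIDEV" 149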

View File

@ -0,0 +1,88 @@
CC = gcc
CFLAGS = -I. -fPIC -O -g -Wall -Werror -Wno-unused-variable -Wno-unused-parameter -fstrict-aliasing
COMMON_PROG_OBJS = \
call_qcsapi.o \
qcsapi_driver.o \
qcsapi_output.o \
qcsapi_sem.o \
qcsapi_util.o
SOCKET_PROG_OBJS = \
$(COMMON_PROG_OBJS) \
qcsapi_rpc/client/socket/qcsapi_socket_rpc_client.o \
qcsapi_rpc_common/client/find_host_addr.o
SOCKET_C_SAMPLE_OBJS = \
qcsapi_rpc_sample/c_rpc_qcsapi_sample.o \
qcsapi_rpc_common/client/find_host_addr.o
PCIE_PROG_OBJS = \
$(COMMON_PROG_OBJS) \
qcsapi_rpc/client/pcie/qcsapi_pcie_rpc_client.o \
qcsapi_rpc_common/client/rpc_pci_clnt.o
SOCKET_RAW_PROG_OBJS = \
$(COMMON_PROG_OBJS) \
qcsapi_rpc/client/socket_raw/qcsapi_socketraw_rpc_client.o \
qcsapi_rpc_common/client/rpc_raw_clnt.o \
qcsapi_rpc_common/common/rpc_raw.o
LIB_OBJS = \
qcsapi_rpc/generated/qcsapi_rpc_xdr.o \
qcsapi_rpc/generated/qcsapi_rpc_clnt_adapter.o
TARGETS = c_rpc_qcsapi_sample \
qcsapi_sockrpc \
qcsapi_sockrpc_static \
qcsapi_pcie \
qcsapi_pcie_static \
qcsapi_sockraw \
qcsapi_sockraw_static \
$(LIB_REALNAME)
CFLAGS += -DPCIE_RPC_TYPE=RPC_TYPE_QCSAPI_PCIE
all: $(TARGETS)
-include $(shell find . -name \*.d)
LIB_NAME = qcsapi_client
LIB_LDNAME = lib$(LIB_NAME).so
LIB_SONAME = $(LIB_LDNAME).1
LIB_REALNAME = $(LIB_LDNAME).1.0.1
c_rpc_qcsapi_sample: ${SOCKET_C_SAMPLE_OBJS:%=build/%} $(LIB_REALNAME)
${CC} $(filter %.o, $^) -L. -l$(LIB_NAME) -o $@
qcsapi_pcie: ${PCIE_PROG_OBJS:%=build/%} $(LIB_REALNAME)
${CC} $(filter %.o, $^) -L. -l$(LIB_NAME) -o $@
qcsapi_pcie_static: ${PCIE_PROG_OBJS:%=build/%} ${LIB_OBJS}
${CC} $(filter %.o, $^) -o $@
qcsapi_sockrpc: ${SOCKET_PROG_OBJS:%=build/%} $(LIB_REALNAME)
${CC} $(filter %.o, $^) -L. -l$(LIB_NAME) -o $@
qcsapi_sockrpc_static: ${SOCKET_PROG_OBJS:%=build/%} ${LIB_OBJS}
${CC} $(filter %.o, $^) -o $@
qcsapi_sockraw: ${SOCKET_RAW_PROG_OBJS:%=build/%} $(LIB_REALNAME)
${CC} $(filter %.o, $^) -L. -l$(LIB_NAME) -o $@
qcsapi_sockraw_static: ${SOCKET_RAW_PROG_OBJS:%=build/%} ${LIB_OBJS}
${CC} $(filter %.o, $^) -o $@
$(LIB_REALNAME): ${LIB_OBJS:%=build/%}
${CC} -shared -s -o $@ -Wl,-soname,$(LIB_SONAME) -lc $^
cd ${@D} ; ln -fs $(LIB_REALNAME) $(LIB_SONAME)
cd ${@D} ; ln -fs $(LIB_SONAME) $(LIB_LDNAME)
build/%.o: %.c
@mkdir -p ${@D}
${CC} ${CFLAGS} $< -c -o $@ -MD -MF $@.d
clean:
rm -rf build $(LIB_LDNAME)* $(TARGETS) $(LIB_OBJS)

File diff suppressed because it is too large

View File

@ -0,0 +1,52 @@
/*SH1
*******************************************************************************
** **
** Copyright (c) 2009 - 2011 Quantenna Communications Inc **
** **
** File : qcsapi.h **
** Description : **
** **
*******************************************************************************
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. The name of the author may not be used to endorse or promote products **
** derived from this software without specific prior written permission. **
** **
** Alternatively, this software may be distributed under the terms of the **
** GNU General Public License ("GPL") version 2, or (at your option) any **
** later version as published by the Free Software Foundation. **
** **
** In the case this software is distributed under the GPL license, **
** you should have received a copy of the GNU General Public License **
** along with this software; if not, write to the Free Software **
** Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA **
** **
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR **
** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. **
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, **
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY **
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT **
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. **
** **
*******************************************************************************
EH1*/
#ifndef _CALL_QCSAPI_H
#define _CALL_QCSAPI_H
#include "qcsapi_output.h"
extern int qcsapi_main(qcsapi_output *print, int argc, char **argv);
#endif /* _CALL_QCSAPI_H */

View File

@ -0,0 +1,52 @@
/*SH0
*******************************************************************************
** **
** Copyright (c) 2009 - 2011 Quantenna Communications Inc **
** **
** File : call_qcsapi_local.c **
** Description : tiny wrapper to invoke call_qcsapi locally, from main() **
** **
*******************************************************************************
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. The name of the author may not be used to endorse or promote products **
** derived from this software without specific prior written permission. **
** **
** Alternatively, this software may be distributed under the terms of the **
** GNU General Public License ("GPL") version 2, or (at your option) any **
** later version as published by the Free Software Foundation. **
** **
** In the case this software is distributed under the GPL license, **
** you should have received a copy of the GNU General Public License **
** along with this software; if not, write to the Free Software **
** Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA **
** **
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR **
** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. **
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, **
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY **
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT **
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. **
** **
*******************************************************************************
EH0*/
#ifndef __QCSAPI_FIND_HOST_ADDR_H__
#define __QCSAPI_FIND_HOST_ADDR_H__
extern const char* client_qcsapi_find_host_addr(int *argc, char ***argv);
extern void client_qcsapi_find_host_errmsg(const char *progname);
#endif /* __QCSAPI_FIND_HOST_ADDR_H__ */

View File

@ -0,0 +1,8 @@
/*
* Copyright (c) 2015 Quantenna Communications, Inc.
* All rights reserved.
*/
#ifndef __QCSAPI_QFTC_H__
#define __QCSAPI_QFTC_H__
extern int qftc_start(const char *file_path_name, const char *sif_name, const uint8_t *dmac_addr);
#endif

View File

@ -0,0 +1,268 @@
/*
* (C) Copyright 2014 Quantenna Communications Inc.
*
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
/*
* Header file which describes the Ruby and Topaz platforms.
* Used by both run-time and boot loader images.
*
* Do not put run-time specific definitions in this file.
*/
#ifndef __COMMON_MEM_H
#define __COMMON_MEM_H
#include "ruby_config.h"
/* Platform memory */
/* SRAM */
#define RUBY_SRAM_UNIFIED_BEGIN 0x98000000
#define RUBY_SRAM_UNIFIED_NOCACHE_BEGIN 0xf8000000
#define RUBY_SRAM_FLIP_BEGIN 0x88000000
#define RUBY_SRAM_FLIP_NOCACHE_BEGIN 0x60000000
#define RUBY_SRAM_NOFLIP_BEGIN 0x80000000
#define RUBY_SRAM_NOFLIP_NOCACHE_BEGIN 0x60000000
#define RUBY_SRAM_BANK_SIZE (64 * 1024)
#ifdef TOPAZ_PLATFORM
#define RUBY_SRAM_SIZE (8 * RUBY_SRAM_BANK_SIZE)
#define RUBY_SRAM_BANK_SAFE_SIZE RUBY_SRAM_BANK_SIZE
#else
#define RUBY_SRAM_END_BANK_GUARD_SIZE 32
#define RUBY_SRAM_SIZE (4 * RUBY_SRAM_BANK_SIZE)
#define RUBY_SRAM_BANK_SAFE_SIZE (RUBY_SRAM_BANK_SIZE - RUBY_SRAM_END_BANK_GUARD_SIZE)
#endif
/* DDR */
#define RUBY_DRAM_UNIFIED_BEGIN 0x80000000
#define RUBY_DRAM_UNIFIED_NOCACHE_BEGIN 0xd0000000
#define RUBY_DRAM_FLIP_BEGIN 0x80000000
#define RUBY_DRAM_FLIP_NOCACHE_BEGIN 0x40000000
#define RUBY_DRAM_NOFLIP_BEGIN 0x0
#define RUBY_DRAM_NOFLIP_NOCACHE_BEGIN 0x40000000
#define RUBY_MAX_DRAM_SIZE DDR_128MB
#define RUBY_MIN_DRAM_SIZE DDR_64MB
#if TOPAZ_MMAP_UNIFIED
#define RUBY_SRAM_BEGIN RUBY_SRAM_UNIFIED_BEGIN
#define RUBY_SRAM_BUS_BEGIN RUBY_SRAM_UNIFIED_BEGIN
#define RUBY_SRAM_NOCACHE_BEGIN RUBY_SRAM_UNIFIED_NOCACHE_BEGIN
#define RUBY_DRAM_BEGIN RUBY_DRAM_UNIFIED_BEGIN
#define RUBY_DRAM_BUS_BEGIN RUBY_DRAM_UNIFIED_BEGIN
#define RUBY_DRAM_NOCACHE_BEGIN RUBY_DRAM_UNIFIED_NOCACHE_BEGIN
#elif RUBY_MMAP_FLIP
#define RUBY_SRAM_BEGIN RUBY_SRAM_FLIP_BEGIN
#define RUBY_SRAM_BUS_BEGIN RUBY_SRAM_NOFLIP_BEGIN
#define RUBY_SRAM_NOCACHE_BEGIN RUBY_SRAM_FLIP_NOCACHE_BEGIN
#define RUBY_DRAM_BEGIN RUBY_DRAM_FLIP_BEGIN
#define RUBY_DRAM_BUS_BEGIN RUBY_DRAM_NOFLIP_BEGIN
#define RUBY_DRAM_NOCACHE_BEGIN RUBY_DRAM_FLIP_NOCACHE_BEGIN
#else
#define RUBY_SRAM_BEGIN RUBY_SRAM_NOFLIP_BEGIN
#define RUBY_SRAM_BUS_BEGIN RUBY_SRAM_NOFLIP_BEGIN
#define RUBY_SRAM_NOCACHE_BEGIN RUBY_SRAM_NOFLIP_NOCACHE_BEGIN
#define RUBY_DRAM_BEGIN RUBY_DRAM_NOFLIP_BEGIN
#define RUBY_DRAM_BUS_BEGIN RUBY_DRAM_NOFLIP_BEGIN
#define RUBY_DRAM_NOCACHE_BEGIN RUBY_DRAM_NOFLIP_NOCACHE_BEGIN
#endif
/*****************************************************************************/
/* SPI memory mapped */
/*****************************************************************************/
#define RUBY_SPI_FLASH_ADDR 0x90000000
/* Hardware */
#define RUBY_HARDWARE_BEGIN 0xC0000000
#define ROUNDUP(x, y) ((((x)+((y)-1))/(y))*(y))
/* Config space */
#define CONFIG_ARC_CONF_SIZE (8 * 1024)
/* Config area for Universal H/W ID */
#define CONFIG_ARC_CONF_BASE (0x80000000 + CONFIG_ARC_CONF_SIZE)
#define CONFIG_ARC_KERNEL_PAGE_SIZE (8 * 1024)
#define RUBY_KERNEL_LOAD_DRAM_BEGIN (RUBY_DRAM_BEGIN + 0x3000000)
/* DDR layout */
#define CONFIG_ARC_NULL_BASE 0x00000000
#define CONFIG_ARC_NULL_SIZE (64 * 1024)
#define CONFIG_ARC_NULL_END (CONFIG_ARC_NULL_BASE + CONFIG_ARC_NULL_SIZE)
/* PCIe BDA area */
#define CONFIG_ARC_PCIE_BASE (RUBY_DRAM_BEGIN + CONFIG_ARC_NULL_END)
#define CONFIG_ARC_PCIE_SIZE (64 * 1024) /* minimal PCI BAR size */
#if ((CONFIG_ARC_PCIE_BASE & (64 * 1024 - 1)) != 0)
#error "The reserved region for PCIe BAR should be 64k aligned!"
#endif
/*
* CONFIG_ARC_MUC_STACK_OFFSET_UBOOT must be equal to CONFIG_ARC_MUC_STACK_OFFSET
* and RUBY_CRUMBS_OFFSET_UBOOT must be equal to RUBY_CRUMBS_OFFSET.
* Their values can be obtained with host/utilities/ruby_mem_helper.
*/
#if TOPAZ_RX_ACCELERATE
/* Must be equal to CONFIG_ARC_MUC_STACK_OFFSET */
#define CONFIG_ARC_MUC_STACK_OFFSET_UBOOT (0x0003F7C0)
/* MuC stack, included in CONFIG_ARC_MUC_SRAM_SIZE */
#define CONFIG_ARC_MUC_STACK_SIZE (4 * 1024)
#else
/* Must be equal to CONFIG_ARC_MUC_STACK_OFFSET */
#define CONFIG_ARC_MUC_STACK_OFFSET_UBOOT (0x0003FFA0)
/* MuC stack, included in CONFIG_ARC_MUC_SRAM_SIZE */
#define CONFIG_ARC_MUC_STACK_SIZE (6 * 1024)
#endif
#define CONFIG_ARC_MUC_STACK_INIT_UBOOT (RUBY_SRAM_BEGIN + CONFIG_ARC_MUC_STACK_OFFSET_UBOOT)
#ifdef TOPAZ_PLATFORM
/* Must be equal to RUBY_CRUMBS_OFFSET */
#define RUBY_CRUMBS_OFFSET_UBOOT (0x0003FFC0)
#else
#define RUBY_CRUMBS_OFFSET_UBOOT (0x0003FFA0)
#endif
#define RUBY_CRUMBS_ADDR_UBOOT (RUBY_SRAM_BEGIN + RUBY_CRUMBS_OFFSET_UBOOT)
/*
* Crumb structure, sits at the end of SRAM. Each core can use it to
* store the last run function to detect bus hangs.
*/
#ifndef __ASSEMBLY__
struct ruby_crumbs_percore {
unsigned long blink;
unsigned long status32;
unsigned long sp;
};
struct ruby_crumbs_mem_section {
unsigned long start;
unsigned long end;
};
struct ruby_crumbs {
struct ruby_crumbs_percore lhost;
struct ruby_crumbs_percore muc;
struct ruby_crumbs_percore dsp;
/*
* allow (somewhat) intelligent parsing of muc stacks by
* specifying the text section
*/
struct ruby_crumbs_mem_section muc_dram;
struct ruby_crumbs_mem_section muc_sram;
/*
* magic token; if set incorrectly we probably have
* random values after power-on
*/
unsigned long magic;
};
#define RUBY_CRUMBS_MAGIC 0x7c97be8f
#endif /* __ASSEMBLY__ */
/* Utility functions */
#ifndef __ASSEMBLY__
#if defined(AUC_BUILD) || defined(RUBY_MINI)
#define NO_RUBY_WEAK 1
#else
#define NO_RUBY_WEAK 0
#endif
#define RUBY_BAD_BUS_ADDR ((unsigned long)0)
#define RUBY_BAD_VIRT_ADDR ((void*)RUBY_BAD_BUS_ADDR)
#define RUBY_ERROR_ADDR ((unsigned long)0xefefefef)
#if defined(__CHECKER__)
#define RUBY_INLINE static inline __attribute__((always_inline))
#define RUBY_WEAK(name) RUBY_INLINE
#elif defined(__GNUC__)
/*GCC*/
#define RUBY_INLINE static inline __attribute__((always_inline))
#if NO_RUBY_WEAK
#define RUBY_WEAK(name) RUBY_INLINE
#else
#define RUBY_WEAK(name) __attribute__((weak))
#endif
#else
/*MCC*/
#define RUBY_INLINE static _Inline
#if NO_RUBY_WEAK
#define RUBY_WEAK(name) RUBY_INLINE
#else
#define RUBY_WEAK(name) pragma Weak(name);
#endif
#pragma Offwarn(428)
#endif
#define ____in_mem_range(addr, start, size) \
(((addr) >= (start)) && ((addr) < (start) + (size)))
#if defined(STATIC_CHECK) || defined(__CHECKER__)
RUBY_INLINE int __in_mem_range(unsigned long addr, unsigned long start, unsigned long size)
{
return (((addr) >= (start)) && ((addr) < (start) + (size)));
}
#else
#define __in_mem_range ____in_mem_range
#endif
#if RUBY_MMAP_FLIP
RUBY_INLINE unsigned long virt_to_bus(const void *addr)
{
unsigned long ret = (unsigned long)addr;
if (__in_mem_range(ret, RUBY_SRAM_FLIP_BEGIN, RUBY_SRAM_SIZE)) {
ret = ret - RUBY_SRAM_FLIP_BEGIN + RUBY_SRAM_NOFLIP_BEGIN;
} else if (__in_mem_range(ret, RUBY_DRAM_FLIP_BEGIN, RUBY_MAX_DRAM_SIZE)) {
ret = ret - RUBY_DRAM_FLIP_BEGIN + RUBY_DRAM_NOFLIP_BEGIN;
} else if (ret < RUBY_HARDWARE_BEGIN) {
ret = RUBY_BAD_BUS_ADDR;
}
return ret;
}
RUBY_WEAK(bus_to_virt) void* bus_to_virt(unsigned long addr)
{
unsigned long ret = addr;
if (__in_mem_range(ret, RUBY_SRAM_NOFLIP_BEGIN, RUBY_SRAM_SIZE)) {
ret = ret - RUBY_SRAM_NOFLIP_BEGIN + RUBY_SRAM_FLIP_BEGIN;
} else if (__in_mem_range(ret, RUBY_DRAM_NOFLIP_BEGIN, RUBY_MAX_DRAM_SIZE)) {
ret = ret - RUBY_DRAM_NOFLIP_BEGIN + RUBY_DRAM_FLIP_BEGIN;
} else if (ret < RUBY_HARDWARE_BEGIN) {
ret = (unsigned long)RUBY_BAD_VIRT_ADDR;
}
return (void*)ret;
}
#else
/* Map 1:1; the (x) address must be above 0x8000_0000. */
#define virt_to_bus(x) ((unsigned long)(x))
#define bus_to_virt(x) ((void *)(x))
#endif /* #if RUBY_MMAP_FLIP */
#ifndef __GNUC__
/* MCC */
#pragma Popwarn()
#endif
#endif /* #ifndef __ASSEMBLY__ */
#endif /* __COMMON_MEM_H */

View File

@ -0,0 +1,8 @@
#define TOPAZ_PLATFORM
#define TOPAZ_FPGA_PLATFORM 0
#define TOPAZ_EMAC_NULL_BUF_WR
#undef TOPAZ_FPGA_UMCTL1
#define PLATFORM_WMAC_MODE ap
#undef PLATFORM_DEFAULT_BOARD_ID
#define ARC_HW_REV_NEEDS_TLBMISS_FIX
#define TOPAZ_VNET_WR_STAGING 0

View File

@ -0,0 +1,14 @@
#ifndef _PCI_RPC_H
#define _PCI_RPC_H
#include "rpc_pci_nlm.h"
extern CLIENT *
clnt_pci_create (const char *hostname,
u_long prog,
u_long vers,
const char *proto);
extern SVCXPRT *svc_pci_create (int sock);
#endif

View File

@ -0,0 +1,26 @@
#ifndef __PCI_NLM_H__
#define __PCI_NLM_H__
/*
* We separate the netlink type for client and server here.
* If the netlink type conflicts with a customer's, they only need to modify
* NETLINK_RPC_PCI_CLNT and the type defined in the PCIe RC driver; the netlink
* type in the RPC server and the PCIe EP driver will not be affected.
*/
#define NETLINK_RPC_PCI_CLNT 31
#define NETLINK_RPC_PCI_SVC 31
#define PCIMSGSIZE (64 * 1024 - 1)
/*
* Netlink message types.
*/
#define RPC_TYPE_CALL_QCSAPI_PCIE 0x0100
#define RPC_TYPE_QCSAPI_PCIE 0x0200
#define NETLINK_TYPE_SVC_REGISTER (PCIE_RPC_TYPE | 0x0010)
#define NETLINK_TYPE_SVC_RESPONSE (PCIE_RPC_TYPE | 0x0011)
#define NETLINK_TYPE_CLNT_REGISTER (PCIE_RPC_TYPE | 0x0010)
#define NETLINK_TYPE_CLNT_REQUEST (PCIE_RPC_TYPE | 0x0011)
#endif

View File

@ -0,0 +1,94 @@
/*
* Copyright (c) 2015 Quantenna Communications, Inc.
* All rights reserved.
*/
#ifndef RPC_RAW_H
#define RPC_RAW_H
#include <rpc/rpc.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a) (sizeof (a) / sizeof ((a)[0]))
#endif
#define QRPC_RAW_SOCK_PROT 11
#define QFTP_RAW_SOCK_PROT 22
#define ETH_P_OUI_EXT 0x88B7
#define QUANTENNA_OUI 0x002686
#define QFTP_DATA_PKT_HDR_SIZE (sizeof(struct q_raw_ethoui_hdr) +\
sizeof(struct qftp_data_pkt) - 1)
#define QFTP_ACK_NACK_FRAME_LEN (sizeof(struct q_raw_ethoui_hdr) +\
sizeof(struct qftp_ack_nack_pkt))
/* QFT */
#define QFTP_FRAME_TYPE_NACK 0
#define QFTP_FRAME_TYPE_ACK 1
#define QFTP_FRAME_TYPE_CONNECT 2
#define QFTP_FRAME_TYPE_DATA 3
/* RPC QCSAPI */
#define QRPC_FRAME_TYPE_COMPLETE 4
#define QRPC_FRAME_TYPE_FRAG 5
#define QRPC_BUFFER_LEN (16 * 1024)
#define QRPC_QCSAPI_RPCD_SID 0
#define QRPC_CALL_QCSAPI_RPCD_SID 1
struct q_raw_ethoui_hdr {
struct ethhdr eth_hdr;
uint8_t prot_id[5]; /* Protocol Identifier */
uint8_t _pad1;
} __attribute__ ((packed));
/* QRPC frames */
struct qrpc_frame_hdr {
struct q_raw_ethoui_hdr qhdr;
uint8_t sub_type;
uint8_t sid;
uint16_t seq;
} __attribute__ ((packed));
struct qrpc_raw_ethpkt {
struct qrpc_frame_hdr fhdr;
char payload[ETH_FRAME_LEN - sizeof(struct qrpc_frame_hdr)];
} __attribute__ ((packed));
/* QFTP frame payloads */
struct qftp_raw_ethpkt {
struct q_raw_ethoui_hdr hdr;
char payload[ETH_FRAME_LEN - sizeof(struct q_raw_ethoui_hdr)];
} __attribute__ ((packed));
struct qftp_connect_pkt {
uint16_t sub_type;
uint16_t seq;
uint32_t image_size;
char image_name[1];
} __attribute__ ((packed));
struct qftp_data_pkt {
uint16_t sub_type;
uint16_t seq;
char data[1];
} __attribute__ ((packed));
struct qftp_ack_nack_pkt {
uint16_t sub_type;
uint16_t seq;
} __attribute__ ((packed));
extern CLIENT *qrpc_clnt_raw_create(u_long prog, u_long vers,
const char *const srcif_name, const uint8_t * dmac_addr, uint8_t sess_id);
extern SVCXPRT *qrpc_svc_raw_create(int sock, const char *const bind_interface, uint8_t sess_id);
extern int qrpc_set_prot_filter(const int sock, const short prot);
extern int qrpc_raw_bind(const int sock, const char *const if_name, const int protocol);
extern int str_to_mac(const char *txt_mac, uint8_t * mac);
extern int qrpc_clnt_raw_config_dst(const int sock, const char *const srcif_name,
struct sockaddr_ll *dst_addr,
const uint8_t *dmac_addr,
struct q_raw_ethoui_hdr *pkt_outbuf,
uint8_t qprot);
extern int qrpc_raw_read_timeout(const int sock_fd, const int timeout);
#endif

View File

@ -0,0 +1,182 @@
/*
* (C) Copyright 2010 Quantenna Communications Inc.
*
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
/*
* Header file which describes Ruby platform.
* Has to be used by both kernel and bootloader.
*/
#ifndef __RUBY_CONFIG_H
#define __RUBY_CONFIG_H
#include "topaz_config.h"
/*******************************************************************/
#if TOPAZ_MMAP_UNIFIED
#define RUBY_MMAP_FLIP 0
#else
#if !(defined(MUC_BUILD) || defined(DSP_BUILD) || defined(AUC_BUILD))
#define RUBY_MMAP_FLIP 1
#else
#define RUBY_MMAP_FLIP 0
#endif
#endif
/* Set to 1 if the MuC needs to enable the TLB, otherwise set to 0 */
#define RUBY_MUC_TLB_ENABLE 1
/*******************************************************************/
#ifdef RUBY_PLATFORM
#if RUBY_FPGA_PLATFORM
#define RUBY_SERIAL_BAUD 38400
#define RUBY_FIXED_DEV_CLK 12500000
#define RUBY_FIXED_CPU_CLK 40000000
#define RUBY_FPGA_DDR
#else
#define RUBY_SERIAL_BAUD 115200
#define RUBY_FIXED_DEV_CLK 125000000
#define RUBY_FIXED_CPU_CLK 400000000
#define RUBY_ASIC_DDR
#endif /* #if RUBY_FPGA_PLATFORM */
#define UPF_SPD_FLAG 0
#define DEFAULT_BAUD RUBY_SERIAL_BAUD
#endif /* #ifdef RUBY_PLATFORM */
/*******************************************************************/
/* Define some constants for Linux ARC kernel */
#define CONFIG_ARC700_SERIAL_BAUD RUBY_SERIAL_BAUD
#define CONFIG_ARC700_CLK RUBY_FIXED_CPU_CLK
#define CONFIG_ARC700_DEV_CLK RUBY_FIXED_DEV_CLK
/*******************************************************************/
/* RGMII related defines */
#define CONFIG_ARCH_RUBY_ENET_RGMII
#define CONFIG_ARCH_RGMII_DEFAULT 0x8F8F8F8F
#define CONFIG_ARCH_RGMII_DLL_TIMING 0x8F8D8F8F
#define CONFIG_ARCH_RGMII_S1P8NS_H1P9NS 0x8F891F1F
#define CONFIG_ARCH_RGMII_NODELAY 0x1F1F1F1F
#define CONFIG_ARCH_RGMII_710F CONFIG_ARCH_RGMII_NODELAY
#define CONFIG_ARCH_RGMII_P1RX00TX0E 0x0E8E1F1F
/* EMAC related defines */
/* EMAC flags */
#define EMAC_NOT_IN_USE (0)
#define EMAC_IN_USE (BIT(0))
#define EMAC_PHY_NOT_IN_USE (BIT(1)) // do not initialize/access phy mdio
#define EMAC_PHY_FORCE_10MB (BIT(2))
#define EMAC_PHY_FORCE_100MB (BIT(3))
#define EMAC_PHY_FORCE_1000MB (BIT(4))
#define EMAC_PHY_FORCE_HDX (BIT(5))
#define EMAC_PHY_RESET (BIT(6)) // force PHY reset
#define EMAC_PHY_MII (BIT(7)) // default is rgmii
#define EMAC_PHY_AUTO_MASK (EMAC_PHY_FORCE_10MB | EMAC_PHY_FORCE_100MB | EMAC_PHY_FORCE_1000MB)
#define EMAC_PHY_AR8236 (BIT(8))
#define EMAC_PHY_AR8327 (BIT(9))
#define EMAC_PHY_GPIO1_RESET (BIT(10))
#define EMAC_PHY_GPIO13_RESET (BIT(11))
#define EMAC_PHY_NO_COC (BIT(12)) // do not adjust link speed for power savings
#define EMAC_PHY_MV88E6071 (BIT(13))
#define EMAC_PHY_FPGAA_ONLY (BIT(15))
#define EMAC_PHY_FPGAB_ONLY (BIT(16))
#define EMAC_PHY_RTL8363SB_P0 (BIT(18))
#define EMAC_PHY_RTL8363SB_P1 (BIT(19))
#define EMAC_BONDED (BIT(20))
#define EMAC_PHY_RTL8365MB (BIT(21))
#define EMAC_PHY_RTL8211DS (BIT(22))
#define EMAC_PHY_CUSTOM (BIT(31))
#define EMAC_MV88E6071 (EMAC_IN_USE | EMAC_PHY_MII | EMAC_PHY_NOT_IN_USE | \
EMAC_PHY_NO_COC | EMAC_PHY_FORCE_100MB | EMAC_PHY_MV88E6071)
#define EMAC_SLOW_PHY (EMAC_PHY_FORCE_10MB|EMAC_PHY_FORCE_100MB|EMAC_PHY_MII)
/* force phy addr scan */
#define EMAC_PHY_ADDR_SCAN (32) // scan bus for addr
/* Flash memory sizes */
#define FLASH_32MB (32*1024*1024)
#define FLASH_16MB (16*1024*1024)
#define FLASH_8MB (8*1024*1024)
#define FLASH_4MB (4*1024*1024)
#define FLASH_2MB (2*1024*1024)
#define FLASH_64KB (64*1024)
#define DEFAULT_FLASH_SIZE (FLASH_8MB)
#define FLASH_SIZE_JEDEC (0)
/* DDR memory sizes */
#define DDR_256MB (256*1024*1024)
#define DDR_128MB (128*1024*1024)
#define DDR_64MB (64*1024*1024)
#define DDR_32MB (32*1024*1024)
#define DDR_AUTO (0)
#define DEFAULT_DDR_SIZE (DDR_64MB)
/* Other DDR defines */
#define DDR3_800MHz 800
#define DDR3_640MHz 640
#define DDR3_500MHz 500
#define DDR3_400MHz 400
#define DDR3_320MHz 320
#define DDR_400 400
#define DDR_320 320
#define DDR_250 250
#define DDR_200 200
#define DDR_160 160
#define DDR_125 125
#define DEFAULT_DDR_SPEED (DDR_160)
#define DDR_32_MICRON 0
#define DDR_16_MICRON 1
#define DDR_16_ETRON 2
#define DDR_16_SAMSUNG 3
#define DDR_32_ETRON 4
#define DDR_32_SAMSUNG 5
#define DDR_16_HYNIX 6
#define DDR3_16_WINBOND 7
#define DDR3_32_WINBOND 8
#define DEFAULT_DDR_CFG (DDR_16_MICRON)
/* UART1 defines */
#define UART1_NOT_IN_USE 0
#define UART1_IN_USE 1
#define PCIE_NOT_IN_USE 0
#define PCIE_IN_USE (BIT(0))
#define PCIE_USE_PHY_LOOPBK (BIT(1))
#define PCIE_RC_MODE (BIT(2))
#define PCIE_ENDPOINT (PCIE_IN_USE | PCIE_USE_PHY_LOOPBK)
#define PCIE_ROOTCOMPLEX (PCIE_IN_USE | PCIE_RC_MODE | PCIE_USE_PHY_LOOPBK)
/*******************************************************************/
#define CONFIG_USE_SPI1_FOR_IPC PLATFORM_REG_SWITCH(1, 0)
#endif // #ifndef __RUBY_CONFIG_H

View File

@ -0,0 +1,532 @@
/*
* (C) Copyright 2010 Quantenna Communications Inc.
*
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
/*
* Header file which describes Ruby platform.
* Has to be used by runtime firmware.
*/
#ifndef __RUBY_MEM_H
#define __RUBY_MEM_H
#include "common_mem.h"
/* FIXME: Move CPU related macros to a separate header file. */
#define ARC_DCACHE_LINE_LENGTH 32
/* NEVTBD - put in real XYMEM values */
#define RUBY_DSP_XYMEM_BEGIN 0xD0000000
#define RUBY_DSP_XYMEM_END 0xDFFFFFFF
/* SRAM layout */
#define RUBY_CRUMBS_SIZE 64 /* bytes at the very end of sram for crash tracing */
#ifdef TOPAZ_PLATFORM
#ifdef QTN_RC_ENABLE_HDP
#define TOPAZ_HBM_BUF_EMAC_RX_COUNT_S (14)
#define TOPAZ_HBM_BUF_WMAC_RX_COUNT_S (0)
#else
#define TOPAZ_HBM_BUF_EMAC_RX_COUNT_S (13)
#define TOPAZ_HBM_BUF_WMAC_RX_COUNT_S (11)
#endif
#define TOPAZ_HBM_EMAC_TX_DONE_COUNT_S (12)
#define TOPAZ_HBM_BUF_EMAC_RX_COUNT (1 << TOPAZ_HBM_BUF_EMAC_RX_COUNT_S)
#define TOPAZ_HBM_BUF_WMAC_RX_COUNT (1 << TOPAZ_HBM_BUF_WMAC_RX_COUNT_S)
#define TOPAZ_HBM_EMAC_TX_DONE_COUNT (1 << TOPAZ_HBM_EMAC_TX_DONE_COUNT_S)
/* dedicated SRAM space for HBM pointer pools */
#define TOPAZ_HBM_POOL_PTR_SIZE 4 /* sizeof(void *), 32 bit arch */
#define TOPAZ_HBM_POOL_EMAC_RX_START 0x00000000
#define TOPAZ_HBM_POOL_EMAC_RX_SIZE (TOPAZ_HBM_BUF_EMAC_RX_COUNT * TOPAZ_HBM_POOL_PTR_SIZE)
#define TOPAZ_HBM_POOL_EMAC_RX_END (TOPAZ_HBM_POOL_EMAC_RX_START + TOPAZ_HBM_POOL_EMAC_RX_SIZE)
#define TOPAZ_HBM_POOL_WMAC_RX_START TOPAZ_HBM_POOL_EMAC_RX_END
#define TOPAZ_HBM_POOL_WMAC_RX_SIZE (TOPAZ_HBM_BUF_WMAC_RX_COUNT * TOPAZ_HBM_POOL_PTR_SIZE)
#define TOPAZ_HBM_POOL_WMAC_RX_END (TOPAZ_HBM_POOL_WMAC_RX_START + TOPAZ_HBM_POOL_WMAC_RX_SIZE)
#define TOPAZ_HBM_POOL_EMAC_TX_DONE_START TOPAZ_HBM_POOL_WMAC_RX_END
#define TOPAZ_HBM_POOL_EMAC_TX_DONE_SIZE (TOPAZ_HBM_EMAC_TX_DONE_COUNT * TOPAZ_HBM_POOL_PTR_SIZE)
#define TOPAZ_HBM_POOL_EMAC_TX_DONE_END (TOPAZ_HBM_POOL_EMAC_TX_DONE_START + TOPAZ_HBM_POOL_EMAC_TX_DONE_SIZE)
#define TOPAZ_FWT_SW_START TOPAZ_HBM_POOL_EMAC_TX_DONE_END
#define TOPAZ_FWT_SW_SIZE (4096)
#define TOPAZ_FWT_SW_END (TOPAZ_FWT_SW_START + TOPAZ_FWT_SW_SIZE)
#define CONFIG_MUC_EXTRA_RES_BASE TOPAZ_FWT_SW_END
#define CONFIG_MUC_EXTRA_RESERVE_SIZE (8 * 1024)
#define CONFIG_MUC_EXTRA_RES_END (CONFIG_MUC_EXTRA_RES_BASE + CONFIG_MUC_EXTRA_RESERVE_SIZE)
#define CONFIG_ARC_KERNEL_SRAM_B1_BASE ROUNDUP(CONFIG_MUC_EXTRA_RES_END, CONFIG_ARC_KERNEL_PAGE_SIZE)
#define CONFIG_ARC_KERNEL_SRAM_B1_SIZE (22 * 1024)
#define CONFIG_ARC_KERNEL_SRAM_B1_END (CONFIG_ARC_KERNEL_SRAM_B1_BASE + CONFIG_ARC_KERNEL_SRAM_B1_SIZE)
#define CONFIG_ARC_KERNEL_SRAM_B2_BASE CONFIG_ARC_KERNEL_SRAM_B1_END
#define CONFIG_ARC_KERNEL_SRAM_B2_END (ROUNDUP(CONFIG_ARC_KERNEL_SRAM_B2_BASE, RUBY_SRAM_BANK_SIZE) - \
ROUNDUP(TOPAZ_VNET_WR_STAGING_RESERVE, CONFIG_ARC_KERNEL_PAGE_SIZE))
#define CONFIG_ARC_KERNEL_SRAM_B2_SIZE (CONFIG_ARC_KERNEL_SRAM_B2_END - CONFIG_ARC_KERNEL_SRAM_B2_BASE)
#define TOPAZ_VNET_WR_STAGING_1_START CONFIG_ARC_KERNEL_SRAM_B2_END
#define TOPAZ_VNET_WR_STAGING_1_SIZE TOPAZ_VNET_WR_STAGING_RESERVE
#define TOPAZ_VNET_WR_STAGING_1_END (TOPAZ_VNET_WR_STAGING_1_START + TOPAZ_VNET_WR_STAGING_1_SIZE)
#define TOPAZ_VNET_WR_STAGING_2_START ROUNDUP(TOPAZ_VNET_WR_STAGING_1_END, RUBY_SRAM_BANK_SIZE)
#define TOPAZ_VNET_WR_STAGING_2_SIZE TOPAZ_VNET_WR_STAGING_RESERVE
#define TOPAZ_VNET_WR_STAGING_2_END (TOPAZ_VNET_WR_STAGING_2_START + TOPAZ_VNET_WR_STAGING_2_SIZE)
#if TOPAZ_VNET_WR_STAGING_2_START != (2 * RUBY_SRAM_BANK_SIZE)
#error SRAM linkmap error forming topaz sram wr staging
#endif
#define CONFIG_ARC_MUC_SRAM_B1_BASE ROUNDUP(TOPAZ_VNET_WR_STAGING_2_END, CONFIG_ARC_KERNEL_PAGE_SIZE)
#define CONFIG_ARC_MUC_SRAM_B1_END ROUNDUP(CONFIG_ARC_MUC_SRAM_B1_BASE + 1, RUBY_SRAM_BANK_SIZE)
#define CONFIG_ARC_MUC_SRAM_B1_SIZE (CONFIG_ARC_MUC_SRAM_B1_END - CONFIG_ARC_MUC_SRAM_B1_BASE)
#define CONFIG_ARC_MUC_SRAM_B2_BASE ROUNDUP(CONFIG_ARC_MUC_SRAM_B1_END, RUBY_SRAM_BANK_SIZE)
#define CONFIG_ARC_MUC_SRAM_B2_SIZE (RUBY_SRAM_BANK_SAFE_SIZE - RUBY_CRUMBS_SIZE)
#define CONFIG_ARC_MUC_SRAM_B2_END (CONFIG_ARC_MUC_SRAM_B2_BASE + CONFIG_ARC_MUC_SRAM_B2_SIZE)
#define CONFIG_ARC_AUC_SRAM_BASE ROUNDUP(CONFIG_ARC_MUC_SRAM_B2_END, RUBY_SRAM_BANK_SIZE)
#define CONFIG_ARC_AUC_SRAM_SIZE (3 * RUBY_SRAM_BANK_SIZE)
#define CONFIG_ARC_AUC_SRAM_END (CONFIG_ARC_AUC_SRAM_BASE + CONFIG_ARC_AUC_SRAM_SIZE)
#define CONFIG_ARC_SRAM_END CONFIG_ARC_AUC_SRAM_END
/* The MU TxBF qmatrix is stored in the last bank of SRAM; the DSP writes to it and has to use the SRAM bus address */
#define CONFIG_ARC_MU_QMAT_BASE (RUBY_SRAM_BUS_BEGIN + 0X70000)
#define CONFIG_ARC_MU_QMAT_SIZE RUBY_SRAM_BANK_SIZE
#define CONFIG_ARC_MU_QMAT_END (CONFIG_ARC_MU_QMAT_BASE + CONFIG_ARC_MU_QMAT_SIZE)
#else /* Ruby */
#define CONFIG_ARC_KERNEL_SRAM_B1_BASE 0x00000000
#define CONFIG_ARC_KERNEL_SRAM_B1_SIZE RUBY_SRAM_BANK_SAFE_SIZE
#define CONFIG_ARC_KERNEL_SRAM_B1_END (CONFIG_ARC_KERNEL_SRAM_B1_BASE + CONFIG_ARC_KERNEL_SRAM_B1_SIZE)
#define CONFIG_ARC_KERNEL_SRAM_B2_BASE (CONFIG_ARC_KERNEL_SRAM_B1_BASE + RUBY_SRAM_BANK_SIZE)
#define CONFIG_ARC_KERNEL_SRAM_B2_SIZE RUBY_SRAM_BANK_SAFE_SIZE
#define CONFIG_ARC_KERNEL_SRAM_B2_END (CONFIG_ARC_KERNEL_SRAM_B2_BASE + CONFIG_ARC_KERNEL_SRAM_B2_SIZE)
#define CONFIG_ARC_MUC_SRAM_B1_BASE ROUNDUP(CONFIG_ARC_KERNEL_SRAM_B2_END, RUBY_SRAM_BANK_SIZE)
#define CONFIG_ARC_MUC_SRAM_B1_SIZE RUBY_SRAM_BANK_SAFE_SIZE
#define CONFIG_ARC_MUC_SRAM_B1_END (CONFIG_ARC_MUC_SRAM_B1_BASE + CONFIG_ARC_MUC_SRAM_B1_SIZE)
#define CONFIG_ARC_MUC_SRAM_B2_BASE ROUNDUP(CONFIG_ARC_MUC_SRAM_B1_END, RUBY_SRAM_BANK_SIZE)
#define CONFIG_ARC_MUC_SRAM_B2_SIZE (RUBY_SRAM_BANK_SAFE_SIZE - RUBY_CRUMBS_SIZE)
#define CONFIG_ARC_MUC_SRAM_B2_END (CONFIG_ARC_MUC_SRAM_B2_BASE + CONFIG_ARC_MUC_SRAM_B2_SIZE)
#endif /* TOPAZ_PLATFORM */
#if TOPAZ_RX_ACCELERATE
/* TODO FIXME - MuC crashed when copying data between SRAM and DDR */
#define CONFIG_ARC_MUC_STACK_OFFSET (CONFIG_ARC_MUC_SRAM_B2_END - 2048)
#else
#define CONFIG_ARC_MUC_STACK_OFFSET (CONFIG_ARC_MUC_SRAM_B2_END)
#endif
#if CONFIG_ARC_MUC_STACK_OFFSET_UBOOT != CONFIG_ARC_MUC_STACK_OFFSET
#error "CONFIG_ARC_MUC_STACK_OFFSET_UBOOT must be equal to CONFIG_ARC_MUC_STACK_OFFSET!"
#endif
#define CONFIG_ARC_MUC_STACK_INIT (RUBY_SRAM_BEGIN + CONFIG_ARC_MUC_STACK_OFFSET)
#define RUBY_CRUMBS_OFFSET (CONFIG_ARC_MUC_SRAM_B2_END)
#if RUBY_CRUMBS_OFFSET != RUBY_CRUMBS_OFFSET_UBOOT
#error "RUBY_CRUMBS_OFFSET_UBOOT must be equal to RUBY_CRUMBS_OFFSET!"
#endif
#define RUBY_CRUMBS_ADDR (RUBY_SRAM_BEGIN + RUBY_CRUMBS_OFFSET)
/* DDR layout */
#define CONFIG_ARC_PCIE_RSVD_SIZE (64 * 1024)
#define CONFIG_ARC_DSP_BASE (CONFIG_ARC_NULL_END + CONFIG_ARC_PCIE_RSVD_SIZE)
#define CONFIG_ARC_DSP_SIZE (768 * 1024)
#define CONFIG_ARC_DSP_END (CONFIG_ARC_DSP_BASE + CONFIG_ARC_DSP_SIZE)
#define CONFIG_ARC_MUC_BASE CONFIG_ARC_DSP_END
#ifdef TOPAZ_128_NODE_MODE
#define CONFIG_ARC_MUC_SIZE ((3 * 1024 * 1024) + (528 * 1024))
#else
#define CONFIG_ARC_MUC_SIZE ((2 * 1024 * 1024) + (768 * 1024))
#endif
#define MUC_DRAM_RX_RESVERED_RELOC_SIZE (8 * 1024)
#define CONFIG_ARC_MUC_END (CONFIG_ARC_MUC_BASE + CONFIG_ARC_MUC_SIZE)
#define CONFIG_ARC_MUC_MAPPED_BASE CONFIG_ARC_MUC_BASE
#define CONFIG_ARC_MUC_MAPPED_SIZE (RUBY_MAX_DRAM_SIZE - CONFIG_ARC_MUC_MAPPED_BASE)
#ifdef TOPAZ_PLATFORM
#define CONFIG_ARC_AUC_BASE CONFIG_ARC_MUC_END
#define CONFIG_ARC_AUC_SIZE (1024 * 1024 + 768 * 1024 + 40 * 1024)
#define CONFIG_ARC_AUC_END (CONFIG_ARC_AUC_BASE + CONFIG_ARC_AUC_SIZE)
#define TOPAZ_HBM_BUF_ALIGN (1 * 1024)
#define TOPAZ_HBM_BUF_EMAC_RX_POOL 0
#define TOPAZ_HBM_BUF_WMAC_RX_POOL 1
#define TOPAZ_HBM_AUC_FEEDBACK_POOL 2
#define TOPAZ_HBM_EMAC_TX_DONE_POOL 3
#define TOPAZ_HBM_BUF_EMAC_RX_SIZE (4 * 1024)
#define TOPAZ_HBM_BUF_WMAC_RX_SIZE (17 * 1024)
#define TOPAZ_HBM_BUF_META_SIZE 64 /* keep it 2^n */
#define TOPAZ_HBM_POOL_GUARD_SIZE (64 * 1024)
#define TOPAZ_HBM_BUF_EMAC_RX_TOTAL (TOPAZ_HBM_BUF_EMAC_RX_COUNT * \
TOPAZ_HBM_BUF_EMAC_RX_SIZE)
#define TOPAZ_HBM_BUF_WMAC_RX_TOTAL (TOPAZ_HBM_BUF_WMAC_RX_COUNT * \
TOPAZ_HBM_BUF_WMAC_RX_SIZE)
#define TOPAZ_HBM_BUF_META_BASE CONFIG_ARC_AUC_END
#define TOPAZ_HBM_BUF_META_EMAC_RX_BASE (TOPAZ_HBM_BUF_META_BASE + TOPAZ_HBM_BUF_META_SIZE)
#define TOPAZ_HBM_BUF_META_EMAC_RX_BASE_VIRT (RUBY_DRAM_BEGIN + TOPAZ_HBM_BUF_META_EMAC_RX_BASE)
#define TOPAZ_HBM_BUF_META_EMAC_RX_TOTAL (TOPAZ_HBM_BUF_EMAC_RX_COUNT * \
TOPAZ_HBM_BUF_META_SIZE)
#define TOPAZ_HBM_BUF_META_EMAC_RX_END (TOPAZ_HBM_BUF_META_EMAC_RX_BASE + \
TOPAZ_HBM_BUF_META_EMAC_RX_TOTAL)
#define TOPAZ_HBM_BUF_META_WMAC_RX_BASE (TOPAZ_HBM_BUF_META_EMAC_RX_END + TOPAZ_HBM_BUF_META_SIZE)
#define TOPAZ_HBM_BUF_META_WMAC_RX_BASE_VIRT (RUBY_DRAM_BEGIN + TOPAZ_HBM_BUF_META_WMAC_RX_BASE)
#define TOPAZ_HBM_BUF_META_WMAC_RX_TOTAL (TOPAZ_HBM_BUF_WMAC_RX_COUNT * \
TOPAZ_HBM_BUF_META_SIZE)
#define TOPAZ_HBM_BUF_META_WMAC_RX_END (TOPAZ_HBM_BUF_META_WMAC_RX_BASE + \
TOPAZ_HBM_BUF_META_WMAC_RX_TOTAL)
#define TOPAZ_HBM_BUF_META_END (TOPAZ_HBM_BUF_META_WMAC_RX_END + TOPAZ_HBM_BUF_META_SIZE)
#define TOPAZ_HBM_BUF_META_TOTAL (TOPAZ_HBM_BUF_META_END - TOPAZ_HBM_BUF_META_BASE)
#define TOPAZ_HBM_BUF_BASE ROUNDUP(TOPAZ_HBM_BUF_META_END, TOPAZ_HBM_BUF_ALIGN)
#define TOPAZ_HBM_BUF_EMAC_RX_BASE (TOPAZ_HBM_BUF_BASE + TOPAZ_HBM_POOL_GUARD_SIZE)
#define TOPAZ_HBM_BUF_EMAC_RX_BASE_VIRT (RUBY_DRAM_BEGIN + TOPAZ_HBM_BUF_EMAC_RX_BASE)
#define TOPAZ_HBM_BUF_EMAC_RX_END (TOPAZ_HBM_BUF_EMAC_RX_BASE + \
TOPAZ_HBM_BUF_EMAC_RX_TOTAL)
#define TOPAZ_HBM_BUF_WMAC_RX_BASE (TOPAZ_HBM_BUF_EMAC_RX_END + TOPAZ_HBM_POOL_GUARD_SIZE)
#define TOPAZ_HBM_BUF_WMAC_RX_BASE_VIRT (RUBY_DRAM_BEGIN + TOPAZ_HBM_BUF_WMAC_RX_BASE)
#define TOPAZ_HBM_BUF_WMAC_RX_END (TOPAZ_HBM_BUF_WMAC_RX_BASE + \
TOPAZ_HBM_BUF_WMAC_RX_TOTAL)
#define TOPAZ_HBM_BUF_END (TOPAZ_HBM_BUF_WMAC_RX_END + TOPAZ_HBM_POOL_GUARD_SIZE)
#define TOPAZ_FWT_MCAST_ENTRIES 2048
#define TOPAZ_FWT_MCAST_FF_ENTRIES 8 /* one per vap */
#define TOPAZ_FWT_MCAST_IPMAP_ENT_SIZE 64 /* sizeof(struct topaz_fwt_sw_ipmap) */
#define TOPAZ_FWT_MCAST_TQE_ENT_SIZE 20 /* sizeof(struct topaz_fwt_sw_mcast_entry) */
/* Tables are cache-line aligned to ensure proper memory flushing. */
#define TOPAZ_FWT_MCAST_IPMAP_SIZE \
ROUNDUP(TOPAZ_FWT_MCAST_ENTRIES * TOPAZ_FWT_MCAST_IPMAP_ENT_SIZE, \
ARC_DCACHE_LINE_LENGTH)
#define TOPAZ_FWT_MCAST_TQE_SIZE \
ROUNDUP(TOPAZ_FWT_MCAST_ENTRIES * TOPAZ_FWT_MCAST_TQE_ENT_SIZE, \
ARC_DCACHE_LINE_LENGTH)
#define TOPAZ_FWT_MCAST_TQE_FF_SIZE \
ROUNDUP(TOPAZ_FWT_MCAST_FF_ENTRIES * TOPAZ_FWT_MCAST_TQE_ENT_SIZE, \
ARC_DCACHE_LINE_LENGTH)
#define TOPAZ_FWT_MCAST_IPMAP_BASE TOPAZ_HBM_BUF_END
#define TOPAZ_FWT_MCAST_IPMAP_END (TOPAZ_FWT_MCAST_IPMAP_BASE + TOPAZ_FWT_MCAST_IPMAP_SIZE)
#define TOPAZ_FWT_MCAST_TQE_BASE TOPAZ_FWT_MCAST_IPMAP_END
#define TOPAZ_FWT_MCAST_TQE_END (TOPAZ_FWT_MCAST_TQE_BASE + TOPAZ_FWT_MCAST_TQE_SIZE)
#define TOPAZ_FWT_MCAST_TQE_FF_BASE TOPAZ_FWT_MCAST_TQE_END
#define TOPAZ_FWT_MCAST_TQE_FF_END (TOPAZ_FWT_MCAST_TQE_FF_BASE + TOPAZ_FWT_MCAST_TQE_FF_SIZE)
#define TOPAZ_FWT_MCAST_END TOPAZ_FWT_MCAST_TQE_FF_END
/* Offset from the beginning of DDR at which memory starts to belong to Linux */
#define CONFIG_ARC_KERNEL_MEM_BASE TOPAZ_FWT_MCAST_END
#if TOPAZ_HBM_BUF_EMAC_RX_BASE & (TOPAZ_HBM_BUF_ALIGN - 1)
#error EMAC Buffer start not aligned
#endif
#if TOPAZ_HBM_BUF_WMAC_RX_BASE & (TOPAZ_HBM_BUF_ALIGN - 1)
#error WMAC Buffer start not aligned
#endif
#else
/* Offset from the beginning of DDR at which memory starts to belong to Linux */
#define CONFIG_ARC_KERNEL_MEM_BASE CONFIG_ARC_MUC_END
#endif
#define CONFIG_ARC_UBOOT_RESERVED_SPACE (8 * 1024)
/* Linux kernel u-boot image start address, for uncompressed images */
#define CONFIG_ARC_KERNEL_BOOT_BASE ROUNDUP(CONFIG_ARC_KERNEL_MEM_BASE, \
CONFIG_ARC_KERNEL_PAGE_SIZE)
/* Linux kernel image start */
#define CONFIG_ARC_KERNEL_BASE (CONFIG_ARC_KERNEL_BOOT_BASE + CONFIG_ARC_UBOOT_RESERVED_SPACE)
#define CONFIG_ARC_KERNEL_MAX_SIZE (RUBY_MAX_DRAM_SIZE - CONFIG_ARC_KERNEL_MEM_BASE)
#define CONFIG_ARC_KERNEL_MIN_SIZE (RUBY_MIN_DRAM_SIZE - CONFIG_ARC_KERNEL_MEM_BASE)
/* AuC tightly coupled memory specification */
#define TOPAZ_AUC_IMEM_ADDR 0xE5000000
#define TOPAZ_AUC_IMEM_SIZE (32 * 1024)
/* BBIC4 RevB AuC DMEM bottom 4KB: 0xE510_0000 to 0xE510_0FFF is aliased with Wmac1 TCM 0xE514_0000
* exclude the bottom 4K from DMEM, and reduce the size from 16KB to 12KB
*/
#define TOPAZ_AUC_DMEM_ADDR 0xE5101000
#define TOPAZ_AUC_DMEM_SIZE (12 * 1024)
#define TOPAZ_REVB_DMEM_SIZE_RESERVED (4 *1024)
/***************/
/* Utility functions */
#ifndef __ASSEMBLY__
#if defined(__CHECKER__)
#define __sram_text
#define __sram_data
#elif defined(__GNUC__)
/*GCC*/
#if defined(CONFIG_ARCH_RUBY_NUMA) && defined(__KERNEL__) && defined(__linux__)
/* Kernel is compiled with -mlong-calls option, so we can make calls between code fragments placed in different memories */
#define __sram_text_sect_name ".sram.text"
#define __sram_data_sect_name ".sram.data"
#if defined(PROFILE_LINUX_EP) || defined(TOPAZ_PLATFORM)
# define __sram_text
# define __sram_data
#else
# define __sram_text __attribute__ ((__section__ (__sram_text_sect_name)))
# define __sram_data __attribute__ ((__section__ (__sram_data_sect_name)))
#endif
#else
#define __sram_text_sect_name ".text"
#define __sram_data_sect_name ".data"
#define __sram_text
#define __sram_data
#endif
#else
#pragma Offwarn(428)
#endif
RUBY_INLINE int is_valid_mem_addr(unsigned long addr)
{
if (__in_mem_range(addr, RUBY_SRAM_BEGIN, RUBY_SRAM_SIZE)) {
return 1;
} else if (__in_mem_range(addr, RUBY_DRAM_BEGIN, RUBY_MAX_DRAM_SIZE)) {
return 1;
}
return 0;
}
#if TOPAZ_MMAP_UNIFIED
RUBY_WEAK(virt_to_nocache) void* virt_to_nocache(const void *addr)
{
unsigned long ret = (unsigned long)addr;
if (__in_mem_range(ret, RUBY_SRAM_BEGIN, RUBY_SRAM_SIZE)) {
ret = ret - RUBY_SRAM_BEGIN + RUBY_SRAM_NOCACHE_BEGIN;
} else if (__in_mem_range(ret, RUBY_DRAM_BEGIN, RUBY_MAX_DRAM_SIZE)) {
ret = ret - RUBY_DRAM_BEGIN + RUBY_DRAM_NOCACHE_BEGIN;
} else if (ret < RUBY_HARDWARE_BEGIN) {
ret = (unsigned long)RUBY_BAD_VIRT_ADDR;
}
return (void*)ret;
}
RUBY_WEAK(nocache_to_virt) void* nocache_to_virt(const void *addr)
{
unsigned long ret = (unsigned long)addr;
if (__in_mem_range(ret, RUBY_SRAM_NOCACHE_BEGIN, RUBY_SRAM_SIZE)) {
ret = ret - RUBY_SRAM_NOCACHE_BEGIN + RUBY_SRAM_BEGIN;
} else if (__in_mem_range(ret, RUBY_DRAM_NOCACHE_BEGIN, RUBY_MAX_DRAM_SIZE)) {
ret = ret - RUBY_DRAM_NOCACHE_BEGIN + RUBY_DRAM_BEGIN;
} else if (ret < RUBY_HARDWARE_BEGIN) {
ret = (unsigned long)RUBY_BAD_VIRT_ADDR;
}
return (void*)ret;
}
#endif
#if RUBY_MUC_TLB_ENABLE
#if TOPAZ_MMAP_UNIFIED
#define muc_to_nocache virt_to_nocache
#define nocache_to_muc nocache_to_virt
#else
RUBY_WEAK(muc_to_nocache) void* muc_to_nocache(const void *addr)
{
unsigned long ret = (unsigned long)addr;
if (__in_mem_range(ret, RUBY_SRAM_NOFLIP_BEGIN, RUBY_SRAM_SIZE)) {
ret = ret - RUBY_SRAM_NOFLIP_BEGIN + RUBY_SRAM_NOFLIP_NOCACHE_BEGIN;
} else if (__in_mem_range(ret, RUBY_DRAM_NOFLIP_BEGIN, RUBY_MAX_DRAM_SIZE)) {
ret = ret - RUBY_DRAM_NOFLIP_BEGIN + RUBY_DRAM_NOFLIP_NOCACHE_BEGIN;
} else if (ret < RUBY_HARDWARE_BEGIN) {
ret = (unsigned long)RUBY_BAD_VIRT_ADDR;
}
return (void*)ret;
}
RUBY_WEAK(nocache_to_muc) void* nocache_to_muc(const void *addr)
{
unsigned long ret = (unsigned long)addr;
if (__in_mem_range(ret, RUBY_SRAM_NOFLIP_NOCACHE_BEGIN, RUBY_SRAM_SIZE)) {
ret = ret - RUBY_SRAM_NOFLIP_NOCACHE_BEGIN + RUBY_SRAM_NOFLIP_BEGIN;
} else if (__in_mem_range(ret, RUBY_DRAM_NOFLIP_NOCACHE_BEGIN, RUBY_MAX_DRAM_SIZE)) {
ret = ret - RUBY_DRAM_NOFLIP_NOCACHE_BEGIN + RUBY_DRAM_NOFLIP_BEGIN;
} else if (ret < RUBY_HARDWARE_BEGIN) {
ret = (unsigned long)RUBY_BAD_VIRT_ADDR;
}
return (void*)ret;
}
#endif
#ifndef MUC_BUILD
RUBY_INLINE unsigned long muc_to_lhost(unsigned long addr)
{
void *tmp = nocache_to_muc((void*)addr);
if (tmp != RUBY_BAD_VIRT_ADDR) {
addr = (unsigned long)tmp;
}
return (unsigned long)bus_to_virt(addr);
}
#endif // #ifndef MUC_BUILD
#else
#define muc_to_nocache(x) ((void*)(x))
#define nocache_to_muc(x) ((void*)(x))
#ifndef MUC_BUILD
#define muc_to_lhost(x) ((unsigned long)bus_to_virt((unsigned long)(x)))
#endif // #ifndef MUC_BUILD
#endif // #if RUBY_MUC_TLB_ENABLE
#ifndef __GNUC__
/*MCC*/
#pragma Popwarn()
#endif
#endif // #ifndef __ASSEMBLY__
/*
* "Write memory barrier" instruction emulation.
* The Ruby platform has a complex net of connected buses.
* Write transactions are buffered.
* qtn_wmb() guarantees that all issued earlier and pending writes
* to system controller, to SRAM and to DDR are completed
* before qtn_wmb() is finished.
* For complete safety Linux's wmb() should be defined
* through qtn_wmb(), but I am afraid it would kill performance.
*/
#ifndef __ASSEMBLY__
#define RUBY_SYS_CTL_SAFE_READ_REGISTER 0xE0000000
#if defined(__GNUC__) && defined(__i386__)
#define qtn_wmb() do {} while(0)
static inline unsigned long _qtn_addr_wmb(unsigned long *addr) { return *addr; }
#define qtn_addr_wmb(addr) _qtn_addr_wmb((unsigned long *)(addr))
#define qtn_pipeline_drain() do {} while(0)
#elif defined(__GNUC__)
/*GCC*/
#if defined(__arc__)
#define qtn_wmb() \
({ \
unsigned long temp; \
__asm__ __volatile__ ( \
"ld.di %0, [%1]\n\t" \
"ld.di %0, [%2]\n\t" \
"ld.di %0, [%3]\n\t" \
"sync\n\t" \
: "=r"(temp) \
: "i"(RUBY_DRAM_BEGIN + CONFIG_ARC_KERNEL_MEM_BASE), "i"(RUBY_SRAM_BEGIN + CONFIG_ARC_KERNEL_SRAM_B1_BASE), "i"(RUBY_SYS_CTL_SAFE_READ_REGISTER) \
: "memory"); \
})
#define qtn_addr_wmb(addr) \
({ \
unsigned long temp; \
__asm__ __volatile__ ( \
"ld.di %0, [%1]\n\t" \
"sync\n\t" \
: "=r"(temp) \
: "r"(addr) \
: "memory"); \
temp; \
})
#define qtn_pipeline_drain() \
({ \
__asm__ __volatile__ ( \
"sync\n\t" \
: : : "memory"); \
})
#else
#define qtn_wmb()
#define qtn_addr_wmb(addr) *((volatile uint32_t*)addr)
#define qtn_pipeline_drain()
#endif
#else
/*MCC*/
#if _ARCVER >= 0x31/*ARC7*/
#define _qtn_pipeline_drain() \
sync
#else
#define _qtn_pipeline_drain() \
nop_s; nop_s; nop_s
#endif
_Asm void qtn_wmb(void)
{
/*r12 is temporary register, so we can use it inside this function freely*/
ld.di %r12, [RUBY_DRAM_BEGIN + CONFIG_ARC_MUC_BASE]
ld.di %r12, [RUBY_SRAM_BEGIN + CONFIG_ARC_MUC_SRAM_B1_BASE]
ld.di %r12, [RUBY_SYS_CTL_SAFE_READ_REGISTER]
_qtn_pipeline_drain()
}
_Asm u_int32_t qtn_addr_wmb(unsigned long addr)
{
%reg addr;
ld.di %r0, [addr]
_qtn_pipeline_drain()
}
_Asm void qtn_pipeline_drain(void)
{
_qtn_pipeline_drain()
}
#endif
#endif
/*
* Problem - writing to the first half of a cache way trashes the second half.
* The idea is to lock the second half.
* We need to make sure that invalidation does not unlock these lines (a whole-
* cache invalidation does unlock them), or we need to re-lock the lines afterwards.
* A side effect is that half of the lines will be cached and half will not,
* so data may need to be shuffled to make the hot data cacheable.
*/
#define TOPAZ_CACHE_WAR_OFFSET 2048
#ifndef __ASSEMBLY__
#ifdef __GNUC__
RUBY_INLINE void qtn_cache_topaz_war_dcache_lock(unsigned long aux_reg, unsigned long val)
{
unsigned long addr;
unsigned long way_iter;
unsigned long line_iter;
asm volatile (
" sr %4, [%3]\n"
" mov %0, 0xA0000000\n"
" mov %1, 0\n"
"1: add %0, %0, 2048\n"
" mov %2, 0\n"
"2: sr %0, [0x49]\n"
" add %0, %0, 32\n"
" add %2, %2, 1\n"
" cmp %2, 64\n"
" bne 2b\n"
" add %1, %1, 1\n"
" cmp %1, 4\n"
" bne 1b\n"
: "=r"(addr), "=r"(way_iter), "=r"(line_iter)
: "r"(aux_reg), "r"(val)
);
}
#else
_Inline _Asm void qtn_cache_topaz_war_dcache_lock(unsigned long aux_reg, unsigned long val)
{
% reg aux_reg, reg val;
sr val, [aux_reg]
mov %r0, 0xA0000000
mov %r1, 0
1: add %r0, %r0, 2048
mov %r2, 0
2: sr %r0, [0x49]
add %r0, %r0, 32
add %r2, %r2, 1
cmp %r2, 64
bne 2b
add %r1, %r1, 1
cmp %r1, 4
bne 1b
}
#endif // #ifdef __GNUC__
#endif // #ifndef __ASSEMBLY__
#endif // #ifndef __RUBY_MEM_H

View File

@ -0,0 +1,160 @@
/*
* (C) Copyright 2012 Quantenna Communications Inc.
*
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
#ifndef __RUBY_PM_H
#define __RUBY_PM_H
/* Power save levels */
#define BOARD_PM_LEVEL_FORCE_NO (0)
#define BOARD_PM_LEVEL_NO (1)
#define BOARD_PM_LEVEL_SLOW_DOWN (2)
#define BOARD_PM_LEVEL_LATENCY_UP (3)
#define BOARD_PM_LEVEL_DISTANCE_DOWN (4)
#define BOARD_PM_LEVEL_IDLE (5)
#define BOARD_PM_LEVEL_SUSPEND (6)
#define BOARD_PM_LEVEL_INIT BOARD_PM_LEVEL_FORCE_NO
/* Duty level, shared between Lhost and MuC */
#define BOARD_PM_LEVEL_DUTY BOARD_PM_LEVEL_IDLE
/* Names of power save governors */
#define BOARD_PM_GOVERNOR_WLAN "wlan"
#define BOARD_PM_GOVERNOR_QDISC "qdisc"
#define BOARD_PM_GOVERNOR_QCSAPI "qcsapi"
/* wlan timings to switch between modes */
#define BOARD_PM_WLAN_IDLE_TIMEOUT (120 * HZ)
#define BOARD_PM_WLAN_DEFAULT_TIMEOUT (0)
/* qdisc parameters to switch between modes */
#define BOARD_PM_QDISC_TIMER_TIMEOUT (50/*ms*/ * HZ / 1000)
#define BOARD_PM_QDISC_SPEEDUP_THRESHOLD (400)
#define BOARD_PM_QDISC_SLOWDOWN_THRESHOLD (100)
#define BOARD_PM_QDISC_SLOWDOWN_COUNT (80)
#define BOARD_PM_QDISC_SLOWDOWN_TIMEOUT (3 * HZ)
#define BOARD_PM_QDISC_DEFAULT_TIMEOUT (0)
/* Beacon TSF setting */
#define BOARD_PM_WAKE_BEACON_TSF_DEADLINE_PCT (50)
#define BOARD_PM_WAKE_BEACON_TSF_ALERT_PCT (25)
/* Default setting, shared between Lhost and MuC */
#define BOARD_PM_PDUTY_PERIOD_MS_DEFAULT 80
#define BOARD_PM_PDUTY_PCT_LOW_DEFAULT 80
#define BOARD_PM_SUSPEND_PERIOD_MS_DEFAULT 100
#define BOARD_PM_SUSPEND_PCT_LOW_DEFAULT 99
/* Multiple Periods Support */
#define BOARD_PM_PERIOD_CHANGE_INTERVAL 1
#define BOARD_PM_PERIOD_CNT 3
enum qtn_pm_param {
QTN_PM_CURRENT_LEVEL,
QTN_PM_SUSPEND,
QTN_PM_SUSPEND_PERIOD_MS,
QTN_PM_SUSPEND_PCT_LOW,
QTN_PM_SUSPEND_HRESET,
QTN_PM_SUSPEND_ALLCHAINS_DISABLE,
QTN_PM_PDUTY,
QTN_PM_PDUTY_PERIOD_MS,
QTN_PM_PDUTY_PCT_LOW,
QTN_PM_PDUTY_HRESET,
QTN_PM_PDUTY_RXCHAINS_DISABLE,
QTN_PM_MUC_SLEEP,
QTN_PM_RXCHAIN_IDLE_COUNT,
QTN_PM_RXCHAIN_IDLE_LEVEL,
QTN_PM_TXCHAIN_IDLE_COUNT,
QTN_PM_TXCHAIN_IDLE_LEVEL,
QTN_PM_PAUSE_MGMT_PROBERESP,
QTN_PM_PAUSE_MGMT_ASSOCRESP,
QTN_PM_PAUSE_MGMT_AUTH,
/* For Multiple Periods Support */
QTN_PM_PERIOD_CHANGE_INTERVAL, /* Interval after which the period setting is changed (unit: seconds) */
QTN_PM_PERIOD_CNT, /* Number of periods in the period group (max 3) */
QTN_PM_PERIOD_GROUP, /* Period group (max 3 periods, each <= 255 ms, unit: milliseconds) */
QTN_PM_IOCTL_MAX
};
#define QTN_PM_PARAM_NAMES { \
"level", /* QTN_PM_CURRENT_LEVEL */ \
"suspend_level", /* QTN_PM_SUSPEND */ \
"suspend_period", /* QTN_PM_SUSPEND_PERIOD_MS */ \
"suspend_pct", /* QTN_PM_SUSPEND_PCT_LOW */ \
"suspend_hreset", /* QTN_PM_SUSPEND_HRESET */ \
"suspend_allchains", /* QTN_PM_SUSPEND_ALLCHAINS_DISABLE */ \
"pduty_level", /* QTN_PM_PDUTY */ \
"pduty_period", /* QTN_PM_PDUTY_PERIOD_MS */ \
"pduty_pct", /* QTN_PM_PDUTY_PCT_LOW */ \
"pduty_hreset", /* QTN_PM_PDUTY_HRESET */ \
"pduty_rxchains", /* QTN_PM_PDUTY_RXCHAINS_DISABLE */ \
"muc_sleep_level", /* QTN_PM_MUC_SLEEP */ \
"rxchain_count", /* QTN_PM_RXCHAIN_IDLE_COUNT */ \
"rxchain_level", /* QTN_PM_RXCHAIN_IDLE_LEVEL */ \
"txchain_count", /* QTN_PM_TXCHAIN_IDLE_COUNT */ \
"txchain_level", /* QTN_PM_TXCHAIN_IDLE_LEVEL */ \
"pause_proberesp", /* QTN_PM_PAUSE_MGMT_PROBERESP */ \
"pause_assocresp", /* QTN_PM_PAUSE_MGMT_ASSOCRESP */ \
"pause_auth", /* QTN_PM_PAUSE_MGMT_ASSOCRESP */ \
"period_change_interval", /* QTN_PM_PERIOD_CHANGE_INTERVAL */ \
"period_cnt", /* QTN_PM_PERIOD_CNT */ \
"period_group" /* QTN_PM_PERIOD_GROUP */ \
}
#define QTN_PM_PARAM_DEFAULTS { \
BOARD_PM_LEVEL_INIT, /* QTN_PM_CURRENT_LEVEL */ \
BOARD_PM_LEVEL_SUSPEND, /* QTN_PM_SUSPEND */ \
BOARD_PM_SUSPEND_PERIOD_MS_DEFAULT, /* QTN_PM_SUSPEND_PERIOD_MS */ \
BOARD_PM_SUSPEND_PCT_LOW_DEFAULT, /* QTN_PM_SUSPEND_PCT_LOW */ \
1, /* QTN_PM_SUSPEND_HRESET */ \
1, /* QTN_PM_SUSPEND_ALLCHAINS_DISABLE */ \
BOARD_PM_LEVEL_DUTY, /* QTN_PM_PDUTY */ \
BOARD_PM_PDUTY_PERIOD_MS_DEFAULT, /* QTN_PM_PDUTY_PERIOD_MS */ \
BOARD_PM_PDUTY_PCT_LOW_DEFAULT, /* QTN_PM_PDUTY_PCT_LOW */ \
0, /* QTN_PM_PDUTY_HRESET */ \
1, /* QTN_PM_PDUTY_RXCHAINS_DISABLE */ \
BOARD_PM_LEVEL_LATENCY_UP, /* QTN_PM_MUC_SLEEP */ \
4, /* QTN_PM_RXCHAIN_IDLE_COUNT */ \
BOARD_PM_LEVEL_DISTANCE_DOWN, /* QTN_PM_RXCHAIN_IDLE_LEVEL */ \
4, /* QTN_PM_TXCHAIN_IDLE_COUNT */ \
BOARD_PM_LEVEL_DISTANCE_DOWN, /* QTN_PM_TXCHAIN_IDLE_LEVEL */ \
60000, /* QTN_PM_PAUSE_MGMT_PROBERESP */ \
5000, /* QTN_PM_PAUSE_MGMT_ASSOCRESP */ \
5000, /* QTN_PM_PAUSE_MGMT_AUTH */ \
BOARD_PM_PERIOD_CHANGE_INTERVAL, /* QTN_PM_PERIOD_CHANGE_INTERVAL */ \
BOARD_PM_PERIOD_CNT, /* QTN_PM_PERIOD_CNT */ \
0x50321E /* QTN_PM_PERIOD_GROUP(30ms, 50ms, 80ms) */ \
}
#define QTN_PM_UNPACK_PARAM(x) ((x) & 0xFF)
#define QTN_PM_UNPACK_VALUE(x) ((x) >> 8)
#define QTN_PM_PACK_PARAM_VALUE(p, v) (((p) & 0xFF) | (((v) << 8) & 0xFFFFFF00))
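/*
 * Illustrative example (not in the original header), using the enum above:
 * QTN_PM_PDUTY_PCT_LOW has index 8, so
 *   QTN_PM_PACK_PARAM_VALUE(QTN_PM_PDUTY_PCT_LOW, 70) == 0x4608
 *   QTN_PM_UNPACK_PARAM(0x4608) == 8   (the parameter index)
 *   QTN_PM_UNPACK_VALUE(0x4608) == 70  (the parameter value)
 * i.e. the parameter index occupies the low byte and the value the upper 24 bits.
 */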
#endif /* __RUBY_PM_H */

View File

@ -0,0 +1,189 @@
/*
* (C) Copyright 2010 Quantenna Communications Inc.
*
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
/*
* Header file which describes Topaz platform.
* Has to be used by both kernel and bootloader.
*/
#ifndef __TOPAZ_CONFIG_H
#define __TOPAZ_CONFIG_H
#include "current_platform.h"
#ifdef TOPAZ_PLATFORM
#if !TOPAZ_FPGA_PLATFORM
#undef TOPAZ_ICACHE_WORKAROUND
#endif
#endif
/*
* Control registers move depending on unified + alias bit
*/
#ifdef TOPAZ_PLATFORM
#define TOPAZ_MMAP_UNIFIED 0
#define TOPAZ_MMAP_ALIAS 0
#define TOPAZ_RX_ACCELERATE 1
#else
#define TOPAZ_MMAP_UNIFIED 0
#define TOPAZ_MMAP_ALIAS 0
#define TOPAZ_RX_ACCELERATE 0
#endif
/* If MU-MIMO done in HDP or SDP */
#ifdef TOPAZ_PLATFORM
#define QTN_HDP_MU 1
#else
#define QTN_HDP_MU 0
#endif
#if QTN_HDP_MU
#define QTN_HDP_MU_FCS_WORKROUND 1
#else
#define QTN_HDP_MU_FCS_WORKROUND 0
#endif
#if TOPAZ_MMAP_ALIAS && !TOPAZ_MMAP_UNIFIED
#error Alias map requires unified map
#endif
#if TOPAZ_MMAP_ALIAS
#define TOPAZ_ALIAS_MAP_SWITCH(a, b) (b)
#else
#define TOPAZ_ALIAS_MAP_SWITCH(a, b) (a)
#endif
/* Topaz fixed phy addresses */
#define TOPAZ_FPGAA_PHY0_ADDR 2
#define TOPAZ_FPGAA_PHY1_ADDR 3
#define TOPAZ_FPGAB_PHY0_ADDR 4
#define TOPAZ_FPGAB_PHY1_ADDR 1
#define TOPAZ_PHY0_ADDR 1
#define TOPAZ_PHY1_ADDR 3
#ifndef TOPAZ_FPGA_PLATFORM
#define TOPAZ_FPGA_PLATFORM 0
#endif
#ifndef TOPAZ_VNET_WR_STAGING
#define TOPAZ_VNET_WR_STAGING 0
#endif
#define TOPAZ_VNET_WR_DMA_CHANNELS 2
#define TOPAZ_VNET_WR_STAGING_BUF_COUNT_PER_CHAIN 10
#define TOPAZ_VNET_WR_STAGING_BUF_SIZE 0x600
#if TOPAZ_VNET_WR_STAGING
#define TOPAZ_VNET_WR_STAGING_ALIGN 0x80
#define TOPAZ_VNET_WR_STAGING_GAP TOPAZ_VNET_WR_STAGING_ALIGN
#define TOPAZ_VNET_WR_STAGING_RESERVE ((TOPAZ_VNET_WR_STAGING_BUF_COUNT_PER_CHAIN * \
TOPAZ_VNET_WR_STAGING_BUF_SIZE) + \
TOPAZ_VNET_WR_STAGING_GAP)
#else
#define TOPAZ_VNET_WR_STAGING_RESERVE 0
#endif
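/*
 * Illustrative arithmetic (not in the original header): with the defaults above,
 * the per-chain staging reserve when TOPAZ_VNET_WR_STAGING is enabled is
 *   (10 * 0x600) + 0x80 = 0x3C80 bytes.
 */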
#ifdef TOPAZ_PLATFORM
/* Definition indicates that Topaz platform is FPGA */
#if TOPAZ_FPGA_PLATFORM
/* Clock rates below are in Hz, at 1/10th the speed of the actual ASIC */
#define TOPAZ_SERIAL_BAUD 38400
#define TOPAZ_APB_CLK 12500000
#define TOPAZ_AHB_CLK 25000000
#define TOPAZ_CPU_CLK 50000000
#define RUBY_FPGA_DDR
#else
#define TOPAZ_SERIAL_BAUD 115200
#define TOPAZ_APB_CLK 125000000
#define TOPAZ_AHB_CLK 250000000
#define TOPAZ_CPU_CLK 500000000
#define RUBY_ASIC_DDR
#endif /* #if TOPAZ_FPGA_PLATFORM */
/*
* Setting UPF_SPD_FLAG gives a developer the option to set the
* flag to match a UPF_ define from <linux>/include/linux/serial_core.h
* or set the value to 0 to use the default baud rate setting DEFAULT_BAUD
*/
#define UPF_SPD_FLAG 0
#define DEFAULT_BAUD TOPAZ_SERIAL_BAUD
/*
* Re-use Ruby defines to simplify the number of changes required
* to compile new binaries for Topaz
*/
#define RUBY_SERIAL_BAUD TOPAZ_SERIAL_BAUD
#define RUBY_FIXED_DEV_CLK TOPAZ_APB_CLK
#define RUBY_FIXED_CPU_CLK TOPAZ_CPU_CLK
#ifdef PLATFORM_DEFAULT_BOARD_ID
#define DEFAULT_BOARD_ID PLATFORM_DEFAULT_BOARD_ID
#else
/* Default board id used to match Topaz setting if there is no SPI Flash */
#define DEFAULT_BOARD_ID QTN_TOPAZ_BB_BOARD
#endif /* PLATFORM_DEFAULT_BOARD_ID */
#ifndef PLATFORM_ARC7_MMU_VER
#define PLATFORM_ARC7_MMU_VER 2
#endif
#define CONFIG_RUBY_BROKEN_IPC_IRQS 0
#define RUBY_IPC_HI_IRQ(bit_num) ((bit_num) + 8)
#define RUBY_M2L_IPC_HI_IRQ(bit_num) (bit_num)
#define PLATFORM_REG_SWITCH(reg1, reg2) (reg2)
#define writel_topaz(a, b) writel(a, b)
#define writel_ruby(a, b)
#define QTN_VLAN_LLC_ENCAP 1
#define TOPAZ_128_NODE_MODE 1
#define TOPAZ_ETH_REFLECT_SW_FWD 0
#define DSP_ENABLE_STATS 1
#else
#ifndef PLATFORM_ARC7_MMU_VER
#define PLATFORM_ARC7_MMU_VER 2
#endif
/*
* For BBIC3.
* Workaround for IPC interrupt hw flaw. When receiver dynamically masks/unmasks
* interrupt, the transmitter cannot distinguish whether interrupt was acked or just masked.
*/
#define CONFIG_RUBY_BROKEN_IPC_IRQS 1
#define RUBY_IPC_HI_IRQ(bit_num) ((bit_num) + 16)
#define RUBY_M2L_IPC_HI_IRQ(bit_num) ((bit_num) + 16)
#define PLATFORM_REG_SWITCH(reg1, reg2) (reg1)
#define writel_topaz(a, b)
#define writel_ruby(a, b) writel(a, b)
#define QTN_VLAN_LLC_ENCAP 0
#endif /* #ifdef TOPAZ_PLATFORM */
#endif /* #ifndef __TOPAZ_CONFIG_H */

View File

@ -0,0 +1,69 @@
/*
* Copyright (c) 2015 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* This code is taken from u-boot/include/image.h file
*/
#ifndef UBOOT_HEADER_H
#define UBOOT_HEADER_H
#ifndef __ASSEMBLY__
#define IH_MAGIC 0x27051956 /* Image Magic Number */
#define IH_NMLEN 32 /* Image Name Length */
/*
* Legacy format image header,
* all data in network byte order (aka natural aka bigendian).
*/
typedef struct image_header {
uint32_t ih_magic; /* Image Header Magic Number */
uint32_t ih_hcrc; /* Image Header CRC Checksum */
uint32_t ih_time; /* Image Creation Timestamp */
uint32_t ih_size; /* Image Data Size */
uint32_t ih_load; /* Data Load Address */
uint32_t ih_ep; /* Entry Point Address */
uint32_t ih_dcrc; /* Image Data CRC Checksum */
uint8_t ih_os; /* Operating System */
uint8_t ih_arch; /* CPU architecture */
uint8_t ih_type; /* Image Type */
uint8_t ih_comp; /* Compression Type */
uint8_t ih_name[IH_NMLEN]; /* Image Name */
} image_header_t;
static inline uint32_t image_get_header_size(void)
{
#define MAX_KNOWN_PAGE_SIZE 8192
#define ROUND_UP(N, S) ((((N) + (S) - 1) / (S)) * (S))
return ROUND_UP(sizeof(image_header_t), MAX_KNOWN_PAGE_SIZE);
}
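/*
 * Usage sketch (illustrative, not part of the original file): every field in
 * image_header_t is stored in network byte order, so a host-side sanity check
 * of a candidate image typically looks like
 *
 *	if (ntohl(hdr->ih_magic) != IH_MAGIC)
 *		return -1;	// not a legacy U-Boot image
 *
 * using ntohl()/be32_to_cpu() as appropriate for the build environment.
 */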
struct early_flash_config {
uint32_t method;
uint32_t ipaddr;
uint32_t serverip;
uint8_t built_time_utc_sec[11];
uint8_t uboot_type;
} __attribute__ ((packed));
#endif /* __ASSEMBLY__ */
#define RUBY_BOOT_METHOD_TRYLOOP 0
#define RUBY_BOOT_METHOD_TFTP 1
#define RUBY_BOOT_METHOD_BOOTP 2
#define RUBY_BOOT_METHOD_MAX 3
#endif /* UBOOT_HEADER_H */

View File

@ -0,0 +1,135 @@
/*-
* Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*
* $Id: compat.h 2601 2007-07-24 14:14:47Z kelmo $
*/
#ifndef _ATH_COMPAT_H_
#define _ATH_COMPAT_H_
/* Compatibility with older Linux kernels */
#if defined(__KERNEL__) || (defined(__linux__) && __linux__)
#include <linux/types.h>
#endif
#if !defined(__KERNEL__) || !defined (__bitwise)
#define __le16 u_int16_t
#define __le32 u_int32_t
#define __le64 u_int64_t
#define __be16 u_int16_t
#define __be32 u_int32_t
#define __be64 u_int64_t
#define __force
#endif
#ifndef container_of
#define container_of(ptr, type, member) ({ \
const typeof( ((type *)0)->member ) *__mptr = (ptr); \
(type *)( (char *)__mptr - offsetof(type,member) );})
#endif
/*
* BSD/Linux compatibility shims. These are used mainly to
* minimize differences when importing necessary BSD code.
*/
#ifndef NBBY
#define NBBY 8 /* number of bits/byte */
#endif
/* roundup() appears in Linux 2.6.18 */
#ifdef __KERNEL__
#include <linux/kernel.h>
#endif
#ifndef roundup
#define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) /* to any y */
#endif
#ifndef howmany
#define howmany(x, y) (((x)+((y)-1))/(y))
#endif
/* Bit map related macros. */
#define setbit(a,i) ((a)[(i)/NBBY] |= 1<<((i)%NBBY))
#define clrbit(a,i) ((a)[(i)/NBBY] &= ~(1<<((i)%NBBY)))
#define isset(a,i) ((a)[(i)/NBBY] & (1<<((i)%NBBY)))
#define isclr(a,i) (((a)[(i)/NBBY] & (1<<((i)%NBBY))) == 0)
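/*
 * Illustrative example (not part of the original header): with
 * u_int8_t map[2] = {0}, setbit(map, 10) sets bit 2 of map[1]
 * (10 / NBBY == 1, 10 % NBBY == 2), after which isset(map, 10) is non-zero.
 */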
#ifndef __packed
#define __packed __attribute__((__packed__))
#endif
#define __printflike(_a,_b) \
__attribute__ ((__format__ (__printf__, _a, _b)))
#define __offsetof(t,m) offsetof(t,m)
#ifndef ALIGNED_POINTER
/*
* ALIGNED_POINTER is a boolean macro that checks whether an address
* is valid to fetch data elements of type t from on this architecture.
* This does not reflect the optimal alignment, just the possibility
* (within reasonable limits).
*
*/
#define ALIGNED_POINTER(p,t) 1
#endif
#ifdef __KERNEL__
#define KASSERT(exp, msg) do { \
if (unlikely(!(exp))) { \
printk msg; \
BUG(); \
} \
} while (0)
#endif /* __KERNEL__ */
/*
* NetBSD/FreeBSD defines for file version.
*/
#define __FBSDID(_s)
#define __KERNEL_RCSID(_n,_s)
/*
* Fixes for Linux API changes
*/
#ifdef __KERNEL__
#include <linux/version.h>
#define ATH_REGISTER_SYSCTL_TABLE(t) register_sysctl_table(t)
#endif /* __KERNEL__ */
/* FIXME: this needs changing if we need to support TCM/SRAM for time critical code */
#define __tcm_text
#endif /* _ATH_COMPAT_H_ */

View File

@ -0,0 +1,50 @@
#
# Makefile for Octeon platform
#
ifndef OCTEON_ROOT
OCTEON_ROOT := ../..
endif
include $(OCTEON_ROOT)/common.mk
COMMON_DIR := ../common
MAIN_INCLUDES := $(PWD)/../../include
INCLUDES := -I$(COMMON_DIR) -I$(PWD) -I$(MAIN_INCLUDES)
CAVIUM_INCLUDES := -I ${OCTEON_ROOT}/target/include $(INCLUDES)
KERNEL_PATH := ../../../../linux/kernel_2.6/linux
kernel_source := $(KERNEL_PATH)
CROSS_COMPILE := mips64-octeon-linux-gnu-
CC = $(CROSS_COMPILE)gcc
LD = $(CROSS_COMPILE)ld
# Common flags to be passed for driver compilation
EXTRA_CFLAGS += -Winline -Wall -I arch/mips/cavium-octeon/gpl-executive/config ${CAVIUM_INCLUDES}
EXTRA_CFLAGS += -I $(srctree)/arch/mips/cavium-octeon/gpl-executive/config -I ${OCTEON_ROOT}/executive
EXTRA_CFLAGS += -I drivers/net/cavium-ethernet
EXTRA_CFLAGS += -DQDPC_CVM_DMA
EXTRA_CFLAGS += -DQTN_PCIE_USE_LOCAL_BUFFER
#EXTRA_CFLAGS += -DDEBUG
default: all
qdpc-host-objs := qdpc_cvm_dma.o \
qdpc_platform.o \
$(COMMON_DIR)/qdpc_ring.o \
$(COMMON_DIR)/qdpc_net.o \
$(COMMON_DIR)/qdpc_pcie.o \
$(COMMON_DIR)/qdpc_init.o
obj-m := qdpc-host.o
all:
$(MAKE) -C $(kernel_source) SUBDIRS=`pwd` modules;
clean:
rm -rf *.o *~ core .depend .*.cmd *.ko *.mod.c .tmp_versions Module.symvers
rm -rf $(COMMON_DIR)/.*.cmd $(COMMON_DIR)/.tmp_versions
rm -rf Module.markers modules.order $(qdpc-host-objs)
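# Example invocation (illustrative; the paths are assumptions and must match
# the local Octeon SDK and kernel tree layout):
#   make OCTEON_ROOT=/path/to/OCTEON-SDK KERNEL_PATH=/path/to/linux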

View File

@ -0,0 +1,159 @@
/**
* Copyright (c) 2011-2013 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#ifdef QDPC_CVM_DMA
#include <asm/octeon/cvmx.h>
#include "cvmx-cmd-queue.h"
#include "cvmx-version.h"
#include "cvmx-atomic.h"
#include "cvmx-pip.h"
#include "cvmx-ipd.h"
#include "cvmx-pow.h"
#include "cvmx-spi.h"
#include "cvmx-bootmem.h"
#include "cvmx-app-init.h"
#include "cvmx-helper.h"
#include "cvmx-helper-board.h"
#include "cvmx-npei-defs.h"
#include "cvmx-pexp-defs.h"
#include <asm/octeon/cvmx-fpa.h>
#include "cvmx-dma-engine.h"
#include "cvmx-helper-fpa.h"
#include "cvmx-pcie.h"
#include "cvmx-pciercx-defs.h"
#include "cvmx-pescx-defs.h"
#include "cvmx-pemx-defs.h"
#include <linux/version.h>
#include "qdpc_platform.h"
#define BYTE_SWAP_MODE_64b 1
#define NUM_PACKET_BUFFERS 1024
#define PRINT_ERROR printk
extern int cvm_oct_mem_fill_fpa(int pool, int size, int elements);
int qdpc_pcie_port = 0;
int qdpc_init_dma_func(void)
{
cvmx_npei_dma_control_t dma_control;
int port = 0, value = 0;
/* Free pool buffers are allocated. These are used by DMA engine. */
cvmx_fpa_enable();
cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE, NUM_PACKET_BUFFERS);
cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE, NUM_PACKET_BUFFERS);
if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL) {
cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
}
cvmx_helper_setup_red(NUM_PACKET_BUFFERS/4, NUM_PACKET_BUFFERS/8);
/* Initializing DMA engine */
if(cvmx_dma_engine_initialize()) {
PRINT_ERROR(KERN_ERR " cvmx_dma_engine_initialize failed\n");
return -1;
}
dma_control.u64 = 0;
dma_control.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DMA_CONTROL);
/* Enable endian-swap mode for DMA. */
dma_control.s.o_es = BYTE_SWAP_MODE_64b;
/*
* 1 = use pointer values for address and register values for O_RO, O_ES, and O_NS.
* 0 = use register values for address and pointer values for O_RO, O_ES, and O_NS.
*/
dma_control.s.o_mode = 1;
cvmx_write_csr(CVMX_PEXP_NPEI_DMA_CONTROL, dma_control.u64);
for(port = 0 ; port < 2; port ++) {
value = cvmx_pcie_cfgx_read(port, CVMX_PCIERCX_CFG032(port));
if(value & ( 1<< 29)) {
qdpc_pcie_port = port;
break; /* Assuming only one pcie card is connected to Cavium board */
}
}
return 0;
}
uint32_t cvmx_pcie_cfgx_read(int port, uint32_t cfg_offset)
{
if (octeon_has_feature(OCTEON_FEATURE_NPEI)) {
cvmx_pescx_cfg_rd_t pescx_cfg_rd;
pescx_cfg_rd.u64 = 0;
pescx_cfg_rd.s.addr = cfg_offset;
cvmx_write_csr(CVMX_PESCX_CFG_RD(port), pescx_cfg_rd.u64);
pescx_cfg_rd.u64 = cvmx_read_csr(CVMX_PESCX_CFG_RD(port));
return pescx_cfg_rd.s.data;
}
else {
cvmx_pemx_cfg_rd_t pemx_cfg_rd;
pemx_cfg_rd.u64 = 0;
pemx_cfg_rd.s.addr = cfg_offset;
cvmx_write_csr(CVMX_PEMX_CFG_RD(port), pemx_cfg_rd.u64);
pemx_cfg_rd.u64 = cvmx_read_csr(CVMX_PEMX_CFG_RD(port));
return pemx_cfg_rd.s.data;
}
}
int qdpc_do_dma_transfer(u_int8_t dma_type, u_int64_t local_addr, u_int64_t host_addr, u_int32_t size)
{
cvmx_dma_engine_header_t header;
volatile u_int8_t stat;
int count = 0;
header.u64 = 0;
header.s.type = dma_type;
header.s.lport = qdpc_pcie_port; /* Physical hardware pcie port on which pcie read/write takes place */
/* Initializing status to some value */
stat = 0x65;
header.s.addr = cvmx_ptr_to_phys((void *)&stat); /* DMA completion status */
/* DMA transfer using engine 0 (engine numbers 0 - 4).
* One engine is assigned for DMAing user data
* between the host and the Octeon user mgmt app.
*/
if(cvmx_dma_engine_transfer(0 , header, local_addr, host_addr & 0xffffffff, size)) {
PRINT_ERROR("cvmx_dma_engine_transfer: qdpc_do_dma_transfer failed\n");
return -1;
}
while((stat != 0) &&(count < 10000)) { /* Checking DMA completion status */
CVMX_SYNCW;
count ++;
}
if(stat) {
PRINT_ERROR("qdpc_do_dma_transfer failed \n");
return -1;
}
return 0;
}
#endif /* QDPC_CVM_DMA */

View File

@ -0,0 +1,57 @@
/**
* Copyright (c) 2011-2013 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#include <linux/version.h>
#include <linux/pci.h>
#include "qdpc_platform.h"
int qdpc_platform_init(void)
{
#ifdef QDPC_CVM_DMA
if(qdpc_init_dma_func()) {
printk("Cavium DMA init failed \n");
return -1;
}
#endif /* QDPC_CVM_DMA */
return 0;
}
void qdpc_platform_exit(void)
{
#ifdef QDPC_CVM_DMA
/* Shutting down DMA engine */
cvmx_dma_engine_shutdown();
#endif /* QDPC_CVM_DMA */
return;
}
void qdpc_platform_xfer(void *dst, void *src, unsigned int len)
{
#ifdef QDPC_CVM_DMA
qdpc_do_dma_transfer(CVMX_DMA_ENGINE_TRANSFER_OUTBOUND, cvmx_ptr_to_phys(src),
cvmx_ptr_to_phys(dst), len);
#else /* QDPC_CVM_DMA */
/* Copying skb data into packet buffer */
memcpy_toio(dst, src, len);
#endif
return;
}

View File

@ -0,0 +1,48 @@
/**
* Copyright (c) 2011-2013 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#ifndef __QDPC_PFDEP_H__
#define __QDPC_PFDEP_H__
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
#define IOREMAP ioremap_wc
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) */
#define IOREMAP ioremap
#endif
#ifdef QDPC_CVM_DMA
#include <asm/octeon/cvmx.h>
#include "cvmx-dma-engine.h"
int qdpc_init_dma_func(void);
int qdpc_do_dma_transfer(u_int8_t dma_type, u_int64_t local_addr,
u_int64_t host_addr, u_int32_t size);
#endif /* QDPC_CVM_DMA */
#define SRAM_TEXT
#define SRAM_DATA
int qdpc_platform_init(void);
void qdpc_platform_exit(void);
void qdpc_platform_xfer(void *dst, void *src, unsigned int len);
#endif /* __QDPC_PFDEP_H__ */

View File

@ -0,0 +1,547 @@
/**
* Copyright (c) 2011-2013 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/version.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/netlink.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include "qdpc_config.h"
#include "qdpc_debug.h"
#include "qdpc_init.h"
#include "qdpc_regs.h"
#include "qdpc_ruby.h"
#include "qdpc_platform.h"
MODULE_AUTHOR("Quantenna");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Virtual ethernet driver for Quantenna Ruby device");
MODULE_VERSION(QDPC_MODULE_VERSION);
#ifdef TOPAZ_PLATFORM
#define QDPC_RUBY_IMG "topaz-linux.lzma.img"
#else
#define QDPC_RUBY_IMG "ruby-linux.lzma.img"
#endif
#define EP_BOOT_FROM_FLASH 1
/* Quantenna PCIE vendor and device identifiers */
static struct pci_device_id qdpc_pcie_ids[] = {
{PCI_DEVICE(QDPC_VENDOR_ID, QDPC_DEVICE_ID),},
{0,}
};
MODULE_DEVICE_TABLE(pci, qdpc_pcie_ids);
static int qdpc_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void qdpc_pcie_remove(struct pci_dev *pdev);
static int qdpc_boot_thread(void *data);
static void qdpc_nl_recv_msg(struct sk_buff *skb);
int qdpc_init_netdev(struct net_device **net_dev, struct pci_dev *pdev);
char qdpc_pcie_driver_name[] = "qdpc_pcie_device_driver";
static struct pci_driver qdpc_pcie_driver = {
.name = qdpc_pcie_driver_name,
.id_table = qdpc_pcie_ids,
.probe = qdpc_pcie_probe,
.remove = qdpc_pcie_remove,
};
static struct net_device *qdpc_net_dev_g = NULL;
struct sock *qdpc_nl_sk = NULL;
int qdpc_clntPid = 0;
unsigned int (*qdpc_pci_readl)(void *addr) = qdpc_readl;
void (*qdpc_pci_writel)(unsigned int val, void *addr) = qdpc_writel;
static int qdpc_bootpoll(struct qdpc_priv *p, uint32_t state)
{
while (!kthread_should_stop() && (qdpc_isbootstate(p,state) == 0)) {
if (qdpc_booterror(p))
return -1;
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(QDPC_SCHED_TIMEOUT);
}
return 0;
}
static void booterror(qdpc_pcie_bda_t *bda)
{
if (PCIE_BDA_TARGET_FWLOAD_ERR & qdpc_pci_readl(&bda->bda_flags))
printk("EP boot from download firmware failed!\n");
else if (PCIE_BDA_TARGET_FBOOT_ERR & qdpc_pci_readl(&bda->bda_flags))
printk("EP boot from flash failed! Please check if there is usable image in Target flash.\n");
else
printk("EP boot get in error, dba flag: 0x%x\n", qdpc_pci_readl(&bda->bda_flags));
}
static void qdpc_pci_endian_detect(struct qdpc_priv *priv)
{
__iomem qdpc_pcie_bda_t *bda = priv->bda;
volatile uint32_t pci_endian;
writel(QDPC_PCI_ENDIAN_DETECT_DATA, &bda->bda_pci_endian);
mmiowb();
writel(QDPC_PCI_ENDIAN_VALID_STATUS, &bda->bda_pci_pre_status);
while (readl(&bda->bda_pci_post_status) != QDPC_PCI_ENDIAN_VALID_STATUS) {
if (kthread_should_stop())
break;
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(QDPC_SCHED_TIMEOUT);
}
pci_endian = readl(&bda->bda_pci_endian);
if (pci_endian == QDPC_PCI_LITTLE_ENDIAN) {
qdpc_pci_readl = qdpc_readl;
qdpc_pci_writel = qdpc_writel;
printk("PCI memory is little endian\n");
} else if (pci_endian == QDPC_PCI_BIG_ENDIAN) {
qdpc_pci_readl = qdpc_le32_readl;
qdpc_pci_writel = qdpc_le32_writel;
printk("PCI memory is big endian\n");
} else {
qdpc_pci_readl = qdpc_readl;
qdpc_pci_writel = qdpc_writel;
printk("PCI memory endian value:%08x is invalid - using little endian\n", pci_endian);
}
/* Clear endian flags */
writel(0, &bda->bda_pci_pre_status);
writel(0, &bda->bda_pci_post_status);
writel(0, &bda->bda_pci_endian);
}
static int qdpc_firmware_load(struct pci_dev *pdev, struct qdpc_priv *priv)
{
#define DMABLOCKSIZE (64*1024)
#define NBLOCKS(size) ((size)/(DMABLOCKSIZE) + (((size)%(DMABLOCKSIZE) > 0) ? 1 : 0))
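/*
 * Illustrative example (not in the original source): with DMABLOCKSIZE of
 * 64 KiB, a 150000-byte firmware image gives NBLOCKS(150000) == 3
 * (two full 65536-byte blocks plus one 18928-byte remainder block).
 */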
int result = SUCCESS;
const struct firmware *fw;
__iomem qdpc_pcie_bda_t *bda = priv->bda;
/* Request compressed firmware from user space */
if ((result = request_firmware(&fw, QDPC_RUBY_IMG, &pdev->dev)) == -ENOENT) {
/*
* No firmware found in the firmware directory, skip firmware downloading process
* boot from flash directly on target
*/
printk( "no firmware found skip fw downloading\n");
qdpc_pcie_posted_write((PCIE_BDA_HOST_NOFW_ERR |
qdpc_pci_readl(&bda->bda_flags)), &bda->bda_flags);
return FAILURE;
} else if (result == SUCCESS) {
uint32_t nblocks = NBLOCKS(fw->size);
uint32_t remaining = fw->size;
uint32_t count;
uint32_t dma_offset = qdpc_pci_readl(&bda->bda_dma_offset);
void *data = kmalloc(DMABLOCKSIZE, GFP_KERNEL|GFP_DMA);
const uint8_t *curdata = fw->data;
dma_addr_t handle = 0;
if (!data) {
printk(KERN_ERR "Allocation failed for memory size[%u] Download firmware failed!\n", DMABLOCKSIZE);
release_firmware(fw);
qdpc_pcie_posted_write((PCIE_BDA_HOST_MEMALLOC_ERR |
qdpc_pci_readl(&bda->bda_flags)), &bda->bda_flags);
return FAILURE;
}
handle = pci_map_single(priv->pdev, data ,DMABLOCKSIZE, PCI_DMA_TODEVICE);
if (!handle) {
printk("Pci map for memory data block 0x%p error, Download firmware failed!\n", data);
kfree(data);
release_firmware(fw);
qdpc_pcie_posted_write((PCIE_BDA_HOST_MEMMAP_ERR |
qdpc_pci_readl(&bda->bda_flags)), &bda->bda_flags);
return FAILURE;
}
qdpc_setbootstate(priv, QDPC_BDA_FW_HOST_LOAD);
qdpc_bootpoll(priv, QDPC_BDA_FW_EP_RDY);
/* Start loading firmware */
for (count = 0 ; count < nblocks; count++)
{
uint32_t size = (remaining > DMABLOCKSIZE) ? DMABLOCKSIZE : remaining;
memcpy(data, curdata, size);
/* flush dcache */
pci_dma_sync_single_for_device(priv->pdev, handle ,size, PCI_DMA_TODEVICE);
qdpc_pcie_posted_write(handle + dma_offset, &bda->bda_img);
qdpc_pcie_posted_write(size, &bda->bda_img_size);
printk("FW Data[%u]: VA:0x%p PA:0x%p Sz=%u..\n", count, (void *)curdata, (void *)handle, size);
qdpc_setbootstate(priv, QDPC_BDA_FW_BLOCK_RDY);
qdpc_bootpoll(priv, QDPC_BDA_FW_BLOCK_DONE);
remaining = (remaining < size) ? remaining : (remaining - size);
curdata += size;
printk("done!\n");
}
pci_unmap_single(priv->pdev,handle, DMABLOCKSIZE, PCI_DMA_TODEVICE);
/* Mark end of block */
qdpc_pcie_posted_write(0, &bda->bda_img);
qdpc_pcie_posted_write(0, &bda->bda_img_size);
qdpc_setbootstate(priv, QDPC_BDA_FW_BLOCK_RDY);
qdpc_bootpoll(priv, QDPC_BDA_FW_BLOCK_DONE);
qdpc_setbootstate(priv, QDPC_BDA_FW_BLOCK_END);
PRINT_INFO("Image. Sz:%u State:0x%x\n", (uint32_t)fw->size, qdpc_pci_readl(&bda->bda_bootstate));
qdpc_bootpoll(priv, QDPC_BDA_FW_LOAD_DONE);
kfree(data);
release_firmware(fw);
PRINT_INFO("Image downloaded....!\n");
} else {
PRINT_ERROR("Failed to load firmware:%d\n", result);
return result;
}
return result;
}
static int qdpc_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct qdpc_priv *priv = NULL;
struct net_device *ndev = NULL;
int result = SUCCESS;
/* Net device initialization */
if ((result = qdpc_init_netdev(&ndev, pdev)) < 0) {
return result;
}
qdpc_net_dev_g = ndev;
priv = netdev_priv(ndev);
/* Check if the device has PCI express capability */
if (!pci_find_capability(pdev, PCI_CAP_ID_EXP)) {
PRINT_ERROR(KERN_ERR "The device %x does not have PCI Express capability\n",
pdev->device);
result = -ENOSYS;
goto out;
} else {
PRINT_DBG(KERN_INFO "The device %x has PCI Express capability\n", pdev->device);
}
/* Wake up the device if it is in suspended state and allocate IO,
* memory regions and IRQ if not
*/
if (pci_enable_device(pdev)) {
PRINT_ERROR(KERN_ERR "Failed to initialize PCI device with device ID %x\n",
pdev->device);
result = -EIO;
goto out;
} else {
PRINT_DBG(KERN_INFO "Initialized PCI device with device ID %x\n", pdev->device);
}
/*
* Check if the PCI device can support DMA addressing properly.
* The mask gives the bits that the device can address
*/
pci_set_master(pdev);
/* Initialize PCIE layer */
if (( result = qdpc_pcie_init_intr_and_mem(ndev)) < 0) {
PRINT_DBG("Interrupt & Memory Initialization failed \n");
goto release_memory;
}
/* Create and start the thread to initiate the INIT Handshake*/
priv->init_thread = kthread_run(qdpc_boot_thread, priv, "qdpc_init_thread");
if (priv->init_thread == NULL) {
PRINT_ERROR("Init thread creation failed \n");
goto free_mem_interrupt;
}
/* Register net device with the kernel */
if ((result = register_netdev(ndev))) {
PRINT_DBG("veth: error %d registering net device \"%s\"\n",
result, ndev->name);
goto stop_init_kthread;
}
/* Create netlink & register with kernel */
priv->netlink_socket = netlink_kernel_create(&init_net, QDPC_NETLINK_RPC_PCI, 0, qdpc_nl_recv_msg,
NULL, THIS_MODULE);
if (priv->netlink_socket) {
return SUCCESS;
}
PRINT_ERROR(KERN_ALERT "Error creating netlink socket.\n");
result = FAILURE;
/* If any failure is happened. */
stop_init_kthread:
if (priv->init_thread)
kthread_stop(priv->init_thread);
free_mem_interrupt:
qdpc_pcie_free_mem(pdev);
qdpc_free_interrupt(pdev);
release_memory:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
/* Releasing the memory region if any error occurred */
pci_clear_master(pdev);
#endif
pci_disable_device(pdev);
out:
free_netdev(ndev);
/* Clear drvdata so that remove() can return immediately after a failed probe */
pci_set_drvdata(pdev, NULL);
return result;
}
static void qdpc_pcie_remove(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
struct qdpc_priv *priv = NULL;
/* Any failure in probe */
if (ndev == NULL) {
return;
}
priv = netdev_priv(ndev);
/* Net device cleanup */
unregister_netdev(ndev);
qdpc_net_dev_g = NULL;
/* Stopping kthread */
if (priv->init_thread)
kthread_stop(priv->init_thread);
qdpc_datapath_uninit(priv);
qdpc_unmap_iomem(priv);
if (priv->netlink_socket) {
/* release netlink socket */
netlink_kernel_release(priv->netlink_socket);
}
/* Free allocated memory */
qdpc_pcie_free_mem(pdev);
/* Free interrupt line. */
qdpc_free_interrupt(pdev);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
pci_clear_master(pdev);
#endif
/* Disable the device */
pci_disable_device(pdev);
/* Free netdevice */
free_netdev(ndev);
return;
}
static int __init qdpc_init_module(void)
{
int ret;
PRINT_DBG(KERN_INFO "Quantenna pcie driver initialization\n");
if (qdpc_platform_init()) {
PRINT_ERROR("Platform initilization failed \n");
ret = FAILURE;
return ret;
}
/* Register the pci driver with device*/
if ((ret = pci_register_driver(&qdpc_pcie_driver)) < 0 ) {
PRINT_ERROR("Could not register the driver to pci : %d\n", ret);
ret = -ENODEV;
return ret;
}
return ret;
}
static void __exit qdpc_exit_module(void)
{
/* Release netlink */
qdpc_platform_exit();
/* Unregister the pci driver with the device */
pci_unregister_driver(&qdpc_pcie_driver);
return;
}
static int qdpc_boot_thread(void *data)
{
struct qdpc_priv *priv = (struct qdpc_priv *)data;
__iomem qdpc_pcie_bda_t *bda = priv->bda;
struct net_device *ndev;
unsigned char macaddr[ETH_ALEN] = {0};
uint32_t tmp;
qdpc_pci_endian_detect(priv);
printk("Setting HOST ready...\n");
qdpc_setbootstate(priv, QDPC_BDA_FW_HOST_RDY);
qdpc_bootpoll(priv, QDPC_BDA_FW_TARGET_RDY);
if (qdpc_set_dma_mask(priv)){
printk("Failed to map DMA mask.\n");
priv->init_thread = NULL;
do_exit(-1);
}
if ((PCIE_BDA_FLASH_PRESENT & qdpc_pci_readl(&bda->bda_flags)) && EP_BOOT_FROM_FLASH) {
printk("EP have fw in flash, boot from flash\n");
qdpc_pcie_posted_write((PCIE_BDA_FLASH_BOOT |
qdpc_pci_readl(&bda->bda_flags)), &bda->bda_flags);
qdpc_setbootstate(priv, QDPC_BDA_FW_TARGET_BOOT);
qdpc_bootpoll(priv, QDPC_BDA_FW_FLASH_BOOT);
goto fw_start;
}
qdpc_setbootstate(priv, QDPC_BDA_FW_TARGET_BOOT);
printk("EP FW load request...\n");
qdpc_bootpoll(priv, QDPC_BDA_FW_LOAD_RDY);
printk("Start Firmware download...\n");
if (qdpc_firmware_load(priv->pdev, priv)){
printk("Failed to download firmware.\n");
priv->init_thread = NULL;
do_exit(-1);
}
fw_start:
qdpc_setbootstate(priv, QDPC_BDA_FW_START);
printk("Start booting EP...\n");
if (qdpc_bootpoll(priv,QDPC_BDA_FW_CONFIG)) {
booterror(bda);
priv->init_thread = NULL;
do_exit(-1);
}
printk("EP boot successful, starting config...\n");
if (qdpc_dma_setup(priv)) {
printk("Failed to initialize DMA.\n");
priv->init_thread = NULL;
do_exit(-1);
}
qdpc_setbootstate(priv, QDPC_BDA_FW_RUN);
qdpc_bootpoll(priv,QDPC_BDA_FW_RUNNING);
ndev = priv->ndev;
/*
* Get MAC address from boot data area.
* Two fields (bda_pci_pre_status and bda_pci_endian) are overloaded for
* this purpose in order to avoid updating the bootloader.
*/
tmp = qdpc_pci_readl(&bda->bda_pci_pre_status);
macaddr[0] = tmp & 0xFF;
macaddr[1] = (tmp >> 8) & 0xFF;
macaddr[2] = (tmp >> 16) & 0xFF;
macaddr[3] = (tmp >> 24) & 0xFF;
tmp = qdpc_pci_readl(&bda->bda_pci_endian);
macaddr[4] = tmp & 0xFF;
macaddr[5] = (tmp >> 8) & 0xFF;
memcpy(ndev->dev_addr, macaddr, ETH_ALEN);
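/*
 * Illustrative example (not in the original source): if bda_pci_pre_status
 * reads 0x44332211 and bda_pci_endian reads 0x00006655, the resulting MAC
 * address is 11:22:33:44:55:66 (least significant byte first).
 */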
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
ndev->netdev_ops->ndo_open(ndev);
#else
ndev->open(ndev);
#endif
PRINT_INFO("Connection established with Target BBIC3 board\n");
tasklet_enable(&priv->rx_tasklet);
tasklet_enable(&priv->txd_tasklet);
priv->init_thread = NULL;
do_exit(0);
}
static void qdpc_nl_recv_msg(struct sk_buff *skb)
{
struct qdpc_priv *priv = netdev_priv(qdpc_net_dev_g);
struct nlmsghdr *nlh = (struct nlmsghdr*)skb->data;
struct sk_buff *skb2;
/* Parsing the netlink message */
PRINT_DBG(KERN_INFO "%s line %d Netlink received pid:%d, size:%d, type:%d\n",
__FUNCTION__, __LINE__, nlh->nlmsg_pid, nlh->nlmsg_len, nlh->nlmsg_type);
switch (nlh->nlmsg_type) {
case QDPC_NETLINK_TYPE_CLNT_REGISTER:
if (nlh->nlmsg_len) {
PRINT_DBG(KERN_INFO "%s line %d Netlink received client register size:%d\n",
__FUNCTION__, __LINE__, nlh->nlmsg_len);
}
priv->netlink_pid = nlh->nlmsg_pid; /* pid of sending process */
return;
case QDPC_NETLINK_TYPE_CLNT_REQUEST:
break;
default:
PRINT_DBG(KERN_INFO "%s line %d Netlink Invalid type %d\n",
__FUNCTION__, __LINE__, nlh->nlmsg_type);
return;
}
/*
* Make a new skb. The old skb will be freed in netlink_unicast_kernel,
* but we must hold the new skb until the DMA transfer is done.
*/
skb2 = alloc_skb(nlh->nlmsg_len+sizeof(qdpc_cmd_hdr_t), GFP_ATOMIC);
if (skb2) {
qdpc_cmd_hdr_t *cmd_hdr;
cmd_hdr = (qdpc_cmd_hdr_t *)skb2->data;
memcpy(cmd_hdr->dst_magic, QDPC_NETLINK_DST_MAGIC, ETH_ALEN);
memcpy(cmd_hdr->src_magic, QDPC_NETLINK_SRC_MAGIC, ETH_ALEN);
cmd_hdr->type = htons(QDPC_APP_NETLINK_TYPE);
cmd_hdr->len = htons(nlh->nlmsg_len);
memcpy(skb2->data+sizeof(qdpc_cmd_hdr_t), skb->data+sizeof(struct nlmsghdr), nlh->nlmsg_len);
skb_put(skb2, nlh->nlmsg_len+sizeof(qdpc_cmd_hdr_t));
qdpc_send_packet(skb2, qdpc_net_dev_g);
}
}
module_init(qdpc_init_module);
module_exit(qdpc_exit_module);

View File

@ -0,0 +1,168 @@
/**
* Copyright (c) 2011-2013 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#ifndef __QDPC_INIT_H_
#define __QDPC_INIT_H_
#include <asm/io.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#define QDPC_MODULE_NAME "qdpc_ruby"
#define QDPC_DEV_NAME "qdpc_ruby"
#define QDPC_MODULE_VERSION "1.0"
/*
* Netlink Message types.
*/
#define QDPC_NETLINK_RPC_PCI 31
#define QDPC_NETLINK_TYPE_CLNT_REGISTER 10
#define QDPC_NETLINK_TYPE_CLNT_REQUEST 11
/* PCIe device information declarations */
#define QDPC_VENDOR_ID 0x1bb5
#define QDPC_DEVICE_ID 0x0008
#define QDPC_PCIE_NUM_BARS 6
typedef struct qdpc_bar {
void *b_vaddr; /* PCIe bar virtual address */
dma_addr_t b_busaddr; /* PCIe bar physical address */
size_t b_len; /* Bar resource length */
uint32_t b_offset; /* Offset from start of map */
uint8_t b_index; /* Bar Index */
} qdpc_bar_t;
#define QDPC_BAR_VADDR(bar, off) ((bar).b_vaddr +(off))
extern unsigned int (*qdpc_pci_readl)(void *addr);
extern void (*qdpc_pci_writel)(unsigned int val, void *addr);
/* Driver private housekeeping data structures. */
struct qdpc_priv {
struct pci_dev *pdev; /* Points Pci device */
struct net_device *ndev; /* points net device */
struct net_device_stats stats; /* Network statistics */
int irq; /* Interrupt line */
struct tasklet_struct rx_tasklet; /* Tasklet scheduled in interrupt handler */
struct tasklet_struct txd_tasklet; /* Transmit Done Tasklet scheduled in interrupt handler */
struct task_struct *init_thread; /* Initialization thread */
struct timer_list txq_enable_timer; /* timer for enable tx */
spinlock_t lock; /* Private structure lock */
enum qdpc_drv_state init_done; /* State of driver */
int msi_enabled; /* Supports msi or not */
qdpc_pktring_t pktq;
qdpc_dmadata_t *dsdata; /* Downstream data */
qdpc_dmadata_t *usdata; /* Upstream data */
struct sock *netlink_socket;
uint32_t netlink_pid;
qdpc_bar_t sysctl_bar;
qdpc_bar_t epmem_bar;
qdpc_epshmem_hdr_t epmem;
/* io memory pointers */
__iomem qdpc_pcie_bda_t *bda;
__iomem uint32_t *ep2host_irqstatus; /* IRQ Cause from EP */
__iomem uint32_t *host2ep_irqstatus; /* IRQ Cause from HOST */
__iomem int32_t *host_ep2h_txd_budget;
__iomem int32_t *host_h2ep_txd_budget;
};
/*
* The end-point (EP) is little-endian.
* These two macros are used for host-side outbound window memory accesses.
* "Outbound" is from the host's point of view, so memory accessed through
* these macros lives on the EP side.
* NOTE: On some platforms, outbound hardware swap (byte-order swap) must be
* enabled for outbound memory accesses to work correctly. When it is enabled,
* the endian translation is done by hardware and the software translation
* should be disabled.
*/
#ifdef OUTBOUND_HW_SWAP
#define le32_readl(x) readl(x)
#define le32_writel(x, addr) writel(x, addr)
#else
#define le32_readl(x) le32_to_cpu(readl((x)))
#define le32_writel(x, addr) writel(cpu_to_le32((x)), addr)
#endif
static inline unsigned int qdpc_readl(void *addr)
{
return readl(addr);
}
static inline void qdpc_writel(unsigned int val, void *addr)
{
writel(val, addr);
}
static inline unsigned int qdpc_le32_readl(void *addr)
{
return le32_to_cpu(readl((addr)));
}
static inline void qdpc_le32_writel(unsigned int val, void *addr)
{
writel(cpu_to_le32((val)), addr);
}
static inline void qdpc_pcie_posted_write(uint32_t val, __iomem void *basereg)
{
qdpc_pci_writel(val,basereg);
/* flush posted write */
qdpc_pci_readl(basereg);
}
static inline int qdpc_isbootstate(struct qdpc_priv *p, uint32_t state) {
__iomem uint32_t *status = &p->bda->bda_bootstate;
uint32_t s = qdpc_pci_readl(status);
return (s == state);
}
static inline int qdpc_booterror(struct qdpc_priv *p) {
__iomem uint32_t *status = &p->bda->bda_flags;
uint32_t s = qdpc_pci_readl(status);
return (s & PCIE_BDA_ERROR_MASK);
}
static inline void qdpc_setbootstate(struct qdpc_priv *p, uint32_t state) {
__iomem qdpc_pcie_bda_t *bda = p->bda;
qdpc_pcie_posted_write(state, &bda->bda_bootstate);
}
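/*
 * Usage sketch (illustrative; mirrors qdpc_boot_thread() in the PCIe driver
 * source): the host advances the boot handshake by writing its own state and
 * then polling for the target's state, e.g.
 *
 *	qdpc_setbootstate(priv, QDPC_BDA_FW_HOST_RDY);
 *	// then poll until qdpc_isbootstate(priv, QDPC_BDA_FW_TARGET_RDY),
 *	// bailing out if qdpc_booterror(priv) reports an error in bda_flags.
 */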
/* Function prototypes */
int qdpc_pcie_init_intr_and_mem(struct net_device *ndev);
int qdpc_dma_setup(struct qdpc_priv *priv);
void qdpc_interrupt_target(struct qdpc_priv *priv, uint32_t intr);
void qdpc_disable_irqs(struct qdpc_priv *priv);
void qdpc_enable_irqs(struct qdpc_priv *priv);
void qdpc_free_interrupt(struct pci_dev *pdev);
void qdpc_pcie_free_mem(struct pci_dev *pdev);
void qdpc_datapath_init(struct qdpc_priv *priv);
void qdpc_init_target_buffers(void *data);
void qdpc_veth_rx(struct net_device *ndev);
void qdpc_map_image_loc(struct qdpc_priv *priv);
uint32_t qdpc_host_to_ep_address(struct qdpc_priv *priv, void *addr);
void qdpc_veth_txdone(struct net_device *ndev);
int qdpc_send_packet(struct sk_buff *skb, struct net_device *ndev);
void *qdpc_map_pciemem(unsigned long busaddr, size_t len);
void qdpc_unmap_pciemem(unsigned long busaddr, void *vaddr, size_t len);
int qdpc_dmainit_rxq(struct qdpc_priv *priv);
int qdpc_dmainit_txq(struct qdpc_priv *priv);
int qdpc_unmap_iomem(struct qdpc_priv *priv);
int32_t qdpc_set_dma_mask(struct qdpc_priv *priv);
struct sk_buff *qdpc_get_skb(struct qdpc_priv *priv, size_t len);
void qdpc_datapath_uninit(struct qdpc_priv *priv);
#endif

View File

@ -0,0 +1,771 @@
/**
* Copyright (c) 2011-2013 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/version.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/kthread.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <net/netlink.h>
#include "qdpc_config.h"
#include "qdpc_debug.h"
#include "qdpc_init.h"
#include "qdpc_platform.h"
#ifdef CONFIG_RUBY_PCIE_HOST
#include <qtn/skb_recycle.h>
#include <qtn/qtn_global.h>
#endif
#define MAX(X,Y) ((X) > (Y) ? (X) : (Y))
#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
/* Net device function prototypes */
int qdpc_veth_open(struct net_device *ndev);
int qdpc_veth_release(struct net_device *ndev);
struct net_device_stats *qdpc_veth_stats(struct net_device *ndev);
int qdpc_veth_change_mtu(struct net_device *ndev, int new_mtu);
int qdpc_veth_set_mac_addr(struct net_device *ndev, void *paddr);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)
netdev_tx_t qdpc_veth_tx(struct sk_buff *skb, struct net_device *ndev);
#else
int qdpc_veth_tx(struct sk_buff *skb, struct net_device *ndev);
#endif
/* Net device operations structure */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
static struct net_device_ops veth_ops = {
.ndo_open = qdpc_veth_open,
.ndo_stop = qdpc_veth_release,
.ndo_get_stats = qdpc_veth_stats,
.ndo_set_mac_address = qdpc_veth_set_mac_addr,
.ndo_change_mtu = qdpc_veth_change_mtu,
.ndo_start_xmit = qdpc_veth_tx,
};
#endif
#define QDPC_NET_STATS_LEN 10
#define QDPC_VETH_STATS_LEN ARRAY_SIZE(qdpc_veth_gstrings_stats)
/* Ethtool related definitions */
static const char qdpc_veth_gstrings_stats[][ETH_GSTRING_LEN] = {
"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
};
static void qdpc_veth_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info);
static void qdpc_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data);
static void qdpc_veth_get_strings(struct net_device *netdev,
u32 stringset, u8 *data);
static int qdpc_veth_get_sset_count(struct net_device *netdev, int sset);
static struct ethtool_ops qdpc_veth_ethtool_ops = {
.get_drvinfo = qdpc_veth_get_drvinfo,
.get_strings = qdpc_veth_get_strings,
.get_sset_count = qdpc_veth_get_sset_count,
.get_ethtool_stats = qdpc_get_ethtool_stats,
};
/* Driver Name */
extern char qdpc_pcie_driver_name[];
static inline bool check_netlink_magic(qdpc_cmd_hdr_t *cmd_hdr)
{
return ((memcmp(cmd_hdr->dst_magic, QDPC_NETLINK_DST_MAGIC, ETH_ALEN) == 0)
&& (memcmp(cmd_hdr->src_magic, QDPC_NETLINK_SRC_MAGIC, ETH_ALEN) == 0));
}
int qdpc_veth_open(struct net_device *ndev)
{
struct qdpc_priv *priv = netdev_priv(ndev);
if (!qdpc_isbootstate(priv, QDPC_BDA_FW_RUNNING))
return -ENODEV;
flush_scheduled_work();
netif_start_queue(ndev);
return SUCCESS;
}
int qdpc_veth_release(struct net_device *ndev)
{
/* release ports, irq and such -- like fops->close */
netif_stop_queue(ndev); /* can't transmit any more */
return SUCCESS;
}
SRAM_TEXT struct sk_buff *qdpc_get_skb(struct qdpc_priv *priv, size_t len)
{
const uint32_t align = dma_get_cache_alignment();
struct sk_buff *skb = NULL;
uint32_t off;
#if (defined(CONFIG_RUBY_PCIE_HOST) && defined(QDPC_USE_SKB_RECYCLE))
uint32_t size;
struct qtn_skb_recycle_list *recycle_list = qtn_get_shared_recycle_list();
if (recycle_list) {
skb = qtn_skb_recycle_list_pop(recycle_list, &recycle_list->stats_pcie);
}
if (!skb) {
size = qtn_rx_buf_size();
if (len > size)
size = len;
skb = dev_alloc_skb(size + align);
}
if (skb)
skb->recycle_list = recycle_list;
#else
skb = dev_alloc_skb(len + align);
#endif
if (skb) {
off = ((uint32_t)((unsigned long)skb->data)) & (align - 1);
/* skb->data should be cache aligned - do calculation here to be sure. */
if (off) {
skb_reserve(skb, align - off);
}
}
return skb;
}
static inline SRAM_TEXT void qdpc_tx_skb_recycle(struct sk_buff *skb)
{
#if (defined(CONFIG_RUBY_PCIE_HOST) && defined(QDPC_USE_SKB_RECYCLE))
struct qtn_skb_recycle_list *recycle_list = qtn_get_shared_recycle_list();
if (qtn_skb_recycle_list_push(recycle_list,
&recycle_list->stats_pcie, skb))
return;
#endif
dev_kfree_skb_any(skb);
}
void qdpc_netlink_rx(struct net_device *ndev, void *buf, size_t len)
{
struct qdpc_priv *priv = netdev_priv(ndev);
struct sk_buff *skb = nlmsg_new(len, 0);
struct nlmsghdr *nlh;
if (skb == NULL) {
DBGPRINTF(DBG_LL_WARNING, QDRV_LF_WARN, "WARNING: out of netlink SKBs\n");
return;
}
nlh = nlmsg_put(skb, 0, 0, NLMSG_DONE, len, 0);
memcpy(nlmsg_data(nlh), buf, len);
NETLINK_CB(skb).dst_group = 0;
/* Send the netlink message to user application */
nlmsg_unicast(priv->netlink_socket, skb, priv->netlink_pid);
}
static inline SRAM_TEXT int qdpc_map_buffer(struct qdpc_priv *priv, qdpc_desc_t *desc, int mapdir)
{
desc->dd_paddr = pci_map_single(priv->pdev, desc->dd_vaddr, desc->dd_size, mapdir);
return 0;
}
static inline SRAM_TEXT int qdpc_unmap_buffer(struct qdpc_priv *priv, qdpc_desc_t *desc, uint32_t mapdir)
{
pci_unmap_single(priv->pdev, desc->dd_paddr, desc->dd_size, mapdir);
desc->dd_paddr = 0;
return 0;
}
static SRAM_TEXT size_t qdpc_rx_process_frame(struct net_device *ndev,
qdpc_desc_t *rx_desc, uint32_t dma_status, bool lastdesc)
{
struct qdpc_priv *priv = netdev_priv(ndev);
qdpc_dmadesc_t *rx_hwdesc = rx_desc->dd_hwdesc;
struct sk_buff *skb = NULL;
const uint32_t buffer_size = QDPC_DMA_MAXBUF;
uint32_t dma_data = 0;
uint32_t dma_control = 0;
size_t dmalen;
#ifdef QTN_PCIE_USE_LOCAL_BUFFER
void *data;
#endif
struct ethhdr *eth;
qdpc_cmd_hdr_t *cmd_hdr;
dmalen = QDPC_DMA_RXLEN(dma_status);
#ifdef QTN_PCIE_USE_LOCAL_BUFFER
data = rx_desc->dd_metadata;
if ((dmalen >= QDPC_DMA_MINBUF) && (dmalen <= QDPC_DMA_MAXBUF)
&& (QDPC_DMA_SINGLE_BUFFER(dma_status))){
eth = (struct ethhdr *)rx_desc->dd_metadata;
switch (ntohs(eth->h_proto)) {
case QDPC_APP_NETLINK_TYPE:
/* Double Check if it's netlink packet*/
cmd_hdr = (qdpc_cmd_hdr_t *)rx_desc->dd_metadata;
if (check_netlink_magic(cmd_hdr)) {
qdpc_netlink_rx(ndev, rx_desc->dd_metadata
+ sizeof(qdpc_cmd_hdr_t), ntohs(cmd_hdr->len));
}
break;
default:
skb = qdpc_get_skb(priv, dmalen);
if (skb) {
skb_put(skb, dmalen);
memcpy(skb->data, rx_desc->dd_vaddr, dmalen);
skb->dev = ndev;
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->protocol = eth_type_trans(skb, ndev);
netif_rx(skb);
} else {
DBGPRINTF(DBG_LL_WARNING, QDRV_LF_WARN,
"WARNING: out of SKBs\n");
}
break;
}
} else {
DBGPRINTF(DBG_LL_WARNING, QDRV_LF_WARN,
"RX: Drop packet.S:0x%x B:0x%x L:%u\n", dma_status, rx_hwdesc->dma_data, (uint32_t)dmalen);
}
rx_desc->dd_vaddr = data;
rx_desc->dd_size = buffer_size;
qdpc_map_buffer(priv, rx_desc, PCI_DMA_FROMDEVICE);
dma_data = rx_desc->dd_paddr + priv->epmem.eps_dma_offset;
dma_control = ((buffer_size & QDPC_DMA_LEN_MASK)|(lastdesc ? QDPC_DMA_LAST_DESC : 0));
#else
skb = (struct sk_buff *)rx_desc->dd_metadata;
if (skb && (dmalen >= QDPC_DMA_MINBUF) && (dmalen <= QDPC_DMA_MAXBUF)
&& (QDPC_DMA_SINGLE_BUFFER(dma_status))) {
eth = (struct ethhdr *)skb->data;
switch (ntohs(eth->h_proto)) {
case QDPC_APP_NETLINK_TYPE:
/* Double Check if it's netlink packet*/
cmd_hdr = (qdpc_cmd_hdr_t *)skb->data;
if (check_netlink_magic(cmd_hdr)) {
qdpc_netlink_rx(ndev, skb->data +
sizeof(qdpc_cmd_hdr_t), ntohs(cmd_hdr->len));
}
break;
default:
skb->dev = ndev;
skb->len = 0;
skb_reset_tail_pointer(skb);
skb_put(skb, dmalen);
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->protocol = eth_type_trans(skb, ndev);
netif_rx(skb);
skb = NULL;
break;
}
} else {
DBGPRINTF(DBG_LL_WARNING, QDRV_LF_WARN,
"RX: Drop packet. Skb: 0x%p Status:0x%x Len:%u\n", skb, dma_status, dmalen);
}
if (skb)
dev_kfree_skb_any(skb);
skb = qdpc_get_skb(priv, buffer_size);
if (skb == NULL) {
rx_desc->dd_vaddr = 0;
rx_desc->dd_size = 0;
dma_data = 0;
dma_control = (lastdesc ? QDPC_DMA_LAST_DESC : 0);
} else {
rx_desc->dd_vaddr = skb->data;
rx_desc->dd_size = buffer_size;
qdpc_map_buffer(priv, rx_desc, PCI_DMA_FROMDEVICE);
dma_data = rx_desc->dd_paddr + priv->epmem.eps_dma_offset;
dma_control = ((buffer_size & QDPC_DMA_LEN_MASK)|(lastdesc ? QDPC_DMA_LAST_DESC : 0));
}
rx_desc->dd_metadata = (void *)skb;
#endif
qdpc_pci_writel(dma_data, &rx_hwdesc->dma_data);
qdpc_pci_writel(dma_control, &rx_hwdesc->dma_control);
/*
* Memory-mapped I/O barrier: on systems where it is defined, it forces the rest of
* the descriptor writes to complete before the ownership status is changed.
*/
mmiowb();
qdpc_pci_writel(QDPC_DMA_OWN, &rx_hwdesc->dma_status);
return dmalen;
}
static SRAM_TEXT void qdpc_indicate_peer_rx_nfree(struct qdpc_priv *priv,
qdpc_pdring_t *rxq, qdpc_desc_t *rx_desc, uint32_t dma_status)
{
uint32_t nfree = rxq->pd_ringsize;
qdpc_desc_t *rx_last_desc;
unsigned long flags;
local_irq_save(flags);
if (unlikely(QDPC_DMA_OWNED(dma_status) == 0)) {
rx_last_desc = rx_desc;
for(rx_desc = rxq->pd_nextdesc; (rx_desc != rx_last_desc) && (nfree > 0);) {
dma_status = qdpc_pci_readl(&rx_desc->dd_hwdesc->dma_status);
if (QDPC_DMA_OWNED(dma_status) == 0) {
nfree--;
} else {
break;
}
if (rx_desc == rxq->pd_lastdesc) {
rx_desc = rxq->pd_firstdesc;
} else {
rx_desc++;
}
}
if (nfree <= QDPC_VETH_RX_LOW_WATERMARK)
nfree = 0;
}
qdpc_pci_writel(nfree, priv->host_h2ep_txd_budget);
mmiowb();
local_irq_restore(flags);
}
SRAM_TEXT void qdpc_veth_rx(struct net_device *ndev)
{
struct qdpc_priv *priv = netdev_priv(ndev);
qdpc_pdring_t *rxq = &priv->pktq.pkt_usq;
uint32_t budget = QDPC_DESC_RING_SIZE << 1;
uint32_t dma_status = 0;
qdpc_desc_t *rx_desc;
qdpc_dmadesc_t *rx_hwdesc;
bool lastdesc;
size_t pktlen;
if (rxq->pd_nextdesc == NULL)
return;
rx_desc = rxq->pd_nextdesc;
rx_hwdesc = rx_desc->dd_hwdesc;
dma_status = qdpc_pci_readl(&rx_hwdesc->dma_status);
while (budget-- > 0 && (QDPC_DMA_OWNED(dma_status) == 0)) {
rx_desc = rxq->pd_nextdesc;
lastdesc = (rxq->pd_nextdesc == rxq->pd_lastdesc);
rx_hwdesc = rx_desc->dd_hwdesc;
qdpc_unmap_buffer(priv, rx_desc, PCI_DMA_FROMDEVICE);
pktlen = qdpc_rx_process_frame(ndev, rx_desc,dma_status, lastdesc);
/* Check for end of ring, and loop around */
if (lastdesc) {
rxq->pd_nextdesc = rxq->pd_firstdesc;
} else {
rxq->pd_nextdesc++;
}
/* Update the statistics */
priv->stats.rx_packets++;
priv->stats.rx_bytes += pktlen;
rx_hwdesc = rxq->pd_nextdesc->dd_hwdesc;
dma_status = qdpc_pci_readl(&rx_hwdesc->dma_status);
}
qdpc_indicate_peer_rx_nfree(priv, rxq, rx_desc, dma_status);
qdpc_interrupt_target(priv, QDPC_HOST_TXDONE);
}
SRAM_TEXT uint32_t qdpc_veth_txprocessq(struct net_device *ndev, uint32_t maxpkts)
{
struct qdpc_priv *priv = netdev_priv(ndev);
qdpc_pdring_t *txq = &priv->pktq.pkt_dsq;
qdpc_desc_t *tx_desc;
uint32_t nprocessed = 0;
for (nprocessed = 0; nprocessed < maxpkts; nprocessed++) {
tx_desc = STAILQ_FIRST(&txq->pd_pending);
if (!tx_desc || !(QDPC_DMA_OWNED(qdpc_pci_readl(&tx_desc->dd_hwdesc->dma_status)) == 0)) {
break;
}
STAILQ_REMOVE_HEAD(&txq->pd_pending, dd_entry);
qdpc_unmap_buffer(priv, tx_desc, PCI_DMA_TODEVICE);
#if !defined(QTN_PCIE_USE_LOCAL_BUFFER)
qdpc_tx_skb_recycle((struct sk_buff *)tx_desc->dd_metadata);
tx_desc->dd_metadata = NULL;
#endif
tx_desc->dd_qtime = 0;
}
txq->pd_nfree += nprocessed;
txq->pd_npending -= nprocessed;
return nprocessed;
}
static SRAM_TEXT inline void qdpc_start_txdma(struct qdpc_priv *priv)
{
qdpc_interrupt_target(priv, QDPC_HOST_TXREADY);
}
static SRAM_TEXT inline void qdpc_start_rxtasklet(struct qdpc_priv *priv)
{
qdpc_interrupt_target(priv, QDPC_HOST_START_RX);
}
SRAM_DATA static int peer_rx_nfree = QDPC_DESC_RING_SIZE;
static SRAM_TEXT int32_t qdpc_update_peer_nfree(struct qdpc_priv *priv)
{
int32_t budget;
budget = qdpc_pci_readl(priv->host_ep2h_txd_budget);
if (budget < 0) {
budget = peer_rx_nfree;
} else {
peer_rx_nfree = budget;
qdpc_pci_writel(-1, priv->host_ep2h_txd_budget);
}
return budget;
}
/* TX completion routine, Runs as tasklet/softirq priority */
SRAM_TEXT void qdpc_veth_txdone(struct net_device *ndev)
{
struct qdpc_priv *priv = netdev_priv(ndev);
qdpc_pdring_t *txq = &priv->pktq.pkt_dsq;
int32_t npending = (int32_t)txq->pd_npending;
uint32_t high_watermark = txq->pd_ringsize >> 1;
spin_lock_bh(&txq->pd_lock);
qdpc_veth_txprocessq(ndev, npending >> 1);
npending = (int32_t)txq->pd_npending;
if (npending <= high_watermark && netif_queue_stopped(ndev)) {
del_timer(&priv->txq_enable_timer);
netif_wake_queue(ndev);
} else if (npending > high_watermark) {
netif_stop_queue(ndev);
mod_timer(&priv->txq_enable_timer, jiffies + msecs_to_jiffies(QDPC_STOP_QUEUE_TIMER_DELAY));
}
spin_unlock_bh(&txq->pd_lock);
}
inline static SRAM_TEXT unsigned long qdpc_align_val_up(unsigned long val, unsigned long step)
{
return ((val + step - 1) & (~(step - 1)));
}
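/*
 * Illustrative example (not in the original source): qdpc_align_val_up(100, 32)
 * returns 128, i.e. the value rounded up to the next multiple of the
 * power-of-two step (values already aligned are returned unchanged).
 */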
inline SRAM_TEXT int qdpc_send_packet_invalid (struct qdpc_priv *priv, struct sk_buff *skb)
{
qdpc_pdring_t *txq = &priv->pktq.pkt_dsq;
return (!txq->pd_nextdesc || (!skb->len) || (skb->len > QDPC_MAX_MTU));
}
static SRAM_TEXT int qdpc_send_desc_check(struct qdpc_priv *priv,
qdpc_pdring_t *txq)
{
int32_t ret = 1;
int32_t budget;
unsigned long flags;
local_irq_save(flags);
if (txq->pd_nfree > QDPC_VETH_TX_LOW_WATERMARK) {
budget = qdpc_update_peer_nfree(priv);
if (budget > (txq->pd_npending + QDPC_VETH_RX_LOW_WATERMARK)) {
peer_rx_nfree--;
ret = 0;
} else {
qdpc_start_rxtasklet(priv);
}
} else {
qdpc_start_txdma(priv);
}
local_irq_restore(flags);
return ret;
}
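/*
* Transmit path: validate the skb, make sure a local descriptor and peer
* RX budget are available (otherwise stop the queue and arm the run-out
* timer), then fill the next hardware descriptor, hand ownership to the
* DMA engine and kick the endpoint with a TXREADY doorbell.
*/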
SRAM_TEXT int qdpc_send_packet(struct sk_buff *skb, struct net_device *ndev)
{
struct qdpc_priv *priv = netdev_priv(ndev);
qdpc_pdring_t *txq = &priv->pktq.pkt_dsq;
qdpc_dmadesc_t* tx_hwdesc;
qdpc_desc_t *tx_desc;
bool lastdesc;
uint32_t dma_len = 0;
void *dma_data;
if (unlikely(qdpc_send_packet_invalid(priv, skb))) {
DBGPRINTF(DBG_LL_WARNING, QDRV_LF_WARN,
"Xmit packet invalid: len %d\n", skb->len);
dev_kfree_skb_any(skb);
priv->stats.tx_dropped++;
return NETDEV_TX_OK;
}
spin_lock_bh(&txq->pd_lock);
if (qdpc_send_desc_check(priv, txq)) {
netif_stop_queue(ndev);
mod_timer(&priv->txq_enable_timer, jiffies + msecs_to_jiffies(QDPC_STOP_QUEUE_TIMER_DELAY));
spin_unlock_bh(&txq->pd_lock);
priv->stats.tx_errors++;
return NETDEV_TX_BUSY;
}
if (skb->len <= priv->epmem.eps_minbuf) {
dma_len = priv->epmem.eps_minbuf;
} else {
dma_len = skb->len & QDPC_DMA_LEN_MASK;
}
if (unlikely(dma_len >= QDPC_DMA_MAXBUF)) {
spin_unlock_bh(&txq->pd_lock);
DBGPRINTF(DBG_LL_WARNING, QDRV_LF_WARN,
"Xmit packet too big: len %d dmalen %d\n", skb->len, dma_len);
dev_kfree_skb_any(skb);
priv->stats.tx_dropped++;
return NETDEV_TX_OK;
}
if (unlikely(skb_linearize(skb) != 0)) {
DBGPRINTF(DBG_LL_WARNING, QDRV_LF_WARN,
"WARNING:%u Linearize failed\n", __LINE__);
dev_kfree_skb_any(skb);
priv->stats.tx_dropped++;
return NETDEV_TX_OK;
}
tx_desc = txq->pd_nextdesc;
tx_hwdesc = tx_desc->dd_hwdesc;
lastdesc = (tx_desc == txq->pd_lastdesc);
if (lastdesc) {
txq->pd_nextdesc = txq->pd_firstdesc;
} else {
txq->pd_nextdesc++;
}
#ifdef QTN_PCIE_USE_LOCAL_BUFFER
/* On X86 copy to local buffer
*/
dma_data = tx_desc->dd_metadata;
memcpy(tx_desc->dd_metadata, skb->data, skb->len);
#else
dma_data = skb->data;
/* Hold onto skb. Release when we get a txdone */
tx_desc->dd_metadata = skb;
#endif
tx_desc->dd_vaddr = dma_data;
tx_desc->dd_size = dma_len;
qdpc_map_buffer(priv, tx_desc, PCI_DMA_TODEVICE);
qdpc_pci_writel(tx_desc->dd_paddr + priv->epmem.eps_dma_offset,
&tx_hwdesc->dma_data);
qdpc_pci_writel((QDPC_DMA_SINGLE_TXBUFFER | QDPC_DMA_TX_NOCRC | dma_len |
(lastdesc ? QDPC_DMA_LAST_DESC : 0)), &tx_hwdesc->dma_control);
mmiowb(); /* Memory-mapped I/O barrier */
qdpc_pci_writel((QDPC_DMA_OWN), &tx_hwdesc->dma_status);
/*
* Add delay equivalent to enlarge the IFG
* It's a workaround for BBIC3 RX Run-out
*/
#define RUBY_PCIE_HOST_WR_DELAY 50
udelay(RUBY_PCIE_HOST_WR_DELAY);
tx_desc->dd_qtime = jiffies;
STAILQ_INSERT_TAIL(&txq->pd_pending, tx_desc, dd_entry);
txq->pd_nfree--;
txq->pd_npending++;
qdpc_start_txdma(priv);
spin_unlock_bh(&txq->pd_lock);
priv->stats.tx_packets++;
priv->stats.tx_bytes += skb->len;
/* Do this for X86 */
#ifdef QTN_PCIE_USE_LOCAL_BUFFER
qdpc_tx_skb_recycle(skb);
#endif
return NETDEV_TX_OK;
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)
netdev_tx_t qdpc_veth_tx(struct sk_buff *skb, struct net_device *ndev)
#else
int qdpc_veth_tx(struct sk_buff *skb, struct net_device *ndev)
#endif
{
return qdpc_send_packet(skb, ndev);
}
struct net_device_stats *qdpc_veth_stats(struct net_device *ndev)
{
struct qdpc_priv *priv = netdev_priv(ndev);
return &priv->stats;
}
int qdpc_veth_change_mtu(struct net_device *ndev, int new_mtu)
{
struct qdpc_priv *priv = netdev_priv(ndev);
spinlock_t *lock = &priv->lock;
unsigned long flags;
/* check ranges */
if ((new_mtu < QDPC_MIN_MTU) || (new_mtu > QDPC_MAX_MTU))
return -EINVAL;
/*
* Do anything you need, and then accept the new value
*/
spin_lock_irqsave(lock, flags);
ndev->mtu = new_mtu;
spin_unlock_irqrestore(lock, flags);
return SUCCESS; /* success */
}
int qdpc_veth_set_mac_addr(struct net_device *ndev, void *paddr)
{
struct sockaddr *addr = (struct sockaddr *)paddr;
if (netif_running(ndev))
return -EBUSY;
/* Check the validity */
if (!is_valid_ether_addr((const u8 *)addr->sa_data))
return -EINVAL;
memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
return SUCCESS;
}
int qdpc_init_netdev(struct net_device **net_dev, struct pci_dev *pdev)
{
struct qdpc_priv *priv;
struct net_device *ndev;
int ret = 0;
/* Allocate the devices */
ndev = alloc_netdev(sizeof(struct qdpc_priv), "host%d", ether_setup);
if (!ndev) {
PRINT_ERROR("Error in allocating the net device \n");
return -ENOMEM;
}
SET_NETDEV_DEV(ndev, &pdev->dev);
priv = netdev_priv(ndev);
memset(priv, 0, sizeof(struct qdpc_priv));
priv->pdev = pdev;
priv->ndev = ndev;
pci_set_drvdata(pdev, ndev);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
ndev->netdev_ops = &veth_ops;
#else
ndev->open = qdpc_veth_open;
ndev->stop = qdpc_veth_release;
ndev->hard_start_xmit = qdpc_veth_tx;
ndev->get_stats = qdpc_veth_stats;
ndev->change_mtu = qdpc_veth_change_mtu;
ndev->set_mac_address = qdpc_veth_set_mac_addr;
#endif
ndev->ethtool_ops = &qdpc_veth_ethtool_ops;
/* Initialize locks */
spin_lock_init(&priv->lock);
memcpy(ndev->dev_addr, "\0HOST%", ETH_ALEN);
*net_dev = ndev;
return ret;
}
static void qdpc_veth_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
struct qdpc_priv *priv = netdev_priv(ndev);
struct pci_dev *pci_dev = (struct pci_dev *) priv->pdev;
strcpy(info->driver, qdpc_pcie_driver_name);
strcpy(info->version, QDPC_MODULE_VERSION);
strcpy(info->fw_version, "N/A");
strcpy(info->bus_info, pci_name(pci_dev));
return;
}
static void qdpc_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct qdpc_priv *priv = netdev_priv(netdev);
int i;
for (i = 0; i < QDPC_NET_STATS_LEN; i++) {
data[i] = ((unsigned long *)&priv->stats)[i];
}
return;
}
static void qdpc_veth_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
if (ETH_SS_STATS == stringset) {
memcpy(data, *qdpc_veth_gstrings_stats, sizeof(qdpc_veth_gstrings_stats));
}
return;
}
static int qdpc_veth_get_sset_count(struct net_device *netdev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
return QDPC_VETH_STATS_LEN;
default:
return -EOPNOTSUPP;
}
}

View File

@ -0,0 +1,496 @@
/**
* Copyright (c) 2011-2013 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/version.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <asm/byteorder.h>
#include <linux/pci.h>
#include <asm-generic/pci-dma-compat.h>
#include "qdpc_config.h"
#include "qdpc_debug.h"
#include "qdpc_init.h"
#include "qdpc_regs.h"
#include "qdpc_ruby.h"
#include <qdpc_platform.h>
static irqreturn_t qdpc_isr(int irq, void *dev_id);
static void qdpc_rx_tasklet(void *dev);
static void qdpc_txd_tasklet(void *dev);
static int qdpc_pcie_init_intr(struct net_device *pdev);
static int qdpc_pcie_init_mem(struct net_device *ndev);
#define MSI_64_EN (1 << 7)
static void qdpc_tx_runout_func(unsigned long data)
{
struct qdpc_priv *priv = (struct qdpc_priv*)data;
qdpc_pdring_t *txq = &priv->pktq.pkt_dsq;
int32_t budget = qdpc_pci_readl(priv->host_ep2h_txd_budget);
tasklet_schedule(&priv->txd_tasklet);
DBGPRINTF(DBG_LL_WARNING, QDRV_LF_WARN, "Restarting tx queue\n");
DBGPRINTF(DBG_LL_WARNING, QDRV_LF_WARN, "budget %d, free %d, pending %d\n",
budget, txq->pd_nfree, txq->pd_npending);
}
int32_t qdpc_pcie_init_intr_and_mem(struct net_device *ndev)
{
struct qdpc_priv *priv = netdev_priv(ndev);
struct pci_dev *pdev = priv->pdev;
int result = 0;
/* Initialize interrupts */
if (( result = qdpc_pcie_init_intr(ndev)) < 0) {
PRINT_ERROR("PCIe Interrupt Initialization failed \n");
return result;
}
/* Memory Initialization */
if (( result = qdpc_pcie_init_mem(ndev)) < 0) {
PRINT_ERROR("PCIe Memory Initialization failed \n");
qdpc_free_interrupt(pdev);
}
return result;
}
static int32_t qdpc_pcie_init_intr(struct net_device *ndev)
{
struct qdpc_priv *priv = netdev_priv(ndev);
struct pci_dev *pdev = priv->pdev;
int ret = 0, pos = 0;
u16 msi_ctl = 0 ;
/* Check if the device has MSI capability */
pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
if (!pos) {
PRINT_ERROR("Device doesn't have MSI capability, INTx will be used instead\n");
} else {
pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &msi_ctl);
/* Check if the device has enabled MSI */
if (msi_ctl & PCI_MSI_FLAGS_ENABLE) {
/* Enable MSI support */
if (!pci_enable_msi(pdev)) {
priv->msi_enabled = 1;
printk("Host using MSI interrupt\n");
} else {
PRINT_ERROR("Enable MSI fail\n");
return -1;
}
} else {
PRINT_ERROR("Device doesn't have MSI enabled, INTx will be used instead\n");
}
}
tasklet_init(&priv->rx_tasklet, (void *)qdpc_rx_tasklet, (unsigned long)ndev);
tasklet_disable(&priv->rx_tasklet);
tasklet_init(&priv->txd_tasklet, (void *)qdpc_txd_tasklet, (unsigned long)ndev);
tasklet_disable(&priv->txd_tasklet);
priv->txq_enable_timer.function = qdpc_tx_runout_func;
priv->txq_enable_timer.data = (unsigned long)priv;
init_timer(&priv->txq_enable_timer);
/* Request the interrupt line with HSM device driver name */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22))
ret = request_irq(pdev->irq, qdpc_isr,
SA_SHIRQ, QDPC_DEV_NAME, ndev);
#else
ret = request_irq(pdev->irq, qdpc_isr,
IRQF_SHARED | IRQF_DISABLED, QDPC_DEV_NAME, ndev);
#endif
if (ret) {
PRINT_ERROR("Failed to allocate interrupt line %d\n", pdev->irq);
goto out;
}
return ret;
out:
if (1 == priv->msi_enabled) {
pci_disable_msi(pdev);
}
return ret;
}
static void qdpc_deassert_intx(struct qdpc_priv *priv)
{
unsigned long pcie_cfg0 = 0x0;
void *basereg = QDPC_BAR_VADDR(priv->sysctl_bar, QDPC_SYSCFG_REG);
pcie_cfg0 = qdpc_pci_readl(basereg);
if (pcie_cfg0 & BIT(9)) {
pcie_cfg0 &= ~(BIT(9));
qdpc_pcie_posted_write(pcie_cfg0, basereg);
}
}
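/*
* Shared interrupt handler: read and clear the EP-to-host IRQ status word,
* schedule the RX and/or TX-done tasklets as indicated by the status bits,
* then de-assert the legacy INTx line via the system config register.
*/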
static irqreturn_t qdpc_isr(int irq, void *dev)
{
struct net_device *ndev = (struct net_device *)dev;
struct qdpc_priv *priv = netdev_priv(ndev);
uint32_t isrstatus = qdpc_pci_readl(priv->ep2host_irqstatus);
qdpc_pcie_posted_write(0, priv->ep2host_irqstatus);
if (isrstatus & QDPC_EP_RXDONE) {
tasklet_schedule(&priv->rx_tasklet);
}
if (isrstatus & QDPC_EP_TXDONE) {
tasklet_schedule(&priv->txd_tasklet);
}
/* This still needs further investigation: removing the INTx de-assert
* alters the contention timing between PCIe and the other parts of the
* BBIC register file.
*/
//if (priv->msi_enabled) {
qdpc_deassert_intx(priv);
//}
return IRQ_HANDLED;
}
void qdpc_free_interrupt(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
struct qdpc_priv *priv = netdev_priv(ndev);
free_irq(pdev->irq, ndev);
if (1 == priv->msi_enabled) {
pci_disable_msi(pdev);
}
tasklet_kill(&priv->rx_tasklet);
tasklet_kill(&priv->txd_tasklet);
del_timer(&priv->txq_enable_timer);
return;
}
static void qdpc_rx_tasklet(void *dev)
{
qdpc_veth_rx((struct net_device *)dev);
}
static void qdpc_txd_tasklet(void *dev)
{
qdpc_veth_txdone((struct net_device *)dev);
}
void qdpc_interrupt_target(struct qdpc_priv *priv, uint32_t intr)
{
unsigned long flags;
void *basereg = QDPC_BAR_VADDR(priv->sysctl_bar, QDPC_H2EP_INTERRUPT);
__iomem uint32_t *irqstatus = priv->host2ep_irqstatus;
spin_lock_irqsave(&priv->lock, flags);
qdpc_pcie_posted_write((intr)|qdpc_pci_readl(irqstatus), irqstatus);
qdpc_pcie_posted_write(QDPC_H2EP_INTERRUPT_BIT, basereg);
spin_unlock_irqrestore(&priv->lock, flags);
}
static bool qdpc_bar_check(struct qdpc_priv *priv, qdpc_bar_t *bar)
{
uint32_t offset = bar->b_offset;
size_t len = bar->b_len;
dma_addr_t busaddr = bar->b_busaddr;
uint8_t index = bar->b_index;
if (index > 5) {
printk("Invalid BAR index:%u. Must be between 0 and 5\n", index);
return 0;
}
if (!len) {
/* NOTE:
* Do not use an implicit length such as the BAR length
* if the map length is too large say > 16Mb this leaves
* the implementation vulnerable to
* Linux and the attack of the Silent "S" (one between the n and u)
*/
printk("Zero length BAR\n");
return 0;
}
if (busaddr) { /*initialized BAR */
unsigned long bar_start = pci_resource_start(priv->pdev , index);
unsigned long bar_end = pci_resource_end(priv->pdev , index);
if (!bar_start) {
printk("Invalid BAR address: 0x%p.\n", (void *)busaddr);
return 0;
}
if ((busaddr - offset) != bar_start) {
printk("Invalid BAR offset:0x%p. BAR starts at 0x%p\n",
(void *)(busaddr -offset), (void *)bar_start);
return 0;
}
/* Check the span of the BAR including the offset + length, bar_end points to the last byte of BAR */
if ((busaddr + len - 1) > bar_end) {
printk("Invalid BAR end address:0x%p. BAR ends at 0x%p\n",
(void *)(busaddr + len), (void *)bar_end);
return 0;
}
} else { /* Uninitialized BAR */
unsigned long bar_end = pci_resource_end(priv->pdev , index);
busaddr = pci_resource_start(priv->pdev , index);
if (!busaddr) {
printk("Invalid BAR address: 0x%p.\n", (void *)busaddr);
return 0;
}
/* Checks that offset area is within bar */
if ( (busaddr + offset) > bar_end) {
printk("Invalid BAR offset 0x%p, extends beyond end of BAR(0x%p).\n",
(void *)(busaddr + offset), (void *)bar_end);
return 0;
}
/* Checks that mapped area is within bar */
if ((busaddr + len + offset - 1) > bar_end) {
printk("Mapped area 0x%p, extends beyond end of BAR(0x%p).\n",
(void *)(busaddr + len + offset - 1), (void *)bar_end);
return 0;
}
}
return 1;
}
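/*
* Map a PCI BAR (or a window into it): sanity-check the requested
* index/offset/length against the BAR exposed by the device, reserve the
* memory region, ioremap it uncached and record the mapping in *bar.
*/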
static qdpc_bar_t *qdpc_map_bar(struct qdpc_priv *priv, qdpc_bar_t *bar,
uint8_t index, size_t len, uint32_t offset)
{
void *vaddr = NULL;
dma_addr_t busaddr = 0;
qdpc_bar_t temp_bar;
memset(&temp_bar, 0 ,sizeof(qdpc_bar_t));
temp_bar.b_len = len;
temp_bar.b_offset = offset;
temp_bar.b_index = index;
if (!qdpc_bar_check(priv, &temp_bar)) {
printk("Failed bar mapping sanity check in %s\n", __FUNCTION__);
return NULL;
}
/* Reserve PCIe memory region*/
busaddr = pci_resource_start(priv->pdev , index) + offset;
if (!request_mem_region(busaddr, len , QDPC_DEV_NAME)) {
printk("Failed to reserve %u bytes of PCIe memory "
"region starting at 0x%p\n", (uint32_t)len, (void *)busaddr);
return NULL;
}
vaddr = ioremap_nocache(busaddr, len);
if (!vaddr) {
printk("Failed to map %u bytes at BAR%u at bus address 0x%p.\n",
(uint32_t)len, index, (void *)busaddr);
release_mem_region(busaddr, len);
return NULL;
}
memset(&temp_bar, 0 ,sizeof(qdpc_bar_t));
bar->b_vaddr = vaddr;
bar->b_busaddr = busaddr;
bar->b_len = len;
bar->b_index = index;
bar->b_offset = offset;
printk("BAR:%u vaddr=0x%p busaddr=%p offset=%u len=%u\n",
bar->b_index, bar->b_vaddr, (void *)bar->b_busaddr,
bar->b_offset, (uint32_t)bar->b_len);
return bar;
}
static bool qdpc_unmap_bar(struct qdpc_priv *priv, qdpc_bar_t *bar)
{
if (!qdpc_bar_check(priv, bar)) {
PRINT_ERROR("Failed bar mapping sanity check in %s\n", __FUNCTION__);
return 0;
}
iounmap(bar->b_vaddr);
release_mem_region(bar->b_busaddr - bar->b_offset, bar->b_len);
memset(bar, 0 , sizeof(qdpc_bar_t));
return 1;
}
static void qdpc_map_epmem(struct qdpc_priv *priv)
{
printk("%s() Mapping epmem\n", __FUNCTION__);
qdpc_map_bar(priv, &priv->epmem_bar, QDPC_SHMEM_BAR,
pci_resource_len(priv->pdev, QDPC_SHMEM_BAR) , 0);
priv->bda =(qdpc_pcie_bda_t *)QDPC_BAR_VADDR(priv->epmem_bar, 0);
/* Init IRQ status pointers */
priv->host2ep_irqstatus = &priv->bda->bda_h2ep_irqstatus;
priv->ep2host_irqstatus = &priv->bda->bda_ep2h_irqstatus;
priv->host_ep2h_txd_budget = &priv->bda->bda_ep2h_txd_budget;
priv->host_h2ep_txd_budget = &priv->bda->bda_h2ep_txd_budget;
}
static void qdpc_map_sysctl_regs(struct qdpc_priv *priv)
{
printk("%s() Mapping sysctl\n", __FUNCTION__);
qdpc_map_bar(priv, &priv->sysctl_bar, QDPC_SYSCTL_BAR, pci_resource_len(priv->pdev, QDPC_SYSCTL_BAR) , 0);
}
static void qdpc_unmap_epmem(struct qdpc_priv *priv)
{
printk("%s() Unmapping epmem\n", __FUNCTION__);
priv->bda = NULL;
qdpc_unmap_bar(priv, &priv->epmem_bar);
}
static void qdpc_unmap_sysctl_regs(struct qdpc_priv *priv)
{
printk("%s() Unmapping sysctl\n", __FUNCTION__);
qdpc_unmap_bar(priv, &priv->sysctl_bar);
}
int32_t qdpc_set_dma_mask(struct qdpc_priv *priv) {
int result = 0;
uint64_t dma_mask = qdpc_pci_readl(&priv->bda->bda_dma_mask);
printk("Requested DMA mask:0x%llx\n", dma_mask);
result = pci_set_dma_mask(priv->pdev, dma_mask);
if (!result) {
result = pci_set_consistent_dma_mask(priv->pdev, dma_mask);
if (result) {
printk(" pci_set_consistent_dma_mask() error %d. Mask:0x%llx\n", result, dma_mask);
return 1;
}
} else {
printk(" pci_set_dma_mask() error %d. Mask:0x%llx\n", result, dma_mask);
return 1;
}
return 0;
}
static int32_t qdpc_pcie_init_mem(struct net_device *ndev)
{
struct qdpc_priv *priv = netdev_priv(ndev);
int ret = 0;
/* Map SynControl registers and Host to Endpoint interrupt registers to BAR-2 */
qdpc_map_sysctl_regs(priv);
qdpc_map_epmem(priv);
/* Initialize Tx and Rx buffers*/
qdpc_datapath_init(priv);
return ret;
}
int qdpc_unmap_iomem(struct qdpc_priv *priv)
{
qdpc_unmap_epmem(priv);
qdpc_unmap_sysctl_regs(priv);
return SUCCESS;
}
int qdpc_dma_setup(struct qdpc_priv *priv)
{
qdpc_epshmem_hdr_t *shm;
shm = (qdpc_epshmem_hdr_t *)QDPC_BAR_VADDR(priv->epmem_bar, sizeof(qdpc_pcie_bda_t));
priv->epmem.eps_mapsize = qdpc_pci_readl(&shm->eps_mapsize);
priv->epmem.eps_ver = qdpc_pci_readl(&shm->eps_ver);
priv->epmem.eps_size = qdpc_pci_readl(&shm->eps_size);
priv->epmem.eps_dma_offset = qdpc_pci_readl(&shm->eps_dma_offset);
priv->epmem.eps_dsdma_desc = qdpc_pci_readl(&shm->eps_dsdma_desc);
priv->epmem.eps_usdma_desc = qdpc_pci_readl(&shm->eps_usdma_desc);
priv->epmem.eps_dsdma_ndesc = qdpc_pci_readl(&shm->eps_dsdma_ndesc);
priv->epmem.eps_usdma_ndesc = qdpc_pci_readl(&shm->eps_usdma_ndesc);
priv->epmem.eps_maxbuf = qdpc_pci_readl(&shm->eps_maxbuf);
priv->epmem.eps_minbuf = qdpc_pci_readl(&shm->eps_minbuf);
priv->epmem.eps_align = qdpc_pci_readl(&shm->eps_align);
printk("Remaddr: 0x%p Size:%u\n", (void *)shm, priv->epmem.eps_mapsize);
printk("Shmem: Len:%u Maplen:%u Ver0x%x\nUSDesc %u@0x%x DSDesc %u@0x%x DMA:%u/%u/%u\n",
priv->epmem.eps_size, priv->epmem.eps_mapsize, priv->epmem.eps_ver,
priv->epmem.eps_usdma_ndesc, priv->epmem.eps_usdma_desc,
priv->epmem.eps_dsdma_ndesc, priv->epmem.eps_dsdma_desc,
priv->epmem.eps_maxbuf, priv->epmem.eps_minbuf, priv->epmem.eps_align);
if (qdpc_dmainit_rxq(priv) < 0) {
printk("RX Queue DMA failed to initialize.\n");
return FAILURE;
}
if (qdpc_dmainit_txq(priv) < 0) {
printk("TX Queue DMA failed to initialize.\n");
return FAILURE;
}
return SUCCESS;
}
void qdpc_pcie_free_mem(struct pci_dev *pdev)
{
return;
}
void *qdpc_map_pciemem(unsigned long busaddr, size_t len)
{
/* Reserve PCIe memory region*/
if (!request_mem_region(busaddr, len, QDPC_DEV_NAME)) {
PRINT_ERROR(KERN_ERR "Failed to reserve %u bytes of "
"PCIe memory region starting at 0x%lx\n", (uint32_t)len, busaddr);
return NULL;
}
return ioremap_nocache(busaddr, len);
}
void qdpc_unmap_pciemem(unsigned long busaddr, void *vaddr, size_t len)
{
if (!vaddr || !busaddr)
return;
iounmap(vaddr);
release_mem_region(busaddr, len);
}

View File

@ -0,0 +1,29 @@
/**
* Copyright (c) 2011-2013 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#ifndef __QDPC_REGS_H__
#define __QDPC_REGS_H__
#define QDPC_SYSCTL_BAR 0
#define QDPC_SHMEM_BAR 2
#define QDPC_H2EP_INTERRUPT 0x3c
#define QDPC_SYSCFG_REG 0x6c
#endif //__QDPC_REGS_H__

View File

@ -0,0 +1,207 @@
/**
* Copyright (c) 2011-2013 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/version.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include "qdpc_config.h"
#include "qdpc_debug.h"
#include "qdpc_init.h"
#include "qdpc_regs.h"
#include "qdpc_ruby.h"
int qdpc_dmainit_txq(struct qdpc_priv *priv)
{
uint32_t i;
qdpc_pdring_t *txq;
uint32_t ringsize = priv->epmem.eps_dsdma_ndesc;
/* Downstream TX queue */
txq = &priv->pktq.pkt_dsq;
txq->pd_hwdesc = (qdpc_dmadesc_t *)QDPC_BAR_VADDR(priv->epmem_bar, priv->epmem.eps_dsdma_desc);
#ifdef QTN_PCIE_USE_LOCAL_BUFFER
/* Downstream TX data . Temporary buffer used for bringup on X86*/
priv->dsdata = kmalloc(ringsize * sizeof(qdpc_dmadata_t), GFP_KERNEL | GFP_DMA);
if (!(priv->dsdata)) {
printk("ERROR: TX DMA allocation failed.\n");
return -1;
}
printk("TX DMA bounce buffers: 0x%p\n", priv->dsdata);
#endif
/* Initialize Downstream TX queue */
spin_lock_init(&txq->pd_lock);
STAILQ_INIT(&txq->pd_pending);
txq->pd_ringsize = ringsize;
txq->pd_nfree = txq->pd_ringsize;
txq->pd_npending = 0;
memset(txq->pd_desc, 0, sizeof(txq->pd_desc));
memset(txq->pd_hwdesc, 0, (ringsize*sizeof(qdpc_dmadesc_t)));
for (i = 0 ; i < txq->pd_ringsize ; i++)
{
/* Buffer contains entire packet */
txq->pd_desc[i].dd_hwdesc = &txq->pd_hwdesc[i];
txq->pd_desc[i].dd_paddr = 0;
qdpc_pci_writel((QDPC_DMA_SINGLE_TXBUFFER | QDPC_DMA_TX_NOCRC),
&txq->pd_hwdesc[i].dma_control);
qdpc_pci_writel(0, &txq->pd_hwdesc[i].dma_status);
mmiowb(); /* Flush out writes */
#ifdef QTN_PCIE_USE_LOCAL_BUFFER
/* X86 only attach local buffer */
txq->pd_desc[i].dd_metadata = &priv->dsdata[i];
#endif
}
/* Mark end of buffer */
qdpc_pci_writel((QDPC_DMA_LAST_DESC) | qdpc_pci_readl(&txq->pd_hwdesc[i-1].dma_control),
&txq->pd_hwdesc[i-1].dma_control);
txq->pd_lastdesc = &txq->pd_desc[i-1];
txq->pd_firstdesc = &txq->pd_desc[0];
txq->pd_nextdesc = txq->pd_firstdesc;
printk("ringsize = %d txq->pd_hwdesc = 0x%p\n", ringsize, &txq->pd_hwdesc[0]);
for (i = 0; i < (ringsize < 16 ? ringsize : 16); i++)
printk("0x%p:td0=0x%x t1=0x%x t2=0x%x\n", &txq->pd_hwdesc[i],
qdpc_pci_readl(&(txq->pd_hwdesc[i].dma_status)),
qdpc_pci_readl(&(txq->pd_hwdesc[i].dma_control)),
qdpc_pci_readl(&(txq->pd_hwdesc[i].dma_data)));
return 0;
}
int qdpc_dmainit_rxq(struct qdpc_priv *priv)
{
uint32_t i;
qdpc_dmadesc_t *rx_hwdesc = NULL;
uint32_t buffer_size = QDPC_DMA_MAXBUF;
qdpc_pdring_t *rxq = &priv->pktq.pkt_usq;
uint32_t ringsize = priv->epmem.eps_usdma_ndesc;
#if !defined(QTN_PCIE_USE_LOCAL_BUFFER)
struct sk_buff *skb;
#endif
spin_lock_init(&rxq->pd_lock);
rxq->pd_ringsize = ringsize;
/* X86 workaround to use local buffer */
#ifdef QTN_PCIE_USE_LOCAL_BUFFER
priv->usdata = kmalloc(ringsize * sizeof(qdpc_dmadata_t), GFP_KERNEL | GFP_DMA);
if (!(priv->usdata)) {
printk("ERROR: RX DMA allocation failed.\n");
return -1;
}
printk("RX DMA bounce buffers: 0x%p\n", priv->usdata);
#endif
rxq->pd_hwdesc = (qdpc_dmadesc_t *)QDPC_BAR_VADDR(priv->epmem_bar, priv->epmem.eps_usdma_desc);
for (i = 0 ; i < ringsize; i++) {
rx_hwdesc = &rxq->pd_hwdesc[i];
rxq->pd_desc[i].dd_hwdesc = &rxq->pd_hwdesc[i];
/* X86 workaround to use local buffer */
#ifdef QTN_PCIE_USE_LOCAL_BUFFER
rxq->pd_desc[i].dd_metadata = &priv->usdata[i];
rxq->pd_desc[i].dd_vaddr = rxq->pd_desc[i].dd_metadata;
#else
skb = qdpc_get_skb(priv, buffer_size);
skb->len = 0;
rxq->pd_desc[i].dd_metadata = (void*)skb;
rxq->pd_desc[i].dd_vaddr = skb->data;
memset(skb->data, 0, buffer_size);
#endif
rxq->pd_desc[i].dd_size = buffer_size;
rxq->pd_desc[i].dd_paddr = pci_map_single(priv->pdev, rxq->pd_desc[i].dd_vaddr,
rxq->pd_desc[i].dd_size, PCI_DMA_FROMDEVICE);
#if !defined(CONFIG_ARCH_ARC)
if (pci_dma_mapping_error(priv->pdev, rxq->pd_desc[i].dd_paddr)) {
printk("%s(), ERROR: pci_map_single fails.\n", __func__);
return -1;
}
#endif
qdpc_pci_writel(rxq->pd_desc[i].dd_paddr + priv->epmem.eps_dma_offset,
&rx_hwdesc->dma_data);
qdpc_pci_writel((buffer_size & QDPC_DMA_LEN_MASK), &rx_hwdesc->dma_control);
qdpc_pci_writel(0, &rx_hwdesc->dma_ptr);
qdpc_pci_writel(QDPC_DMA_OWN, &rx_hwdesc->dma_status);
mmiowb(); /* Flush out writes */
}
/* Mark end of buffer */
qdpc_pci_writel((QDPC_DMA_LAST_DESC) | qdpc_pci_readl(&rx_hwdesc->dma_control),
&rx_hwdesc->dma_control);
rxq->pd_desc[rxq->pd_ringsize - 1].dd_flags |= QDPC_DMA_LAST_DESC;
rxq->pd_lastdesc = &rxq->pd_desc[rxq->pd_ringsize - 1];
rxq->pd_firstdesc = &rxq->pd_desc[0];
rxq->pd_nextdesc = rxq->pd_firstdesc;
printk("ringsize = %d rxq->pd_hwdesc = 0x%p\n", ringsize, &rxq->pd_hwdesc[0]);
for (i = 0; i < (ringsize < 16 ? ringsize : 16); i++) {
printk("0x%p:rd0=0x%x r1=0x%x r2=0x%x\n", &rxq->pd_hwdesc[i],
qdpc_pci_readl(&(rxq->pd_hwdesc[i].dma_status)),
qdpc_pci_readl(&(rxq->pd_hwdesc[i].dma_control)),
qdpc_pci_readl(&(rxq->pd_hwdesc[i].dma_data)));
}
return 0;
}
int qdpc_dmauninit_txq(struct qdpc_priv *priv)
{
#ifdef QTN_PCIE_USE_LOCAL_BUFFER
kfree(priv->dsdata);
#endif
return 0;
}
int qdpc_dmauninit_rxq(struct qdpc_priv *priv)
{
#ifdef QTN_PCIE_USE_LOCAL_BUFFER
kfree(priv->usdata);
#endif
return 0;
}
void qdpc_datapath_init(struct qdpc_priv *priv)
{
/* Init DMA ring buffers */
priv->pktq.pkt_usq.pd_nextdesc = NULL;
priv->pktq.pkt_dsq.pd_nextdesc = NULL;
return;
}
void qdpc_datapath_uninit(struct qdpc_priv *priv)
{
qdpc_dmauninit_txq(priv);
qdpc_dmauninit_rxq(priv);
}

View File

@ -0,0 +1,34 @@
#
# Makefile for Intel platform
#
COMMON_DIR := ../common
MAIN_INCLUDES := $(PWD)/../../include
INCLUDES := -I$(COMMON_DIR) -I$(PWD) -I$(MAIN_INCLUDES)
EXTRA_CFLAGS += -Winline -Wall -O2 $(INCLUDES)
EXTRA_CFLAGS += -DQTN_PCIE_USE_LOCAL_BUFFER
#EXTRA_CFLAGS += -DDEBUG
KVERSION = $(shell uname -r)
default: all
qdpc-host-objs := $(COMMON_DIR)/qdpc_ring.o \
$(COMMON_DIR)/qdpc_net.o \
$(COMMON_DIR)/qdpc_pcie.o \
$(COMMON_DIR)/qdpc_init.o
obj-m := qdpc-host.o
qdpc_host.o: $(qdpc-host-objs)
ld -r $^ -o $@
all:
make -C /lib/modules/$(KVERSION)/build M=$(PWD) modules
clean:
make -C /lib/modules/$(KVERSION)/build M=$(PWD) clean
rm -rf $(COMMON_DIR)/.*.cmd $(COMMON_DIR)/.tmp_versions
rm -rf Module.markers modules.order *~ $(qdpc-host-objs)

View File

@ -0,0 +1,39 @@
/**
* Copyright (c) 2011-2013 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#ifndef __QDPC_PFDEP_H__
#define __QDPC_PFDEP_H__
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
#define IOREMAP ioremap_wc
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) */
#define IOREMAP ioremap
#endif
#define qdpc_platform_init() 0
#define qdpc_platform_exit() do { } while(0)
#define qdpc_platform_xfer(dst, src, len) memcpy_toio(dst, src, len)
#define SRAM_TEXT
#define SRAM_DATA
#endif /* __QDPC_PFDEP_H__ */

View File

@ -0,0 +1,24 @@
#
# Makefile for Lantiq platform
#
COMMON_DIR := ../common
EXTRA_CFLAGS += -DOUTBOUND_HW_SWAP
default: all
qdpc-host-objs := $(COMMON_DIR)/qdpc_ring.o \
$(COMMON_DIR)/qdpc_net.o \
$(COMMON_DIR)/qdpc_pcie.o \
$(COMMON_DIR)/qdpc_init.o
obj-m := qdpc-host.o
all:
$(MAKE) -C $(kernel_source) SUBDIRS=`pwd` modules;
clean:
rm -rf *.o *~ core .depend .*.cmd *.ko *.mod.c .tmp_versions Module.symvers
rm -rf $(COMMON_DIR)/.*.cmd $(COMMON_DIR)/.tmp_versions
rm -rf Module.markers modules.order $(qdpc-host-objs)

View File

@ -0,0 +1,39 @@
/**
* Copyright (c) 2011-2013 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#ifndef __QDPC_PFDEP_H__
#define __QDPC_PFDEP_H__
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
#define IOREMAP ioremap_wc
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) */
#define IOREMAP ioremap
#endif
#define qdpc_platform_init() 0
#define qdpc_platform_exit() do { } while(0)
#define qdpc_platform_xfer(dst, src, len) memcpy_toio(dst, src, len)
#define SRAM_TEXT
#define SRAM_DATA
#endif /* __QDPC_PFDEP_H__ */

View File

@ -0,0 +1,57 @@
#
# Makefile for Quantenna RC platform
#
#
PCIE_HOST_BASE = ../drivers/pcie/host/
EXTRA_CFLAGS += -Wall \
-I../include \
-I$(src)/../../../drivers \
-I$(src)/../../../include \
-I$(src)/../../include \
-I$(src)/../common \
-I$(src)/../quantenna
EXTRA_CFLAGS += -mlong-calls
ifeq (${PCIE_HOST_CRUMBS},1)
EXTRA_CFLAGS += -finstrument-functions
endif
#EXTRA_CFLAGS += -DDEBUG
ifneq ($(KERNELRELEASE),)
COMMON_DIR := ../common
qdpc-host-objs := $(COMMON_DIR)/qdpc_ring.o \
$(COMMON_DIR)/qdpc_net.o \
$(COMMON_DIR)/qdpc_pcie.o \
$(COMMON_DIR)/qdpc_init.o
obj-m := qdpc-host.o
else
KERNELDIR ?= ../../../../linux
INSTALL = INSTALL_MOD_PATH=../linux/modules
CROSS = ARCH=arc CROSS_COMPILE=/usr/local/ARC/gcc/bin/arc-linux-uclibc-
PWD := $(shell pwd)
default:
$(MAKE) -C $(KERNELDIR) $(CROSS) M=$(PWD) modules
install:
$(MAKE) -C $(KERNELDIR) $(CROSS) $(INSTALL) M=$(PWD) modules_install
endif
clean:
rm -rf *.o *~ core .depend .*.cmd *.ko *.mod.c .tmp_versions Module.symvers modules.order
rm -rf $(COMMON_DIR)/.*.cmd $(COMMON_DIR)/.tmp_versions $(COMMON_DIR)/*.o
depend .depend dep:
$(CC) $(CFLAGS) -M *.c > .depend
ifeq (.depend,$(wildcard .depend))
include .depend
endif

View File

@ -0,0 +1,39 @@
/**
* Copyright (c) 2011-2013 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#ifndef __QDPC_PFDEP_H__
#define __QDPC_PFDEP_H__
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
#define IOREMAP ioremap_nocache
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) */
#define IOREMAP ioremap
#endif
#define qdpc_platform_init() 0
#define qdpc_platform_exit() do { } while(0)
#define qdpc_platform_xfer(dst, src, len) memcpy_toio(dst, src, len)
#define SRAM_TEXT __sram_text
#define SRAM_DATA __sram_data
#endif /* __QDPC_PFDEP_H__ */

View File

@ -0,0 +1,373 @@
/**
* Copyright (c) 2011-2013 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#ifndef __QDPC_CONFIG_H__
#define __QDPC_CONFIG_H__
/* Global debug log message function mask definitions for the pcie module */
#define QDRV_LF_TRACE 0x00000001
#define QDRV_LF_WARN 0x00000002
#define QDRV_LF_ERROR 0x00000004
#define QDRV_LF_PKT_RX 0x00000008
#define QDRV_LF_PKT_TX 0x00000010
#define QDRV_LF_ALL 0x0000001F
#define DBG_LM DBG_LM_QPCIE
#define DBGFMT "%s-%d: "
#define DBGARG __func__, __LINE__
#define DBGPRINTF(ll, lf, fmt, ...) \
do { \
if(printk_ratelimit()) { \
printk(DBGFMT fmt, DBGARG, ##__VA_ARGS__); \
} \
} while(0)
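/*
* Illustrative use of the debug macro (example only, not taken from the
* driver sources):
*
*     DBGPRINTF(DBG_LL_WARNING, QDRV_LF_PKT_TX, "queued %u bytes\n", len);
*
* Note that in this build the level and flag arguments are accepted for
* API compatibility only; the macro simply rate-limits printk().
*/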
/* Configuration parameters for desc ring */
#define QDPC_PCIE_AGGRESSIVE_LEVEL 0
#define QDPC_DESC_RING_SIZE 256 /* Max. number of descriptors in a ring */
#define QDPC_STOP_QUEUE_TIMER_DELAY (20)
#if (QDPC_PCIE_AGGRESSIVE_LEVEL==1)
#define QDPC_USE_SKB_RECYCLE 1
#define QDPC_DESC_USRING_SIZE (QDPC_DESC_RING_SIZE >> 1)
#define QDPC_DESC_DSRING_SIZE (QDPC_DESC_RING_SIZE >> 1)
#define QDPC_VETH_TX_LOW_WATERMARK ((QDPC_DESC_RING_SIZE >> 3) + 8)
#define QDPC_VETH_RX_LOW_WATERMARK ((QDPC_DESC_RING_SIZE >> 1) - (QDPC_VETH_TX_LOW_WATERMARK) + 16)
#else
#define QDPC_DESC_USRING_SIZE (QDPC_DESC_RING_SIZE >> 1)
#define QDPC_DESC_DSRING_SIZE (QDPC_DESC_RING_SIZE >> 1)
#define QDPC_VETH_TX_LOW_WATERMARK ((QDPC_DESC_RING_SIZE >> 2) + 8)
#define QDPC_VETH_RX_LOW_WATERMARK (QDPC_DESC_RING_SIZE >> 1)
#endif
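/*
* With the default (non-aggressive) settings above and a 256-entry ring
* this works out to 128 descriptors per direction, a TX low watermark of
* (256 >> 2) + 8 = 72 free descriptors and an RX low watermark of 128.
*/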
#define QDPC_SHARED_MEM_SIZE (sizeof(qdpc_epbuf_t))
#define QDPC_DMA_ALIGN 8
#define QDPC_MIN_MTU 60
#define QDPC_MAX_MTU 1518
#define QDPC_MIN_FRAMESZ 64
#define QDPC_DMA_MAXBUF 1600
#define QDPC_DMA_MINBUF 64
#define QDPC_MAX_FRAMESZ QDPC_DMA_MAXBUF
#define QDPC_MAX_JABBER ((QDPC_MAX_FRAMESZ) + 128)
#define QDPC_MAC_ADDR_SIZE 6
#define QDPC_PKT_DATA_OFFSET 40 /* NET_IP_ALIGN+NET_SKB_PAD */
#define QDPC_PKT_DATA_INTF_GAP 64 /* Inter-Frame Gap */
#define QDPC_PKT_HEADROOM (NET_IP_ALIGN + NET_SKB_PAD)
#define QDPC_H2EP_INTERRUPT_BIT BIT(31)
#define QDPC_H2EP_INTERRUPT_MASK BIT(31)
#define QDPC_EP_TXDONE BIT(1)
#define QDPC_EP_RXDONE BIT(2)
#define QDPC_EP_STOPQ BIT(3)
#define QDPC_HOST_TXREADY BIT(1)
#define QDPC_HOST_ENABLEDMA BIT(2)
#define QDPC_HOST_TXDONE BIT(3)
#define QDPC_HOST_START_RX BIT(4)
#define QDPC_DMA_OWN BIT(31)
#define QDPC_DMA_LAST_DESC BIT(26)
#define QDPC_DMA_TX_LASTSEG BIT(30)
#define QDPC_DMA_TX_FIRSTSEG BIT(29)
#define QDPC_DMA_TX_NOCRC BIT(28)
#define QDPC_DMA_RX_LASTSEG BIT(29)
#define QDPC_DMA_RX_FIRSTSEG BIT(30)
#define QDPC_DMA_MASK_BITS(nbits) ((nbits) ? (BIT(nbits) - 1) : 0)
#define QDPC_DMA_LEN_MASK (QDPC_DMA_MASK_BITS(12))
#define QDPC_DMA_RXLEN(x) ((x) & QDPC_DMA_LEN_MASK)
#define QDPC_DMA_OWNED(x) ((x) & QDPC_DMA_OWN)
#define QDPC_DMA_SINGLE_TXBUFFER (QDPC_DMA_TX_LASTSEG | QDPC_DMA_TX_FIRSTSEG)
#define QDPC_DMA_SINGLE_RXBUFFER (QDPC_DMA_RX_LASTSEG | QDPC_DMA_RX_FIRSTSEG)
#define QDPC_DMA_SINGLE_BUFFER(x) (((x) & QDPC_DMA_SINGLE_RXBUFFER) == QDPC_DMA_SINGLE_RXBUFFER)
#define QDPC_DMA_TX_DONE BIT(31)
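/*
* Worked example (mirrors qdpc_send_packet): a single-buffer TX descriptor
* for a 1500-byte frame sitting at the end of the ring is programmed as
*
*     QDPC_DMA_SINGLE_TXBUFFER | QDPC_DMA_TX_NOCRC | 1500 | QDPC_DMA_LAST_DESC
*
* i.e. FIRSTSEG and LASTSEG set, no CRC generation, the length in the low
* 12 bits and the end-of-ring marker; writing QDPC_DMA_OWN to dma_status
* afterwards hands the descriptor to the DMA engine.
*/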
/*
* The Type/Length field is used to tell data packets from netlink packets
* (the call_qcsapi remote interface). A type of 0x0601 together with a
* MAC magic number (Quantenna OUI) marks a netlink packet.
*/
#define QDPC_APP_NETLINK_TYPE 0x0601
#define QDPC_NETLINK_DST_MAGIC "\x00\x26\x86\x00\x00\x00"
#define QDPC_NETLINK_SRC_MAGIC "\x00\x26\x86\x00\x00\x00"
typedef struct qdpc_cmd_hdr {
uint8_t dst_magic[ETH_ALEN];
uint8_t src_magic[ETH_ALEN];
__be16 type;
__be16 len;
}qdpc_cmd_hdr_t;
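/*
* A minimal sketch (illustrative only, not taken from this driver) of how
* a receive path could tell such a control frame apart from plain data
* using the header above:
*
*     static int qdpc_is_cmd_frame(const struct sk_buff *skb)
*     {
*         const qdpc_cmd_hdr_t *hdr = (const qdpc_cmd_hdr_t *)skb->data;
*
*         return skb->len >= sizeof(*hdr) &&
*                hdr->type == htons(QDPC_APP_NETLINK_TYPE) &&
*                memcmp(hdr->dst_magic, QDPC_NETLINK_DST_MAGIC, ETH_ALEN) == 0 &&
*                memcmp(hdr->src_magic, QDPC_NETLINK_SRC_MAGIC, ETH_ALEN) == 0;
*     }
*
* The helper name is hypothetical; the real classification lives elsewhere
* in the Quantenna host code.
*/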
/* State of the driver. This is used to synchronize with the target during init. */
enum qdpc_drv_state
{
QDPC_NO_INIT, /* Uninitialized */
QDPC_INIT, /* Init */
QDPC_CONN_EST, /* In sync state */
QDPC_EXIT
};
struct qdpc_init_info {
uint32_t handshake;
uint32_t remote_mem;
uint32_t remote_memsz;
uint8_t mac_addr[QDPC_MAC_ADDR_SIZE]; /* MAC of Ruby board */
};
typedef struct qdpc_init_info qdpc_init_info_t;
/*
* Use this as the start to specify the structure of the shared memory area with the EP.
*
* The area is mapped as both inbound and outbound
* On the EP it starts at 0xC2000000 as before
*
*
* The EMAC0 DMA will read from the descriptor block below across the PCIe bus
* The 4Mb shared memory buffer on the EP is no longer necessary
*
* Ignore the work OC has done
*
* For RX: The RX DMA descriptors map to the top of the buffer as shown
* and continue to use the OC buffer scheme for now.
* Once the TX buffer descriptor work is completed, this will be removed and
* a similar DMA buffer scheme like the TX one will be used as its replacement
* For TX: On X86 memcpy the skb data to the TX data area and update the descriptor
*/
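/*
* Concretely, the host reaches this area through the shared-memory BAR:
* the qdpc_pcie_bda_t block sits at offset 0, the qdpc_epshmem_hdr_t
* header follows immediately after it (see qdpc_dma_setup()), and the
* descriptor rings are located via the eps_dsdma_desc/eps_usdma_desc
* offsets that header carries.
*/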
/* Hardware DMA descriptor
* This is the hardware format used by the Arasan EMAC
*/
typedef struct qdpc_dmadesc {
uint32_t dma_status;
uint32_t dma_control;
uint32_t dma_data;
uint32_t dma_ptr;
} __attribute__ ((packed)) qdpc_dmadesc_t;
/* DMA data area */
typedef struct qdpc_dmadata {
uint8_t dma_data[QDPC_DMA_MAXBUF];
} __attribute__ ((packed)) qdpc_dmadata_t;
#include "queue.h"
/*
* TX software descriptor
* Must be aligned on a 32bit boundary
*/
struct qdpc_desc {
/* hardware DMA descriptor */
qdpc_dmadesc_t *dd_hwdesc;
/* Metadata pointer used for things like the skb associated with the buffer */
void *dd_metadata;
void *dd_vaddr;
uint32_t dd_paddr;
size_t dd_size;
uint32_t dd_flags;
uint64_t dd_qtime;
/* Linked list entry */
STAILQ_ENTRY(qdpc_desc) dd_entry;
} __attribute__ ((aligned (8)));
typedef struct qdpc_desc qdpc_desc_t;
/* Packet DMA ring */
typedef struct qdpc_pdring {
/* Size of ring */
uint32_t pd_ringsize;
/*
* Number of free descriptors
*/
uint32_t pd_nfree;
uint32_t pd_npending;
uint32_t pd_tx_basereg;
uint32_t pd_rx_basereg;
uint32_t pd_highwatermark;
uint32_t pd_lowatermark;
/* Location of the next free descriptor
* Initialize to the first descriptor in the ring.
* It must increase in one direction only and wrap around when the end of the ring is reached.
* Increase by 1 each time a pkt is queued
*/
qdpc_desc_t *pd_nextdesc;
qdpc_desc_t *pd_lastdesc;
qdpc_desc_t *pd_firstdesc;
/* Start of HW descriptor block */
qdpc_dmadesc_t *pd_hwdesc;
/* Bus Address of HW descriptor block */
uint32_t pd_src_busaddr;
uint32_t pd_dst_busaddr;
void * pd_handle;
/* txq spinlock */
spinlock_t pd_lock; /* Packet enqueue/dequeue lock */
/* Status flags */
uint32_t pd_f_dma_active:1; /* DMA fetch in progress */
/* Pending list of buffers owned by DMA hardware */
STAILQ_HEAD(qdpc_pdpend, qdpc_desc) pd_pending;
/* Descriptor pool */
qdpc_desc_t pd_desc[QDPC_DESC_RING_SIZE];
uint8_t *src_macaddr;
} qdpc_pdring_t;
typedef struct qdpc_pktring {
qdpc_pdring_t pkt_usq;
qdpc_pdring_t pkt_dsq;
} qdpc_pktring_t;
#define QDPC_PCIE_BDA_VERSION 0x1000
#define QDPC_BDA_PCIE_INIT 0x01
#define QDPC_BDA_PCIE_RDY 0x02
#define QDPC_BDA_FW_LOAD_RDY 0x03
#define QDPC_BDA_FW_LOAD_DONE 0x04
#define QDPC_BDA_FW_START 0x05
#define QDPC_BDA_FW_RUN 0x06
#define QDPC_BDA_FW_HOST_RDY 0x07
#define QDPC_BDA_FW_TARGET_RDY 0x11
#define QDPC_BDA_FW_TARGET_BOOT 0x12
#define QDPC_BDA_FW_FLASH_BOOT 0x13
#define QDPC_BDA_FW_HOST_LOAD 0x08
#define QDPC_BDA_FW_BLOCK_DONE 0x09
#define QDPC_BDA_FW_BLOCK_RDY 0x0A
#define QDPC_BDA_FW_EP_RDY 0x0B
#define QDPC_BDA_FW_BLOCK_END 0x0C
#define QDPC_BDA_FW_CONFIG 0x0D
#define QDPC_BDA_FW_RUNNING 0x0E
#define QDPC_BDA_PCIE_FAIL 0x82
#define QDPC_BDA_FW_LOAD_FAIL 0x85
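/*
* The bootstate values above drive the host/EP firmware-load handshake.
* Judging by the names, the expected progression is PCIE_INIT -> PCIE_RDY
* -> FW_LOAD_RDY -> (FW_HOST_LOAD and FW_BLOCK_* while the image is
* streamed) -> FW_LOAD_DONE -> FW_START -> FW_RUN, with the 0x8x codes
* reporting failures.
*/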
#define PCIE_BDA_RCMODE BIT(1)
#define PCIE_BDA_MSI BIT(2)
#define PCIE_BDA_BAR64 BIT(3)
#define PCIE_BDA_FLASH_PRESENT BIT(4) /* Tell the Host whether the EP has flash containing firmware */
#define PCIE_BDA_FLASH_BOOT BIT(5) /* Tell TARGET to boot from flash */
#define PCIE_BDA_TARGET_FBOOT_ERR BIT(8) /* TARGET flash boot failed */
#define PCIE_BDA_TARGET_FWLOAD_ERR BIT(9) /* TARGET firmware load failed */
#define PCIE_BDA_HOST_NOFW_ERR BIT(12) /* Host could not find any firmware */
#define PCIE_BDA_HOST_MEMALLOC_ERR BIT(13) /* Host failed to allocate the firmware download memory block */
#define PCIE_BDA_HOST_MEMMAP_ERR BIT(14) /* Host failed to PCI-map the firmware download memory block */
#define PCIE_BDA_VER(x) (((x) >> 4) & 0xFF)
#define PCIE_BDA_ERROR_MASK 0xFF00 /* take the second 8 bits as error flag */
#define PCIE_BDA_NAMELEN 32
#define QDPC_PCI_ENDIAN_DETECT_DATA 0x12345678
#define QDPC_PCI_ENDIAN_REVERSE_DATA 0x78563412
#define QDPC_PCI_ENDIAN_VALID_STATUS 0x3c3c3c3c
#define QDPC_PCI_ENDIAN_INVALID_STATUS 0
#define QDPC_PCI_LITTLE_ENDIAN 0
#define QDPC_PCI_BIG_ENDIAN 0xffffffff
#define QDPC_SCHED_TIMEOUT (HZ / 20)
typedef struct qdpc_pcie_board_cfg {
int bc_board_id;
char *bc_name; /* optional name of cfg */
int bc_ddr_type; /* ID */
int bc_ddr_speed; /* speed in MHz */
int bc_ddr_size; /* in bytes */
int bc_emac0; /* in use? */
int bc_emac1; /* in use? */
int bc_phy0_addr; /* address */
int bc_phy1_addr; /* address */
int bc_spi1; /* in use? */
int bc_wifi_hw; /* WiFi hardware type */
int bc_uart1; /* in use? */
int bc_pcie; /* in use? */
int bc_rgmii_timing; /* special timing value for RGMII */
} __attribute__ ((packed)) qdpc_pcie_board_cfg_t;
/* There is a copy named ruby_pcie_bda_t in ruby_pcie_bda.h they must be the same */
typedef struct qdpc_pcie_bda {
uint16_t bda_len; /* Size of BDA block */
uint16_t bda_version; /* BDA version */
uint32_t bda_bootstate; /* Boot state of device */
uint32_t bda_dma_mask; /* Number of addressable DMA bits */
uint32_t bda_dma_offset; /* HW specific offset for DMA engine */
uint32_t bda_flags;
uint32_t bda_img; /* Current load image block */
uint32_t bda_img_size; /* Current load image block size */
uint32_t bda_ep2h_irqstatus; /* Added here to allow boot loader to use irqs if desired */
uint32_t bda_h2ep_irqstatus; /* Added here to allow boot loader to use irqs if desired */
uint32_t bda_msi_addr;
qdpc_pcie_board_cfg_t bda_boardcfg;
uint32_t bda_flashsz;
char bda_boardname[PCIE_BDA_NAMELEN];
/* Warning: these two fields are re-used at run time to store the host MAC address. */
uint32_t bda_pci_pre_status; /* PCI endian check previous status */
uint32_t bda_pci_endian; /* Check pci memory endian format */
uint32_t bda_pci_post_status; /* PCI endian check post status */
int32_t bda_h2ep_txd_budget; /* txdone replenish budget for ep */
int32_t bda_ep2h_txd_budget; /* txdone replenish budget for host */
} __attribute__ ((packed)) qdpc_pcie_bda_t;
typedef struct qdpc_epshmem_hdr {
uint32_t eps_mapsize; /* Iomap size for PCIe hardware */
uint32_t eps_ver; /* Header version */
uint32_t eps_size; /* Size of shared memory area */
uint32_t eps_dma_offset; /* HW dependent DMA offset */
uint32_t eps_dsdma_desc; /* Downstream Descriptor offset */
uint32_t eps_usdma_desc; /* Upstream Descriptor offset */
uint32_t eps_dsdma_ndesc; /* Number of downstream descriptors */
uint32_t eps_usdma_ndesc; /* Number of upstream descriptors */
uint32_t eps_align;
uint32_t eps_maxbuf;
uint32_t eps_minbuf;
} __attribute__ ((packed)) qdpc_epshmem_hdr_t;
#endif /* __QDPC_CONFIG_H__ */

View File

@ -0,0 +1,54 @@
/**
* Copyright (c) 2011-2013 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#ifndef __QDPC_DEBUG_H__
#define __QDPC_DEBUG_H__
/* Debug macros */
#define SUCCESS 0
#define FAILURE -1
#ifdef DEBUG
#define PRINT_DBG(format, ...) printk(KERN_DEBUG format, ##__VA_ARGS__)
#else
#define PRINT_DBG(format, ...) do { } while(0);
#endif
#define PRINT_ERROR(format, ...) printk(KERN_ERR format, ##__VA_ARGS__)
#define PRINT_INFO(format, ...) printk(KERN_INFO format, ##__VA_ARGS__)
#ifdef DEBUG
#define qdpc_print_dump(str_, buf_, len_) \
{ \
u32 i = 0; \
printk("%s\n", str_); \
printk("0x%04X : ", i*8); \
for (i=0; i < (u32)(len_); i++) { \
if (i && ((i%8) == 0)) { \
printk( "%s", "\n"); \
printk("0x%04X : ", (i));\
} \
printk("%02x ", (buf_)[i]); \
} \
printk("\n%s\n", str_); \
}
#else
#define qdpc_print_dump(str_, buf_, len_)
#endif
#endif

View File

@ -0,0 +1,68 @@
/**
* Copyright (c) 2011-2013 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#ifndef __QDPC_RUBY_H__
#define __QDPC_RUBY_H__
#define QDPC_SYSCTL_INTREG 0x2c
#define QDPC_SYSCTL_INTMASK 0x30
/* DMA Base Address */
#define QDPC_DMA_BASE_ADDR 0xEA000000
/* SRAM/DDR2 Base Address */
#define QDPC_SRAM_BASE_ADDR 0x80000000
#define QDPC_DDR2_BASE_ADDR 0x88000000
#define QDPC_PCIE_BASE_ADDR 0xc2000000
#define QDPC_SYSCTL_REG_BASE 0xe0000000
#define QDPC_SYSCTL_REG_INTSTS 0x2c
#define QDPC_SYSCTL_REG_INTMSK 0x30
/* ATU register offsets in the configuration space */
/* View port register */
#define QDPC_ATU_VIEWPORT_REG 0x900
/* CTRL1 register */
#define QDPC_ATU_CTRL1_REG 0x904
/* CTRL2 register */
#define QDPC_ATU_CTRL2_REG 0x908
/* LBAR register */
#define QDPC_ATU_LBAR_REG 0x90c
/* UBAR register */
#define QDPC_ATU_UBAR_REG 0x910
/* LAR register */
#define QDPC_ATU_LAR_REG 0x914
/* LTAR register */
#define QDPC_ATU_LTAR_REG 0x918
/* UTAR register */
#define QDPC_ATU_UTAR_REG 0x91c
#define QDPC_ATU_OB_REGION 0x00000000
#define QDPC_ATU_IB_REGION 0x80000001
#define QDPC_ATU_EN_OB_REGION 0x80000000
#endif

View File

@ -0,0 +1,561 @@
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: queue.h 1441 2006-02-06 16:03:21Z mrenzmann $
*/
#ifndef _SYS_QUEUE_H_
#define _SYS_QUEUE_H_
/*
* This file defines four types of data structures: singly-linked lists,
* singly-linked tail queues, lists and tail queues.
*
* A singly-linked list is headed by a single forward pointer. The elements
* are singly linked for minimum space and pointer manipulation overhead at
* the expense of O(n) removal for arbitrary elements. New elements can be
* added to the list after an existing element or at the head of the list.
* Elements being removed from the head of the list should use the explicit
* macro for this purpose for optimum efficiency. A singly-linked list may
* only be traversed in the forward direction. Singly-linked lists are ideal
* for applications with large datasets and few or no removals or for
* implementing a LIFO queue.
*
* A singly-linked tail queue is headed by a pair of pointers, one to the
* head of the list and the other to the tail of the list. The elements are
* singly linked for minimum space and pointer manipulation overhead at the
* expense of O(n) removal for arbitrary elements. New elements can be added
* to the list after an existing element, at the head of the list, or at the
* end of the list. Elements being removed from the head of the tail queue
* should use the explicit macro for this purpose for optimum efficiency.
* A singly-linked tail queue may only be traversed in the forward direction.
* Singly-linked tail queues are ideal for applications with large datasets
* and few or no removals or for implementing a FIFO queue.
*
* A list is headed by a single forward pointer (or an array of forward
* pointers for a hash table header). The elements are doubly linked
* so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before
* or after an existing element or at the head of the list. A list
* may only be traversed in the forward direction.
*
* A tail queue is headed by a pair of pointers, one to the head of the
* list and the other to the tail of the list. The elements are doubly
* linked so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before or
* after an existing element, at the head of the list, or at the end of
* the list. A tail queue may be traversed in either direction.
*
* For details on the use of these macros, see the queue(3) manual page.
*
*
* SLIST LIST STAILQ TAILQ
* _HEAD + + + +
* _HEAD_INITIALIZER + + + +
* _ENTRY + + + +
* _INIT + + + +
* _EMPTY + + + +
* _FIRST + + + +
* _NEXT + + + +
* _PREV - - - +
* _LAST - - + +
* _FOREACH + + + +
* _FOREACH_SAFE + + + +
* _FOREACH_REVERSE - - - +
* _FOREACH_REVERSE_SAFE - - - +
* _INSERT_HEAD + + + +
* _INSERT_BEFORE - + - +
* _INSERT_AFTER + + + +
* _INSERT_TAIL - - + +
* _CONCAT - - + +
* _REMOVE_HEAD + - + -
* _REMOVE + + + +
*
*/
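/*
* Quick STAILQ usage sketch (illustrative only; the PCIe host driver uses
* the same pattern for its pd_pending descriptor list):
*
*     STAILQ_HEAD(pendhead, qdpc_desc) pending;
*     struct qdpc_desc *desc;
*
*     STAILQ_INIT(&pending);
*     STAILQ_INSERT_TAIL(&pending, new_desc, dd_entry);
*     while ((desc = STAILQ_FIRST(&pending)) != NULL)
*         STAILQ_REMOVE_HEAD(&pending, dd_entry);
*
* where new_desc is a previously prepared struct qdpc_desc embedding an
* STAILQ_ENTRY(qdpc_desc) named dd_entry.
*/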
#define QUEUE_MACRO_DEBUG 0
#if QUEUE_MACRO_DEBUG
/* Store the last 2 places the queue element or head was altered */
struct qm_trace {
char *lastfile;
int lastline;
char *prevfile;
int prevline;
};
#define TRACEBUF struct qm_trace trace;
#define TRASHIT(x) do {(x) = (void *)-1;} while (0)
#define QMD_TRACE_HEAD(head) do { \
(head)->trace.prevline = (head)->trace.lastline; \
(head)->trace.prevfile = (head)->trace.lastfile; \
(head)->trace.lastline = __LINE__; \
(head)->trace.lastfile = __FILE__; \
} while (0)
#define QMD_TRACE_ELEM(elem) do { \
(elem)->trace.prevline = (elem)->trace.lastline; \
(elem)->trace.prevfile = (elem)->trace.lastfile; \
(elem)->trace.lastline = __LINE__; \
(elem)->trace.lastfile = __FILE__; \
} while (0)
#else
#define QMD_TRACE_ELEM(elem)
#define QMD_TRACE_HEAD(head)
#define TRACEBUF
#define TRASHIT(x)
#endif /* QUEUE_MACRO_DEBUG */
/*
* Singly-linked List declarations.
*/
#define SLIST_HEAD(name, type) \
struct name { \
struct type *slh_first; /* first element */ \
}
#define SLIST_HEAD_INITIALIZER(head) \
{ NULL }
#define SLIST_ENTRY(type) \
struct { \
struct type *sle_next; /* next element */ \
}
/*
* Singly-linked List functions.
*/
#define SLIST_EMPTY(head) ((head)->slh_first == NULL)
#define SLIST_FIRST(head) ((head)->slh_first)
#define SLIST_FOREACH(var, head, field) \
for ((var) = SLIST_FIRST((head)); \
(var); \
(var) = SLIST_NEXT((var), field))
#define SLIST_FOREACH_SAFE(var, head, field, tvar) \
for ((var) = SLIST_FIRST((head)); \
(var) && ((tvar) = SLIST_NEXT((var), field), 1); \
(var) = (tvar))
#define SLIST_FOREACH_PREVPTR(var, varp, head, field) \
for ((varp) = &SLIST_FIRST((head)); \
((var) = *(varp)) != NULL; \
(varp) = &SLIST_NEXT((var), field))
#define SLIST_INIT(head) do { \
SLIST_FIRST((head)) = NULL; \
} while (0)
#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \
SLIST_NEXT((slistelm), field) = (elm); \
} while (0)
#define SLIST_INSERT_HEAD(head, elm, field) do { \
SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \
SLIST_FIRST((head)) = (elm); \
} while (0)
#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
#define SLIST_REMOVE(head, elm, type, field) do { \
if (SLIST_FIRST((head)) == (elm)) { \
SLIST_REMOVE_HEAD((head), field); \
} \
else { \
struct type *curelm = SLIST_FIRST((head)); \
while (SLIST_NEXT(curelm, field) != (elm)) \
curelm = SLIST_NEXT(curelm, field); \
SLIST_NEXT(curelm, field) = \
SLIST_NEXT(SLIST_NEXT(curelm, field), field); \
} \
} while (0)
#define SLIST_REMOVE_HEAD(head, field) do { \
SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \
} while (0)
/*
* Singly-linked Tail queue declarations.
*/
#define STAILQ_HEAD(name, type) \
struct name { \
struct type *stqh_first;/* first element */ \
struct type **stqh_last;/* addr of last next element */ \
}
#define STAILQ_HEAD_INITIALIZER(head) \
{ NULL, &(head).stqh_first }
#define STAILQ_ENTRY(type) \
struct { \
struct type *stqe_next; /* next element */ \
}
/*
* Singly-linked Tail queue functions.
*/
#define STAILQ_CONCAT(head1, head2) do { \
if (!STAILQ_EMPTY((head2))) { \
*(head1)->stqh_last = (head2)->stqh_first; \
(head1)->stqh_last = (head2)->stqh_last; \
STAILQ_INIT((head2)); \
} \
} while (0)
#define STAILQ_EMPTY(head) ((head)->stqh_first == NULL)
#define STAILQ_FIRST(head) ((head)->stqh_first)
#define STAILQ_FOREACH(var, head, field) \
for((var) = STAILQ_FIRST((head)); \
(var); \
(var) = STAILQ_NEXT((var), field))
#define STAILQ_FOREACH_SAFE(var, head, field, tvar) \
for ((var) = STAILQ_FIRST((head)); \
(var) && ((tvar) = STAILQ_NEXT((var), field), 1); \
(var) = (tvar))
#define STAILQ_INIT(head) do { \
STAILQ_FIRST((head)) = NULL; \
(head)->stqh_last = &STAILQ_FIRST((head)); \
} while (0)
#define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \
if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\
(head)->stqh_last = &STAILQ_NEXT((elm), field); \
STAILQ_NEXT((tqelm), field) = (elm); \
} while (0)
#define STAILQ_INSERT_HEAD(head, elm, field) do { \
if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \
(head)->stqh_last = &STAILQ_NEXT((elm), field); \
STAILQ_FIRST((head)) = (elm); \
} while (0)
#define STAILQ_INSERT_TAIL(head, elm, field) do { \
STAILQ_NEXT((elm), field) = NULL; \
*(head)->stqh_last = (elm); \
(head)->stqh_last = &STAILQ_NEXT((elm), field); \
} while (0)
#define STAILQ_LAST(head, type, field) \
(STAILQ_EMPTY((head)) ? \
NULL : \
((struct type *) \
((char *)((head)->stqh_last) - __offsetof(struct type, field))))
#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
#define STAILQ_REMOVE(head, elm, type, field) do { \
if (STAILQ_FIRST((head)) == (elm)) { \
STAILQ_REMOVE_HEAD((head), field); \
} \
else { \
struct type *curelm = STAILQ_FIRST((head)); \
while (STAILQ_NEXT(curelm, field) != (elm)) \
curelm = STAILQ_NEXT(curelm, field); \
if ((STAILQ_NEXT(curelm, field) = \
STAILQ_NEXT(STAILQ_NEXT(curelm, field), field)) == NULL)\
(head)->stqh_last = &STAILQ_NEXT((curelm), field);\
} \
} while (0)
#define STAILQ_REMOVE_AFTER(head, elm, field) do { \
if (STAILQ_NEXT(elm, field)) { \
if ((STAILQ_NEXT(elm, field) = \
STAILQ_NEXT(STAILQ_NEXT(elm, field), field)) == NULL)\
(head)->stqh_last = &STAILQ_NEXT((elm), field); \
} \
} while (0)
#define STAILQ_REMOVE_HEAD(head, field) do { \
if ((STAILQ_FIRST((head)) = \
STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL) \
(head)->stqh_last = &STAILQ_FIRST((head)); \
} while (0)
#define STAILQ_REMOVE_HEAD_UNTIL(head, elm, field) do { \
if ((STAILQ_FIRST((head)) = STAILQ_NEXT((elm), field)) == NULL) \
(head)->stqh_last = &STAILQ_FIRST((head)); \
} while (0)
/*
* List declarations.
*/
#define ATH_LIST_HEAD(name, type) \
struct name { \
struct type *lh_first; /* first element */ \
}
#define LIST_HEAD_INITIALIZER(head) \
{ NULL }
#define LIST_ENTRY(type) \
struct { \
struct type *le_next; /* next element */ \
struct type **le_prev; /* address of previous next element */ \
}
/*
* List functions.
*/
#define LIST_EMPTY(head) ((head)->lh_first == NULL)
#define LIST_FIRST(head) ((head)->lh_first)
#define LIST_FOREACH(var, head, field) \
for ((var) = LIST_FIRST((head)); \
(var); \
(var) = LIST_NEXT((var), field))
#define LIST_FOREACH_SAFE(var, head, field, tvar) \
for ((var) = LIST_FIRST((head)); \
(var) && ((tvar) = LIST_NEXT((var), field), 1); \
(var) = (tvar))
#define LIST_INIT(head) do { \
LIST_FIRST((head)) = NULL; \
} while (0)
#define LIST_INSERT_AFTER(listelm, elm, field) do { \
if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\
LIST_NEXT((listelm), field)->field.le_prev = \
&LIST_NEXT((elm), field); \
LIST_NEXT((listelm), field) = (elm); \
(elm)->field.le_prev = &LIST_NEXT((listelm), field); \
} while (0)
#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
(elm)->field.le_prev = (listelm)->field.le_prev; \
LIST_NEXT((elm), field) = (listelm); \
*(listelm)->field.le_prev = (elm); \
(listelm)->field.le_prev = &LIST_NEXT((elm), field); \
} while (0)
#define LIST_INSERT_HEAD(head, elm, field) do { \
if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \
LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\
LIST_FIRST((head)) = (elm); \
(elm)->field.le_prev = &LIST_FIRST((head)); \
} while (0)
#define LIST_NEXT(elm, field) ((elm)->field.le_next)
#define LIST_REMOVE(elm, field) do { \
if (LIST_NEXT((elm), field) != NULL) \
LIST_NEXT((elm), field)->field.le_prev = \
(elm)->field.le_prev; \
*(elm)->field.le_prev = LIST_NEXT((elm), field); \
} while (0)
/*
* Tail queue declarations.
*/
#define TAILQ_HEAD(name, type) \
struct name { \
struct type *tqh_first; /* first element */ \
struct type **tqh_last; /* addr of last next element */ \
TRACEBUF \
}
#define TAILQ_HEAD_INITIALIZER(head) \
{ NULL, &(head).tqh_first }
#if (!defined(TAILQ_ENTRY))
#define TAILQ_ENTRY(type) \
struct { \
struct type *tqe_next; /* next element */ \
struct type **tqe_prev; /* address of previous next element */ \
TRACEBUF \
}
#endif
/*
* Tail queue functions.
*/
#define TAILQ_CONCAT(head1, head2, field) do { \
if (!TAILQ_EMPTY(head2)) { \
*(head1)->tqh_last = (head2)->tqh_first; \
(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
(head1)->tqh_last = (head2)->tqh_last; \
TAILQ_INIT((head2)); \
QMD_TRACE_HEAD(head1); \
QMD_TRACE_HEAD(head2); \
} \
} while (0)
#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL)
#define TAILQ_FIRST(head) ((head)->tqh_first)
#define TAILQ_FOREACH(var, head, field) \
for ((var) = TAILQ_FIRST((head)); \
(var); \
(var) = TAILQ_NEXT((var), field))
#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
for ((var) = TAILQ_FIRST((head)); \
(var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
(var) = (tvar))
#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
for ((var) = TAILQ_LAST((head), headname); \
(var); \
(var) = TAILQ_PREV((var), headname, field))
#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar) \
for ((var) = TAILQ_LAST((head), headname); \
(var) && ((tvar) = TAILQ_PREV((var), headname, field), 1); \
(var) = (tvar))
#define TAILQ_INIT(head) do { \
TAILQ_FIRST((head)) = NULL; \
(head)->tqh_last = &TAILQ_FIRST((head)); \
QMD_TRACE_HEAD(head); \
} while (0)
#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\
TAILQ_NEXT((elm), field)->field.tqe_prev = \
&TAILQ_NEXT((elm), field); \
else { \
(head)->tqh_last = &TAILQ_NEXT((elm), field); \
QMD_TRACE_HEAD(head); \
} \
TAILQ_NEXT((listelm), field) = (elm); \
(elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field); \
QMD_TRACE_ELEM(&(elm)->field); \
QMD_TRACE_ELEM(&listelm->field); \
} while (0)
#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
(elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
TAILQ_NEXT((elm), field) = (listelm); \
*(listelm)->field.tqe_prev = (elm); \
(listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field); \
QMD_TRACE_ELEM(&(elm)->field); \
QMD_TRACE_ELEM(&listelm->field); \
} while (0)
#define TAILQ_INSERT_HEAD(head, elm, field) do { \
if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \
TAILQ_FIRST((head))->field.tqe_prev = \
&TAILQ_NEXT((elm), field); \
else \
(head)->tqh_last = &TAILQ_NEXT((elm), field); \
TAILQ_FIRST((head)) = (elm); \
(elm)->field.tqe_prev = &TAILQ_FIRST((head)); \
QMD_TRACE_HEAD(head); \
QMD_TRACE_ELEM(&(elm)->field); \
} while (0)
#define TAILQ_INSERT_TAIL(head, elm, field) do { \
TAILQ_NEXT((elm), field) = NULL; \
(elm)->field.tqe_prev = (head)->tqh_last; \
*(head)->tqh_last = (elm); \
(head)->tqh_last = &TAILQ_NEXT((elm), field); \
QMD_TRACE_HEAD(head); \
QMD_TRACE_ELEM(&(elm)->field); \
} while (0)
#define TAILQ_LAST(head, headname) \
(*(((struct headname *)((head)->tqh_last))->tqh_last))
#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
#define TAILQ_PREV(elm, headname, field) \
(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
#define TAILQ_REMOVE(head, elm, field) do { \
if ((TAILQ_NEXT((elm), field)) != NULL) \
TAILQ_NEXT((elm), field)->field.tqe_prev = \
(elm)->field.tqe_prev; \
else { \
(head)->tqh_last = (elm)->field.tqe_prev; \
QMD_TRACE_HEAD(head); \
} \
*(elm)->field.tqe_prev = TAILQ_NEXT((elm), field); \
TRASHIT((elm)->field.tqe_next); \
TRASHIT((elm)->field.tqe_prev); \
QMD_TRACE_ELEM(&(elm)->field); \
} while (0)
#ifdef _KERNEL
/*
* XXX insque() and remque() are an old way of handling certain queues.
* They bogusly assume that all queue heads look alike.
*/
struct quehead {
struct quehead *qh_link;
struct quehead *qh_rlink;
};
#if defined(__GNUC__) || defined(__INTEL_COMPILER)
static __inline void
insque(void *a, void *b)
{
struct quehead *element = (struct quehead *)a,
*head = (struct quehead *)b;
element->qh_link = head->qh_link;
element->qh_rlink = head;
head->qh_link = element;
element->qh_link->qh_rlink = element;
}
static __inline void
remque(void *a)
{
struct quehead *element = (struct quehead *)a;
element->qh_link->qh_rlink = element->qh_rlink;
element->qh_rlink->qh_link = element->qh_link;
element->qh_rlink = 0;
}
#else /* !(__GNUC__ || __INTEL_COMPILER) */
void insque(void *a, void *b);
void remque(void *a);
#endif /* __GNUC__ || __INTEL_COMPILER */
#endif /* _KERNEL */
#endif /* !_SYS_QUEUE_H_ */
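/*
 * Illustrative sketch, not part of the original header: a minimal example
 * of how the STAILQ macros above are typically used, mirroring the
 * pending-descriptor queue in the PCIe driver further down. The struct and
 * field names here are made up for illustration only.
 *
 *	struct item {
 *		int value;
 *		STAILQ_ENTRY(item) entries;	(embeds the next pointer)
 *	};
 *	STAILQ_HEAD(itemq, item) q = STAILQ_HEAD_INITIALIZER(q);
 *
 *	struct item a = { 1 };
 *	STAILQ_INSERT_TAIL(&q, &a, entries);	(enqueue at the tail)
 *	while (!STAILQ_EMPTY(&q))
 *		STAILQ_REMOVE_HEAD(&q, entries);	(drain from the head)
 */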

View File

@ -0,0 +1,57 @@
#
# Quantenna Communications Inc. Driver Makefile
#
# Author:
#
EXTRA_CFLAGS += -Wall \
-I../include \
-I../common \
-I../drivers/include/shared \
-I../drivers/include/kernel
EXTRA_CFLAGS += -I$(M)/pcie/include
EXTRA_CFLAGS += -mlong-calls
EXTRA_CFLAGS += -DUSE_EMAC_DMA
EXTRA_CFLAGS += -DQDPC_USE_DMA_RX_DONE_INTR
EXTRA_CFLAGS += -DQDPC_DSPIO
EXTRA_CFLAGS += -DQDPC_PHY_CROSS_MODE
#EXTRA_CFLAGS += -DCVM_HOST
#EXTRA_CFLAGS += -DDEBUG
ifeq (${PCIE_TARGET_CRUMBS},1)
EXTRA_CFLAGS += -finstrument-functions
endif
ifneq ($(KERNELRELEASE),)
qdpc-pcie-objs := qdpc_ring.o \
qdpc_emac.o \
qdpc_net.o \
qdpc_pcie.o \
qdpc_init.o
obj-m := qdpc-pcie.o
else
KERNELDIR ?= ../../../linux
INSTALL = INSTALL_MOD_PATH=../linux/modules
CROSS = ARCH=arc CROSS_COMPILE=/usr/local/ARC/gcc/bin/arc-linux-uclibc-
PWD := $(shell pwd)
default:
$(MAKE) -C $(KERNELDIR) $(CROSS) M=$(PWD) modules
install:
$(MAKE) -C $(KERNELDIR) $(CROSS) $(INSTALL) M=$(PWD) modules_install
endif
clean:
rm -rf *.o *~ core .depend .*.cmd *.ko *.mod.c .tmp_versions Module.symvers modules.order
depend .depend dep:
$(CC) $(CFLAGS) -M *.c > .depend
ifeq (.depend,$(wildcard .depend))
include .depend
endif

View File

@ -0,0 +1,405 @@
/**
* Copyright (c) 2011-2013 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#include <linux/mm.h>
#include <linux/io.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/board/board_config.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/version.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/netlink.h>
#include <common/topaz_platform.h>
#include "qdpc_config.h"
#include "qdpc_debug.h"
#include "qdpc_init.h"
#include "qdpc_emac.h"
#include "qdpc_ruby.h"
#define REG_WRITE(x,y) (writel((y),(x)))
#define REG_READ(x) (readl(x))
#define QDPC_PHY1_ADDRESS (31)
#define QDPC_PHY0_ADDRESS (24)
#define QDPC_NUM_DESC QDPC_DESC_RING_SIZE
#define QDPC_WRITE_INDEX_LEN (32 << 12)
/*
* Gather EMAC DMA rx lost statistics for netdebug function.
* Any module used for netdebug backhaul must define this function.
*/
uint32_t qtn_eth_rx_lost_get(struct net_device *dev)
{
struct qdpc_priv *priv = netdev_priv(dev);
uint32_t dsbase = priv->pktq.pkt_dsq.pd_rx_basereg;
uint32_t usbase = priv->pktq.pkt_usq.pd_rx_basereg;
uint32_t ds_lost = REG_READ(dsbase + EMAC_DMAMFC) & 0x7fffffff;
uint32_t us_lost = REG_READ(usbase + EMAC_DMAMFC) & 0x7fffffff;
return (ds_lost + us_lost);
}
EXPORT_SYMBOL(qtn_eth_rx_lost_get);
void qdpc_flush_and_inv_dcache_range(unsigned long start, unsigned long end)
{
unsigned long flags, dc_ctrl;
start &= DCACHE_LINE_MASK;
local_irq_save(flags);
/* Set the Invalidate mode to FLUSH BEFORE INV */
dc_ctrl = read_new_aux_reg(ARC_REG_DC_CTRL);
write_new_aux_reg(ARC_REG_DC_CTRL, dc_ctrl | INV_MODE_FLUSH);
/* Invoke Cache INV CMD */
while (end > start) {
write_new_aux_reg(ARC_REG_DC_IVDL, start);
start = start + ARC_DCACHE_LINE_LEN;
}
/* wait for the flush to complete, poll on the FS Bit */
while (read_new_aux_reg(ARC_REG_DC_CTRL) & DC_FLUSH_STATUS_BIT) ;
/* Switch back to the DISCARD ONLY invalidate mode */
write_new_aux_reg(ARC_REG_DC_CTRL, dc_ctrl & ~INV_MODE_FLUSH);
local_irq_restore(flags);
}
void phy_write(u32 phyaddr, u32 regn, u16 data)
{
u32 timeout = 0x10000;
/* write data */
REG_WRITE(PHY_MDIODATA, data);
/* issue write */
REG_WRITE(PHY_MDIOCTRL,
(phyaddr << 0) | (regn << 5) | (0 << 10) | (1 << 15));
/* wait for write completion */
while (REG_READ(PHY_MDIOCTRL) & (1 << 15)) {
if (timeout-- == 0) {
PRINT_ERROR("MDIO timeout\n");
break;
}
}
}
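/*
 * Illustrative sketch, not part of the original driver: an MDIO read helper
 * mirroring phy_write() above. Bit 10 of PHY_MDIOCTRL selects a read cycle
 * (MacMdioCtrlRead in qdpc_emac.h) and the result is then taken from
 * PHY_MDIODATA. This helper is an assumption, kept under "#if 0", and is
 * not referenced anywhere in the driver.
 */
#if 0
static u16 phy_read(u32 phyaddr, u32 regn)
{
	u32 timeout = 0x10000;
	/* issue read: phy address, register number, read opcode, start bit */
	REG_WRITE(PHY_MDIOCTRL,
		(phyaddr << 0) | (regn << 5) | (1 << 10) | (1 << 15));
	/* wait for the start bit to clear, i.e. for cycle completion */
	while (REG_READ(PHY_MDIOCTRL) & (1 << 15)) {
		if (timeout-- == 0) {
			PRINT_ERROR("MDIO timeout\n");
			break;
		}
	}
	/* data register holds the 16-bit result of the completed cycle */
	return REG_READ(PHY_MDIODATA) & 0xffff;
}
#endif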
int32_t qdpc_emac_getconfig(struct qdpc_priv *priv)
{
int pcie_config = 0;
/* EMAC DMA engine assignment */
priv->pktq.pkt_dsq.pd_tx_basereg = EMAC0_ADDR;
#ifdef QDPC_PHY_CROSS_MODE
priv->pktq.pkt_dsq.pd_rx_basereg = EMAC1_ADDR;
#else
priv->pktq.pkt_dsq.pd_rx_basereg = EMAC0_ADDR;
#endif
priv->pktq.pkt_usq.pd_tx_basereg = EMAC1_ADDR;
#ifdef QDPC_PHY_CROSS_MODE
priv->pktq.pkt_usq.pd_rx_basereg = EMAC0_ADDR;
#else
priv->pktq.pkt_usq.pd_rx_basereg = EMAC1_ADDR;
#endif
priv->pktq.pkt_dsq.pd_src_busaddr =
priv->shmem_busaddr + priv->shmem->eps_dsdma_desc;
priv->pktq.pkt_usq.pd_dst_busaddr =
priv->shmem_busaddr + priv->shmem->eps_usdma_desc;
/* check for Use PHY Loopback configuration */
get_board_config(BOARD_CFG_PCIE, &pcie_config);
if ((pcie_config & PCIE_USE_PHY_LOOPBK) == PCIE_USE_PHY_LOOPBK)
{
priv->mii_pllclk = 0x8f8f8f8f;
} else {
#ifdef CVM_HOST
priv->mii_pllclk = 0x8f808f8f;
#else
priv->mii_pllclk = 0x8f808f80;
#endif
}
return 0;
}
void qdpc_emac_startdma(qdpc_pdring_t *pktq)
{
REG_WRITE(pktq->pd_rx_basereg + EMAC_DMARPD, 1);
REG_WRITE(pktq->pd_tx_basereg + EMAC_DMATPD, 1);
}
void qdpc_emac_disable(struct qdpc_priv *priv)
{
/* Disable EMAC1 TX poll demand */
REG_WRITE(priv->pktq.pkt_usq.pd_tx_basereg + EMAC_DMATAPC, 0x0);
/* Disable Interrupts */
REG_WRITE(priv->pktq.pkt_dsq.pd_rx_basereg + EMAC_DMAINTEN, ~DmaAllInts);
REG_WRITE(priv->pktq.pkt_usq.pd_rx_basereg + EMAC_DMAINTEN, ~DmaAllInts);
}
void qdpc_emac_enable(struct qdpc_priv *priv)
{
uint32_t tmp;
uint32_t dsdma_base = priv->pktq.pkt_dsq.pd_tx_basereg;
uint32_t usdma_base = priv->pktq.pkt_usq.pd_tx_basereg;
/* EMAC0 Interrupts:
* DmaRxDone
* EMAC0_ERROR (DmaRxMissedFrame | DmaNoTxDesc | DmaNoRxDesc)
* DmaMacInterrupt
* MAC Interrupts:
* MacUnderrun
*/
REG_WRITE(dsdma_base + EMAC_MACINTEN, QDPC_MAC_DS_INTR);
REG_WRITE(dsdma_base + EMAC_DMAINTEN, QDPC_DMA_DS_INTR);
/* EMAC1 Interrupts:
* DmaRxDone
*/
REG_WRITE(usdma_base + EMAC_DMAINTEN, QDPC_DMA_US_INTR);
REG_WRITE(dsdma_base + EMAC_MACINTEN, QDPC_MAC_US_INTR);
REG_WRITE(dsdma_base + EMAC_DMACTRL, DmaStartTx | DmaStartRx);
REG_WRITE(usdma_base + EMAC_DMACTRL, DmaStartTx | DmaStartRx);
/* EMAC1 TX poll demand */
REG_WRITE(usdma_base + EMAC_DMATAPC, QDPC_DMA_AUTO_POLLING_CNT);
/* clear missed frames and stopped flush */
tmp = REG_READ(dsdma_base + EMAC_DMAMFC);
tmp = REG_READ(dsdma_base + EMAC_DMASFC);
tmp = REG_READ(usdma_base + EMAC_DMAMFC);
tmp = REG_READ(usdma_base + EMAC_DMASFC);
}
void qdpc_emac_phyinit(struct qdpc_priv *priv)
{
int rgmii_timing = priv->mii_pllclk;
#if defined(CONFIG_RUBY_PCIE_HAVE_PHY)
int phy_setting_mask = 0;
#endif
REG_WRITE(RUBY_SYS_CTL_MASK, RUBY_SYS_CTL_MASK_MII);
REG_WRITE(RUBY_SYS_CTL_CTRL, 0);
REG_WRITE(RUBY_SYS_CTL_MASK, RUBY_SYS_CTL_MASK_GMII0_TXCLK);
/* 2 is 1000MB */
REG_WRITE(RUBY_SYS_CTL_CTRL, RUBY_SYS_CTL_MASK_GMII0_1000M);
REG_WRITE(RUBY_SYS_CTL_GMII_CLKDLL, rgmii_timing);
REG_WRITE(RUBY_SYS_CTL_MASK, RUBY_SYS_CTL_MASK_GMII1_TXCLK);
/* 2 is 1000MB */
REG_WRITE(RUBY_SYS_CTL_CTRL,RUBY_SYS_CTL_MASK_GMII1_1000M);
REG_WRITE(RUBY_SYS_CTL_GMII_CLKDLL, rgmii_timing);
REG_WRITE(RUBY_SYS_CTL_CPU_VEC_MASK, RUBY_SYS_CTL_RESET_ENET0 | RUBY_SYS_CTL_RESET_ENET1);
REG_WRITE(RUBY_SYS_CTL_CPU_VEC, 0);
REG_WRITE(RUBY_SYS_CTL_CPU_VEC, RUBY_SYS_CTL_RESET_ENET0 | RUBY_SYS_CTL_RESET_ENET1);
#if defined(CONFIG_RUBY_PCIE_HAVE_PHY)
/* EMAC0 RGMII PLL Setup 1GB FDX loopback */
phy_write(QDPC_PHY0_ADDRESS, PHY_MODE_CTL, PHY_MODE_CTL_RESET);
udelay(140);
phy_setting_mask |= PHY_MODE_CTL_100MB |
PHY_MODE_CTL_FULL_DUPLEX | PHY_MODE_CTL_LOOPBACK;
phy_write(QDPC_PHY0_ADDRESS, PHY_MODE_CTL, phy_setting_mask);
/* EMAC1 RGMII setup: 1GB FDX loopback */
phy_write(QDPC_PHY1_ADDRESS, PHY_MODE_CTL, PHY_MODE_CTL_RESET);
udelay(140);
phy_write(QDPC_PHY1_ADDRESS, PHY_MODE_CTL, phy_setting_mask);
#endif
}
void qdpc_emac_reset(struct qdpc_priv *priv)
{
uint32_t dsbase = priv->pktq.pkt_dsq.pd_tx_basereg;
uint32_t usbase = priv->pktq.pkt_usq.pd_tx_basereg;
/* Reset EMACs */
REG_WRITE(dsbase + EMAC_DMACONFIG, DmaSoftReset);
REG_WRITE(usbase + EMAC_DMACONFIG, DmaSoftReset);
REG_WRITE(dsbase + EMAC_DMACONFIG, 0);
REG_WRITE(usbase + EMAC_DMACONFIG, 0);
/* Disable DMA */
REG_WRITE(dsbase + EMAC_DMACTRL, 0);
REG_WRITE(usbase + EMAC_DMACTRL, 0);
/* Disable Interrupts */
REG_WRITE(dsbase + EMAC_DMAINTEN, ~DmaAllInts);
REG_WRITE(usbase + EMAC_DMAINTEN, ~DmaAllInts);
}
int32_t qdpc_usdma_init(struct qdpc_priv *priv, size_t ringsize)
{
qdpc_pdring_t *usq = &priv->pktq.pkt_usq;
uint32_t regbase = usq->pd_tx_basereg;
uint32_t rx_regbase = usq->pd_rx_basereg;
/* Setup Upstream DMA - EMAC1 */
qdpc_init_txq(priv, ringsize);
/* Write start of RX ring to EMAC1 */
REG_WRITE(rx_regbase + EMAC_DMARBA, usq->pd_dst_busaddr);
/* Write start of TX ring (on host ) to EMAC1 */
REG_WRITE(regbase + EMAC_DMATBA, usq->pd_src_busaddr);
/* Setup DMA config, bring DMA controller out of reset */
REG_WRITE(regbase + EMAC_DMACONFIG, Dma64BitMode | DmaWait4Done | Dma16WordBurst);
printk("Upstream DMA: s:0x%x d:0x%x\n", usq->pd_src_busaddr, usq->pd_dst_busaddr);
return 0;
}
int32_t qdpc_dsdma_init(struct qdpc_priv *priv, size_t ringsize)
{
qdpc_pdring_t *dsq = &priv->pktq.pkt_dsq;
uint32_t regbase = dsq->pd_tx_basereg;
uint32_t rx_regbase = dsq->pd_rx_basereg;
/* Setup Downstream DMA - EMAC0 */
qdpc_init_rxq(priv, ringsize);
/* Write start of RX ring to EMAC0 */
REG_WRITE(rx_regbase + EMAC_DMARBA, dsq->pd_dst_busaddr);
/* Write start of TX ring (on host ) to EMAC0 */
REG_WRITE(regbase + EMAC_DMATBA, dsq->pd_src_busaddr);
/* Setup DMA config, bring DMA controller out of reset */
REG_WRITE(regbase + EMAC_DMACONFIG, Dma64BitMode | DmaWait4Done | Dma16WordBurst);
printk("downstream DMA: s:0x%x d:0x%x\n", dsq->pd_src_busaddr, dsq->pd_dst_busaddr);
return 0;
}
void qdpc_emac_initcommon(uint32_t regbase)
{
/* PHY mode */
REG_WRITE(regbase + EMAC_MACGCTRL, EMAC_100MBPS | EMAC_FD);
/* Max Frame Length */
REG_WRITE(regbase + EMAC_MACMFS, QDPC_MAX_FRAMESZ);
REG_WRITE(regbase + EMAC_MACTJS, QDPC_MAX_JABBER);
REG_WRITE(regbase + EMAC_MACRJS, QDPC_MAX_JABBER);
/* Flow Control Decode and Generation Disabled */
//REG_WRITE(regbase + EMAC_MACFCPTV, 0);
REG_WRITE(regbase + EMAC_MACFCCTRL, MacFlowDecodeEnable |
MacFlowGenerationEnable | MacAutoFlowGenerationEnable |
MacFlowMulticastMode | MacBlockPauseFrames);
/* !!! FIXME - whether or not we need this depends on whether
* the auto-pause generation uses it. The auto function may just
* use 0xffff val to stop sending & then 0 to restart it.
*/
REG_WRITE(regbase + EMAC_MACFCPTV, 100);
REG_WRITE(regbase + EMAC_MACFCPTVH, 200);
REG_WRITE(regbase + EMAC_MACFCPTVL, 0);
/* source mac: 00:26:86:00:00:26 */
REG_WRITE(regbase + EMAC_MACFCSAH, 0x0026);
REG_WRITE(regbase + EMAC_MACFCSAM, 0x8600);
REG_WRITE(regbase + EMAC_MACFCSAL, 0x0026);
REG_WRITE(regbase + EMAC_DMAFCT, MacFCHighThreshold | MacFCLowThreshold);
REG_WRITE(regbase + EMAC_DMATAPC, 0x00);
REG_WRITE(regbase + EMAC_MACACTRL, MacPromiscuous);
REG_WRITE(regbase + EMAC_MACTCTRL, MacTxEnable | MacTxIFG256 | MacTxDisableFCSInsertion);
}
void qdpc_emac_dsinit(struct qdpc_priv *priv)
{
uint32_t dsbase = priv->pktq.pkt_dsq.pd_tx_basereg;
/* Init Downstream DMA ring */
qdpc_dsdma_init(priv, QDPC_DESC_RING_SIZE);
/* Specific flow, buffering and timing inits */
REG_WRITE(dsbase + EMAC_MACTFIFOAF, 512 - 8);
REG_WRITE(dsbase + EMAC_MACTPST, 1518);
REG_WRITE(dsbase + EMAC_MACRPST, 0);
/* Enable DMA Rx Transfer Done interrupt mitigation control */
REG_WRITE(dsbase + EMAC_DMARDIMC, (QDPC_DMA_TX_IMC_EN |
QDPC_DMA_TX_IMC_DELAY | QDPC_DMA_TX_IMC_NFRAMES(QDPC_DMA_INT_MITIGATION_NUM)));
REG_WRITE(dsbase + EMAC_MACRCTRL, MacRxEnable | MacRxDisableFcs | MacRxStoreAndForward);
/* Flow Control Decode and Generation Disabled */
REG_WRITE(dsbase + EMAC_MACFCPTV, 0);
/* Common init */
qdpc_emac_initcommon(dsbase);
}
void qdpc_emac_usinit(struct qdpc_priv *priv)
{
uint32_t usbase = priv->pktq.pkt_usq.pd_tx_basereg;
/* Init Upstream DMA ring */
qdpc_usdma_init(priv, QDPC_DESC_USRING_SIZE);
/* Specific flow, buffering and timing inits */
REG_WRITE(usbase + EMAC_MACTFIFOAF, 512 - 8);
REG_WRITE(usbase + EMAC_MACTPST, 1518);
REG_WRITE(usbase + EMAC_MACRPST, 0);
/* Enable DMA Rx Transfer Done interrupt mitigation control */
REG_WRITE(usbase + EMAC_DMARDIMC,(QDPC_DMA_TX_IMC_EN |
QDPC_DMA_TX_IMC_DELAY | QDPC_DMA_TX_IMC_NFRAMES(QDPC_DMA_INT_MITIGATION_NUM)));
REG_WRITE(usbase + EMAC_MACRCTRL, MacRxEnable | MacRxDisableFcs | MacRxStoreAndForward);
/* Enable Flow Control Decode and Generation */
REG_WRITE(usbase + EMAC_MACFCPTV, 0);
/* Common init */
qdpc_emac_initcommon(usbase);
}
int32_t qdpc_emac_init(struct qdpc_priv *priv)
{
/* Get configuration for upstream and downstream DMA engines */
qdpc_emac_getconfig(priv);
/* Setup PHYs */
qdpc_emac_phyinit(priv);
/* Reset the EMACs */
qdpc_emac_reset(priv);
/* Initialize the downstream and upstream DMA engines */
qdpc_emac_dsinit(priv);
qdpc_emac_usinit(priv);
return SUCCESS;
}
/* USE_EMAC_DMA */

View File

@ -0,0 +1,316 @@
/**
* Copyright (c) 2011-2013 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#ifndef __QDPC_EMAC_H__
#define __QDPC_EMAC_H__
#include <asm/cache.h>
/* Register definitions */
/* Some Bit values */
#define DC_FLUSH_STATUS_BIT 0x100
#define INV_MODE_FLUSH 0x40
#define CACHE_DISABLE_BIT 0x01
#define EMAC0_ADDR (0xed000000)
#define EMAC1_ADDR (0xe8000000)
#define EMAC_DMACONFIG (0x0000)
#define EMAC_DMACTRL (0x0004)
#define EMAC_DMASTATUS (0x0008)
#define EMAC_DMAINTEN (0x000c)
#define EMAC_DMATAPC (0x0010)
#define EMAC_DMATPD (0x0014)
#define EMAC_DMARPD (0x0018)
#define EMAC_DMATBA (0x001c)
#define EMAC_DMARBA (0x0020)
#define EMAC_DMAMFC (0x0024)
#define EMAC_DMASFC (0x0028)
#define EMAC_DMARDIMC (0x002c)
#define EMAC_DMACTDP (0x0030)
#define EMAC_DMACTBP (0x0034)
#define EMAC_DMARDP (0x0038)
#define EMAC_DMACRBP (0x003c)
#define EMAC_DMAFCT (0x0044)
#define EMAC_MACGCTRL (0x0100) /* Global Control Register */
#define EMAC_MACTCTRL (0x0104) /* Transmit Control Register */
#define EMAC_MACRCTRL (0x0108) /* Receive Control Register */
#define EMAC_MACMFS (0x010C) /* Maximum Frame Size Register */
#define EMAC_MACTJS (0x0110) /* Transmit Jabber Size Register */
#define EMAC_MACRJS (0x0114) /* Receive Jabber Size Register */
#define EMAC_MACACTRL (0x0118) /* Address Control Register. */
#define EMAC_MACAH1 (0x0120) /* Address#1 High Register */
#define EMAC_MACAM1 (0x0124) /* Address#1 Med Register */
#define EMAC_MACAL1 (0x0128) /* Address#1 Low Register */
#define EMAC_MACAH2 (0x012C) /* Address#2 High Register */
#define EMAC_MACAM2 (0x0130) /* Address#2 Med Register */
#define EMAC_MACAL2 (0x0134) /* Address#2 Low Register */
#define EMAC_MACAH3 (0x0138) /* Address#3 High Register */
#define EMAC_MACAM3 (0x013C) /* Address#3 Med Register */
#define EMAC_MACAL3 (0x0140) /* Address#3 Low Register */
#define EMAC_MACAH4 (0x0144) /* Address#4 High Register */
#define EMAC_MACAM4 (0x0148) /* Address#4 Med Register */
#define EMAC_MACAL4 (0x014C) /* Address#4 Low Register */
#define EMAC_MACHT1 (0x0150) /* Hash Table#1 Register */
#define EMAC_MACHT2 (0x0154) /* Hash Table#2 Register */
#define EMAC_MACHT3 (0x0158) /* Hash Table#3 Register */
#define EMAC_MACHT4 (0x015C) /* Hash Table#4 Register */
#define EMAC_MACFCCTRL (0x0160) /* Flow-Control Control Register */
#define EMAC_MACFCPFG (0x0164) /* Flow-Control Pause Frame Generate Register */
#define EMAC_MACFCSAH (0x0168) /* Flow-Control Source Address High Register */
#define EMAC_MACFCSAM (0x016C) /* Flow-Control Source Address Med Register */
#define EMAC_MACFCSAL (0x0170) /* Flow-Control Source Address Low Register */
#define EMAC_MACFCDAH (0x0174) /* Flow-Control Destination Address High Register */
#define EMAC_MACFCDAM (0x0178) /* Flow-Control Destination Address Med Register */
#define EMAC_MACFCDAL (0x017C) /* Flow-Control Destination Address Low Register */
#define EMAC_MACFCPTV (0x0180) /* Flow-Control Pause Time Value Register */
#define EMAC_MACFCPTVH (0x0184) /* Flow-Control High Pause Time Value Register */
#define EMAC_MACFCPTVL (0x0188) /* Flow-Control Low Pause Time Value Register */
#define EMAC_MACMDIOCTRL (0x01A0) /* MDIO Control Register */
#define EMAC_MACMDIODAT (0x01A4) /* MDIO Data Register */
#define EMAC_MACRXSCCTRL (0x01A8) /* Rx. Stat Ctr Control Register */
#define EMAC_MACRXSCDH (0x01AC) /* Rx. Stat Ctr Data High Register */
#define EMAC_MACRXSCDL (0x01B0) /* Rx. Stat Ctr Data Low Register */
#define EMAC_MACTXSCCTRL (0x01B4) /* Tx. Stat Ctr Control Register */
#define EMAC_MACTXSCDH (0x01B8) /* Tx. Stat Ctr Data High Register */
#define EMAC_MACTXSCDL (0x01BC) /* Tx. Stat Ctr Data Low Register */
#define EMAC_MACTFIFOAF (0x01C0) /* Transmit FIFO Almost Full Register */
#define EMAC_MACTPST (0x01C4) /* Transmit Packet Start Threshold Register */
#define EMAC_MACRPST (0x01C8) /* Receive Packet Start Threshold Register */
#define EMAC_MACINTR (0x01E0) /* Interrupt Register */
#define EMAC_MACINTEN (0x01E4) /* Interrupt Enable Register */
#define PHY_MDIOCTRL (EMAC0_ADDR + 0x01A0) /* MDIO Control Register */
#define PHY_MDIODATA (EMAC0_ADDR + 0x01A4) /* MDIO Data Register */
/* mac global control register 0x0100 */
#define EMAC_10MBPS 0
#define EMAC_100MBPS 1
#define EMAC_1000MBPS 2
#define EMAC_HD 0
#define EMAC_FD (1<<2)
#define MAC_RX_STAT_RESET 0x08
#define MAX_TX_STAT_RESET 0x10
#define EMAC_STATS_TXDMA_STOPPED (0) /* stopped */
#define EMAC_STATS_TXDMA_FETCH0 (1) /* fetch descriptor */
#define EMAC_STATS_TXDMA_FETCH1 (3) /* fetch data buffer */
#define EMAC_STATS_TXDMA_CLOSE (4) /* close descriptor */
#define EMAC_STATS_TXDMA_SUSPEND (5) /* suspended */
#define EMAC_STATS_RXDMA_STOPPED (0) /* stopped */
#define EMAC_STATS_RXDMA_FETCH (1) /* fetch descriptor */
#define EMAC_STATS_RXDMA_WAIT0 (2) /* wait for end of receive */
#define EMAC_STATS_RXDMA_WAIT1 (3) /* wait for rx frame */
#define EMAC_STATS_RXDMA_SUSPEND (4) /* suspended */
#define EMAC_STATS_RXDMA_CLOSE (5) /* close descriptor */
#define EMAC_STATS_RXDMA_FLUSH (6) /* flush buffer */
#define EMAC_STATS_RXDMA_PUT (7) /* put buffer */
#define EMAC_STATS_RXDMA_WAIT2 (8) /* wait for status */
#define EMAC_TXENABL 1
#define EMAC_RXENABLE 1
#define EMAC_STANDFMODE (1<<3)
#define EMAC_AUTORETRY (1<<3)
#define EMAC_DMA_TXSTART 1
#define EMAC_DMA_RXSTART (1<<1)
#define EMAC_DMA_BL4 (4<<1)
#define EMAC_DMA_ARBRR (1<<15)
#define EMAC_DMA_TXWFD (1<<16)
#define EMAC_DMA_SBL (1<<17)
#define EMAC_DMA_64BDAT (1<<18)
#define MAX_TX_COUNTER (12)
#define MAX_RX_COUNTER (24)
#define TX_STAT_READ (BIT(15))
#define RX_STAT_READ (BIT(15))
#define TXDESC_INTONCOMP (BIT(31))
#define TXDESC_LASTSEG (BIT(30))
#define TXDESC_FIRSTSEG (BIT(29))
#define TXDESC_ADDCRCDIS (BIT(28))
#define TXDESC_DISPADDING (BIT(27))
#define TXDESC_ENDOFRING (BIT(26))
#define TXDESC_SECONDCHAIN (BIT(25))
#define TXDESC_FORCEEOPERR (BIT(24))
#define TXDESC_OWN (BIT(31))
#define RXDESC_OWN (BIT(31))
#define RXDESC_ENDOFRING (BIT(26))
#define PHY_MODE_CTL (0)
#define PHY_MODE_CTL_RESET BIT(15)
#define PHY_MODE_CTL_1000MB BIT(6)
#define PHY_MODE_CTL_100MB BIT(13)
#define PHY_MODE_CTL_10MB (0)
#define PHY_MODE_CTL_LOOPBACK BIT(14)
#define PHY_MODE_CTL_AUTONEG BIT(12)
#define PHY_MODE_CTL_POWERDOWN BIT(11)
#define PHY_MODE_CTL_ISOLATE BIT(10)
#define PHY_MODE_CTL_RESTART_AUTONEG BIT(9)
#define PHY_MODE_CTL_FULL_DUPLEX BIT(8)
#define PHY_MODE_CTL_COL_TEST BIT(7)
#define PHY_ID1 (2)
#define PHY_ID2 (3)
#define PHY_SPEED_AUTO (0)
#define PHY_SPEED_1000MB (1)
#define PHY_SPEED_100MB (2)
#define PHY_SPEED_10MB (3)
enum AraMacRegVals {
/* DMA config register */
DmaSoftReset = 1,
Dma1WordBurst = (0x01 << 1),
Dma4WordBurst = (0x04 << 1),
Dma16WordBurst = (0x10 << 1),
DmaRoundRobin = (1 << 15),
DmaWait4Done = (1 << 16),
DmaStrictBurst = (1 << 17),
Dma64BitMode = (1 << 18),
/* DMA control register */
DmaStartTx = (1 << 0),
DmaStartRx = (1 << 1),
/* DMA status/interrupt & interrupt mask registers */
DmaTxDone = (1 << 0),
DmaNoTxDesc = (1 << 1),
DmaTxStopped = (1 << 2),
DmaRxDone = (1 << 4),
DmaNoRxDesc = (1 << 5),
DmaRxStopped = (1 << 6),
DmaRxMissedFrame = (1 << 7),
DmaMacInterrupt = (1 << 8),
DmaAllInts = DmaTxDone | DmaNoTxDesc | DmaTxStopped | DmaRxDone |
DmaNoRxDesc | DmaRxStopped | DmaRxMissedFrame | DmaMacInterrupt,
DmaTxStateMask = (7 << 16),
DmaTxStateStopped = (0 << 16),
DmaTxStateFetchDesc = (1 << 16),
DmaTxStateFetchData = (2 << 16),
DmaTxStateWaitEOT = (3 << 16),
DmaTxStateCloseDesc = (4 << 16),
DmaTxStateSuspended = (5 << 16),
DmaRxStateMask = (15 << 21),
DmaRxStateStopped = (0 << 21),
DmaRxStateFetchDesc = (1 << 21),
DmaRxStateWaitEOR = (2 << 21),
DmaRxStateWaitFrame = (3 << 21),
DmaRxStateSuspended = (4 << 21),
DmaRxStateCloseDesc = (5 << 21),
DmaRxStateFlushBuf = (6 << 21),
DmaRxStatePutBuf = (7 << 21),
DmaRxStateWaitStatus = (8 << 21),
/* MAC global control register */
MacSpeed10M = (0 << 0),
MacSpeed100M = (1 << 0),
MacSpeed1G = (2 << 0),
MacSpeedMask = (3 << 0),
MacFullDuplex = (1 << 2),
MacResetRxStats = (1 << 3),
MacResetTxStats = (1 << 4),
/* MAC TX control */
MacTxEnable = (1 << 0),
MacTxInvertFCS = (1 << 1),
MacTxDisableFCSInsertion = (1 << 2),
MacTxAutoRetry = (1 << 3),
MacTxIFG96 = (0 << 4),
MacTxIFG64 = (1 << 4),
MacTxIFG128 = (2 << 4),
MacTxIFG256 = (3 << 4),
MacTxPreamble7 = (0 << 6),
MacTxPreamble3 = (2 << 6),
MacTxPreamble5 = (3 << 6),
/* MAC RX control */
MacRxEnable = (1 << 0),
MacRxDisableFcs = (1 << 1),
MacRxStripFCS = (1 << 2),
MacRxStoreAndForward = (1 << 3),
MacRxPassBadFrames = (1 << 5),
MacAccountVLANs = (1 << 6),
/* MAC address control */
MacAddr1Enable = (1 << 0),
MacAddr2Enable = (1 << 1),
MacAddr3Enable = (1 << 2),
MacAddr4Enable = (1 << 3),
MacInverseAddr1Enable = (1 << 4),
MacInverseAddr2Enable = (1 << 5),
MacInverseAddr3Enable = (1 << 6),
MacInverseAddr4Enable = (1 << 7),
MacPromiscuous = (1 << 8),
/* MAC flow control */
MacFlowDecodeEnable = (1 << 0),
MacFlowGenerationEnable = (1 << 1),
MacAutoFlowGenerationEnable = (1 << 2),
MacFlowMulticastMode = (1 << 3),
MacBlockPauseFrames = (1 << 4),
/* MDIO control register values */
MacMdioCtrlPhyMask = 0x1f,
MacMdioCtrlPhyShift = 0,
MacMdioCtrlRegMask = 0x1f,
MacMdioCtrlRegShift = 5,
MacMdioCtrlRead = (1 << 10),
MacMdioCtrlWrite = 0,
MacMdioCtrlStart = (1 << 15),
/* MDIO data register values */
MacMdioDataMask = 0xffff,
/* MAC interrupt & interrupt mask values */
MacUnderrun = (1 << 0),
MacJabber = (1 << 0),
/* RX statistics counter control */
RxStatReadBusy = (1 << 15),
/* TX statistics counter control */
TxStatReadBusy = (1 << 15),
/* Flow Control */
MacFCHighThreshold = (((QDPC_DESC_RING_SIZE - (QDPC_DESC_RING_SIZE >> 3)) & 0xffff) << 16),
MacFCLowThreshold = (8 << 0),
};
#define QDPC_DMA_DS_ERROR (DmaRxMissedFrame | DmaNoRxDesc)
#define QDPC_DMA_DS_INTR (DmaRxDone | DmaMacInterrupt | QDPC_DMA_DS_ERROR)
#define QDPC_MAC_DS_INTR (MacUnderrun | MacJabber)
#define QDPC_DMA_US_MISC (DmaNoRxDesc | DmaRxStopped | DmaTxStopped | DmaRxMissedFrame )
#define QDPC_DMA_US_INTR (DmaRxDone | DmaMacInterrupt | QDPC_DMA_US_MISC)
#define QDPC_MAC_US_INTR (MacUnderrun | MacJabber)
#define QDPC_DMA_TX_IMC_EN BIT(31)
#define QDPC_DMA_TX_IMC_NFRAMES(x) ((x)&0xF)
#define QDPC_DMA_TX_IMC_TICKS(ahb_ticks) (((ahb_ticks) & 0xFFFFF) << 8)
#define QDPC_DMA_TX_IMC_NTICKS 0x07FFF
#define QDPC_DMA_TX_IMC_DELAY (QDPC_DMA_TX_IMC_TICKS(QDPC_DMA_TX_IMC_NTICKS))
#define QDPC_DMA_TX_STATUS(x) (((x) >> 16) & 0x7)
#define QDPC_DMA_RX_STATUS(x) (((x) >> 20) & 0xf)
#define QDPC_DMA_INT_MITIGATION_NUM (8)
#define QDPC_DMA_AUTO_POLLING_CNT (0x1FFF)
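/*
 * Worked example (not in the original header): with the defaults above, the
 * mitigation word written to EMAC_DMARDIMC by qdpc_emac_dsinit() and
 * qdpc_emac_usinit() works out to
 *
 *	QDPC_DMA_TX_IMC_EN                      0x80000000
 *	| QDPC_DMA_TX_IMC_DELAY (0x7FFF << 8)   0x007FFF00
 *	| QDPC_DMA_TX_IMC_NFRAMES(8)            0x00000008
 *	=                                       0x807FFF08
 *
 * i.e. raise the interrupt after 8 completed frames or after 0x7FFF AHB
 * ticks; the "whichever comes first" reading of those two fields is an
 * assumption, not something stated by the vendor code.
 */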
int qdpc_emac_init(struct qdpc_priv *priv);
int qdpc_emac_dma(void *dst, void *src, u32 len);
void qdpc_flush_and_inv_dcache_range(unsigned long start, unsigned long end);
void qdpc_emac_ack(void);
void qdpc_emac_enable(struct qdpc_priv *priv);
void qdpc_emac_disable(struct qdpc_priv *priv);
void qdpc_emac_startdma(qdpc_pdring_t *pktq);
#endif /* __QDPC_EMAC_H__ */

View File

@ -0,0 +1,237 @@
/**
* Copyright (c) 2011-2013 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/version.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/netlink.h>
#include <asm/board/board_config.h>
#include <common/topaz_platform.h>
#include "qdpc_config.h"
#include "qdpc_debug.h"
#include "qdpc_init.h"
MODULE_AUTHOR("Quantenna");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Virtual ethernet driver for Quantenna Ruby device");
MODULE_VERSION(QDPC_MODULE_VERSION);
char *qdpc_mac0addr = NULL;
module_param(qdpc_mac0addr, charp, 0);
uint8_t qdpc_basemac[ETH_ALEN] = {'\0', 'R', 'U', 'B', 'Y', '%'};
static int qdpc_parse_mac(const char *mac_str, uint8_t *mac)
{
unsigned int tmparray[ETH_ALEN];
if (mac_str == NULL)
return -1;
if (sscanf(mac_str, "%02x:%02x:%02x:%02x:%02x:%02x",
&tmparray[0],
&tmparray[1],
&tmparray[2],
&tmparray[3],
&tmparray[4],
&tmparray[5]) != ETH_ALEN) {
return -1;
}
mac[0] = tmparray[0];
mac[1] = tmparray[1];
mac[2] = tmparray[2];
mac[3] = tmparray[3];
mac[4] = tmparray[4];
mac[5] = tmparray[5];
return 0;
}
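/*
 * Usage note (illustrative, not in the original source): qdpc_mac0addr is
 * the module parameter declared above, so a load such as
 *
 *	insmod qdpc-pcie.ko qdpc_mac0addr=00:26:86:00:00:30
 *
 * makes qdpc_init_module() run qdpc_parse_mac() on the string and replace
 * qdpc_basemac[] with those six bytes. The MAC value shown is an arbitrary
 * example, not a documented default.
 */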
static void qdpc_nl_recv_msg(struct sk_buff *skb);
int qdpc_init_netdev(struct net_device **net_dev);
static void qdpc_get_config(struct qdpc_priv *priv)
{
int cfg_var = 0;
qdpc_config_t *ep_config = &priv->ep_config;
memset (ep_config, 0, sizeof(qdpc_config_t));
ep_config->cf_usdma = 1;
ep_config->cf_dsdma = 0;
ep_config->cf_msi = 1;
ep_config->cf_64bit = 0;
ep_config->cf_usdma_ndesc = QDPC_DESC_RING_SIZE;
ep_config->cf_dsdma_ndesc = QDPC_DESC_DSRING_SIZE;
get_board_config(BOARD_CFG_PCIE, &cfg_var);
if ((cfg_var & PCIE_USE_PHY_LOOPBK) == PCIE_USE_PHY_LOOPBK)
{
ep_config->cf_miipllclk = 0x8f8f8f8f;
} else {
#ifdef CVM_HOST
ep_config->cf_miipllclk = 0x8f808f8f;
#else
ep_config->cf_miipllclk = 0x8f808f80;
#endif
}
}
/* Global net device pointer */
struct net_device *qdpc_ndev = NULL;
int __init qdpc_init_module(void)
{
struct qdpc_priv *priv;
struct net_device *ndev = NULL;
int ret = SUCCESS;
if (qdpc_mac0addr != NULL) {
qdpc_parse_mac(qdpc_mac0addr, qdpc_basemac);
}
/* Creating net device */
ret = qdpc_init_netdev(&ndev);
if (ret) {
return ret;
}
qdpc_ndev = ndev;
priv = netdev_priv(ndev);
/* ep2h isr lock */
spin_lock_init(&priv->lock);
/* Get board configuration */
qdpc_get_config(priv);
/* Initialize INIT WORK */
INIT_WORK(&priv->init_work, qdpc_init_work);
/* Create netlink */
priv->netlink_socket = netlink_kernel_create(&init_net, QDPC_NETLINK_RPC_PCI, 0, qdpc_nl_recv_msg,
NULL, THIS_MODULE);
if (!priv->netlink_socket) {
PRINT_ERROR(KERN_ALERT "Error creating netlink socket.\n");
ret = FAILURE;
goto error;
}
/* Initialize Interrupts */
ret = qdpc_pcie_init_intr(ndev);
if (ret < 0) {
PRINT_ERROR("Interrupt Initialization failed \n");
goto out;
}
schedule_work(&priv->init_work);
return ret;
out:
/* Release netlink */
netlink_kernel_release(priv->netlink_socket);
error:
/* Free netdevice */
unregister_netdev(ndev);
free_netdev(ndev);
return ret;
}
static void __exit qdpc_exit_module(void)
{
struct qdpc_priv *priv = NULL;
if (qdpc_ndev) {
priv = netdev_priv(qdpc_ndev);
/* release netlink socket */
netlink_kernel_release(priv->netlink_socket);
/* release scheduled work */
cancel_work_sync(&priv->init_work);
/* Free interrupt line */
qdpc_free_interrupt(qdpc_ndev);
/* Free net device */
unregister_netdev(qdpc_ndev);
free_netdev(qdpc_ndev);
qdpc_ndev = NULL;
}
return;
}
static void qdpc_nl_recv_msg(struct sk_buff *skb)
{
struct qdpc_priv *priv = netdev_priv(qdpc_ndev);
struct nlmsghdr *nlh = (struct nlmsghdr*)skb->data;
struct sk_buff *skb2;
/* Parsing the netlink message */
PRINT_DBG(KERN_INFO "%s line %d Netlink received pid:%d, size:%d, type:%d\n",
__FUNCTION__, __LINE__, nlh->nlmsg_pid, nlh->nlmsg_len, nlh->nlmsg_type);
switch (nlh->nlmsg_type) {
case QDPC_NETLINK_TYPE_SVC_REGISTER:
priv->netlink_pid = nlh->nlmsg_pid;/*pid of sending process */
return;
case QDPC_NETLINK_TYPE_SVC_RESPONSE:
break;
default:
PRINT_DBG(KERN_INFO "%s line %d Netlink Invalid type %d\n",
__FUNCTION__, __LINE__, nlh->nlmsg_type);
return;
}
/*
* Make a new skb: the old skb will be freed in netlink_unicast_kernel,
* but the data must stay around until the DMA transfer is done, so it is
* copied into a new skb that we keep until then.
*/
skb2 = alloc_skb(nlh->nlmsg_len+sizeof(qdpc_cmd_hdr_t), GFP_ATOMIC);
if (skb2) {
qdpc_cmd_hdr_t *cmd_hdr;
cmd_hdr = (qdpc_cmd_hdr_t *)skb2->data;
memcpy(cmd_hdr->dst_magic, QDPC_NETLINK_DST_MAGIC, ETH_ALEN);
memcpy(cmd_hdr->src_magic, QDPC_NETLINK_SRC_MAGIC, ETH_ALEN);
cmd_hdr->type = htons(QDPC_APP_NETLINK_TYPE);
cmd_hdr->len = htons(nlh->nlmsg_len);
memcpy(skb2->data+sizeof(qdpc_cmd_hdr_t), skb->data+sizeof(struct nlmsghdr), nlh->nlmsg_len);
skb_put(skb2, nlh->nlmsg_len+sizeof(qdpc_cmd_hdr_t));
qdpc_send_packet(skb2, qdpc_ndev);
}
}
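/*
 * Illustrative sketch, not part of the driver: a minimal user-space peer
 * for the netlink protocol handled above. It opens a raw netlink socket on
 * protocol 31 (QDPC_NETLINK_RPC_PCI), sends an empty message of type 10
 * (QDPC_NETLINK_TYPE_SVC_REGISTER) so that qdpc_nl_recv_msg() records our
 * pid, then waits for RPC payloads delivered through qdpc_netlink_rx().
 * Error handling is omitted; the numeric constants match qdpc_init.h.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int main(void)
{
	int fd = socket(AF_NETLINK, SOCK_RAW, 31 /* QDPC_NETLINK_RPC_PCI */);
	struct sockaddr_nl self = { .nl_family = AF_NETLINK, .nl_pid = getpid() };
	struct nlmsghdr req;
	char buf[8192];

	bind(fd, (struct sockaddr *)&self, sizeof(self));

	/* register: the driver stores nlmsg_pid and unicasts RPC data to it */
	memset(&req, 0, sizeof(req));
	req.nlmsg_len = NLMSG_LENGTH(0);
	req.nlmsg_type = 10;	/* QDPC_NETLINK_TYPE_SVC_REGISTER */
	req.nlmsg_pid = getpid();
	send(fd, &req, req.nlmsg_len, 0);

	/* receive RPC frames forwarded from the PCIe link by qdpc_netlink_rx() */
	for (;;) {
		ssize_t n = recv(fd, buf, sizeof(buf), 0);
		if (n <= 0)
			break;
		printf("received %zd bytes\n", n);
	}
	return 0;
}
#endif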
module_init(qdpc_init_module);
module_exit(qdpc_exit_module);

View File

@ -0,0 +1,123 @@
/**
* Copyright (c) 2011-2013 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#ifndef __QDPC_INIT_H_
#define __QDPC_INIT_H_
#include <linux/workqueue.h>
#define QDPC_MODULE_NAME "qdpc_ruby"
#define QDPC_DEV_NAME "qdpc_ruby"
#define QDPC_MODULE_VERSION "1.0"
#define SRAM_TEXT __sram_text
#define SRAM_DATA __sram_data
/*
* Netlink Message types.
*/
#define QDPC_NETLINK_RPC_PCI 31
#define QDPC_NETLINK_TYPE_SVC_REGISTER 10
#define QDPC_NETLINK_TYPE_SVC_RESPONSE 11
#define qdpc_phys_addr(x) ((u32)(x) & 0x0fffffff)
extern struct net_device *qdpc_ndev;
extern uint8_t qdpc_basemac[ETH_ALEN];
typedef struct qdpc_config {
uint32_t cf_usdma:1,
cf_dsdma:1,
cf_msi:1,
cf_64bit:1;
uint32_t cf_usdma_ndesc;
uint32_t cf_dsdma_ndesc;
uint32_t cf_miipllclk;
} qdpc_config_t;
/* This stores an anchor to packet buffers. */
struct qdpc_priv {
struct net_device *ndev; /* pointer to the owning net device */
struct net_device_stats stats; /* Network statistics */
struct tasklet_struct rx_tasklet; /* Tasklet scheduled in interrupt handler */
struct tasklet_struct txd_tasklet; /* Tasklet scheduled in interrupt handler */
struct tasklet_struct dmastatus_tasklet;
struct timer_list txq_enable_timer; /* timer for enable tx */
uint32_t dsisr_status;
uint32_t usisr_status;
uint32_t dsdma_status;
uint32_t usdma_status;
uint32_t dsdma_desc;
uint32_t usdma_desc;
uint16_t host_msi_data; /* MSI data */
struct work_struct init_work; /* INIT handshake work */
void (*ep2host_irq)(struct qdpc_priv*);
qdpc_pktring_t pktq;
uint32_t mii_pllclk;
uint32_t *ep2host_irqstatus;
uint32_t *host2ep_irqstatus;
int32_t *host_ep2h_txd_budget;
int32_t *host_h2ep_txd_budget;
qdpc_epshmem_hdr_t *shmem;
dma_addr_t shmem_busaddr;
struct sock *netlink_socket;
uint32_t netlink_pid;
qdpc_config_t ep_config;
spinlock_t lock; /* Private structure lock */
qdpc_pcie_bda_t *bda;
uint32_t msiaddr;
};
#define le32_readl(x) le32_to_cpu(readl((x)))
#define le32_writel(x, addr) writel(cpu_to_le32((x)), addr)
static inline void qdpc_pcie_posted_write(uint32_t val, void *basereg)
{
writel(val, basereg);
/* flush posted write */
readl(basereg);
}
static inline int qdpc_isbootstate(struct qdpc_priv *p, uint32_t state)
{
__iomem uint32_t *status = &p->bda->bda_bootstate;
uint32_t s = le32_readl(status);
return (s == state);
}
/* Function prototypes */
void qdpc_veth_rx(struct net_device *ndev);
void qdpc_pcie_irqsetup(struct net_device *ndev);
void qdpc_disable_irqs(struct qdpc_priv *priv);
void qdpc_enable_irqs(struct qdpc_priv *priv);
void qdpc_init_work(struct work_struct *task);
void qdpc_free_interrupt(struct net_device *ndev);
int qdpc_pcie_init_intr(struct net_device *ndev);
void qdpc_disable_irqs(struct qdpc_priv *priv);
void qdpc_enable_irqs(struct qdpc_priv *priv);
int qdpc_pcie_init_mem(struct net_device *ndev);
struct sk_buff *qdpc_get_skb(size_t len);
int qdpc_init_rxq(struct qdpc_priv *priv, size_t ringsize);
int qdpc_init_txq(struct qdpc_priv *priv, size_t ringsize);
void qdpc_veth_txdone(struct net_device *ndev);
void qdpc_netlink_rx(struct net_device *ndev, void *buf, size_t len);
void qdpc_intr_ep2host(struct qdpc_priv *priv, uint32_t intr);
int32_t qdpc_send_packet(struct sk_buff *skb, struct net_device *ndev);
#endif /*__QDPC_INIT_H_ */

View File

@ -0,0 +1,668 @@
/**
* Copyright (c) 2011-2013 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/version.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <qtn/skb_recycle.h>
#include <qtn/qtn_global.h>
#include "qdpc_config.h"
#include "qdpc_debug.h"
#include "qdpc_init.h"
#include "qdpc_emac.h"
#ifdef CONFIG_QVSP
#include <trace/ippkt.h>
#include "qtn/qdrv_sch.h"
#include "qtn/qvsp.h"
static struct qvsp_wrapper qdpc_qvsp = {NULL, NULL};
#endif
#define MAX(X,Y) ((X) > (Y) ? (X) : (Y))
/* Net device function prototypes */
int32_t qdpc_veth_open(struct net_device *ndev);
int32_t qdpc_veth_release(struct net_device *ndev);
struct net_device_stats *qdpc_veth_stats(struct net_device *ndev);
int32_t qdpc_veth_change_mtu(struct net_device *ndev, int new_mtu);
int32_t qdpc_veth_set_mac_addr(struct net_device *ndev, void *paddr);
int32_t qdpc_veth_tx(struct sk_buff *skb, struct net_device *ndev);
/* Alignment helper functions */
inline static unsigned long qdpc_align_val_up(unsigned long val, unsigned long step)
{
return ((val + step - 1) & (~(step - 1)));
}
inline static unsigned long qdpc_align_val_down(unsigned long val, unsigned long step)
{
return (val & (~(step - 1)));
}
inline static void* qdpc_align_buf_dma(void *addr)
{
return (void*)qdpc_align_val_up((unsigned long)addr, dma_get_cache_alignment());
}
inline static unsigned long qdpc_align_buf_dma_offset(void *addr)
{
return (qdpc_align_buf_dma(addr) - addr);
}
inline static void* qdpc_align_buf_cache(void *addr)
{
return (void*)qdpc_align_val_down((unsigned long)addr, dma_get_cache_alignment());
}
inline static unsigned long qdpc_align_buf_cache_offset(void *addr)
{
return (addr - qdpc_align_buf_cache(addr));
}
inline static unsigned long qdpc_align_buf_cache_size(void *addr, unsigned long size)
{
return qdpc_align_val_up(size + qdpc_align_buf_cache_offset(addr),
dma_get_cache_alignment());
}
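/*
 * Worked example (not in the original source), assuming a 32-byte cache
 * line (dma_get_cache_alignment() == 32) and a buffer at address 0x1004
 * with length 100:
 *
 *	qdpc_align_buf_cache(0x1004)           -> 0x1000 (round down to a line)
 *	qdpc_align_buf_cache_offset(0x1004)    -> 4
 *	qdpc_align_buf_cache_size(0x1004, 100) -> ((100 + 4) + 31) & ~31 = 128
 *	qdpc_align_buf_dma(0x1004)             -> 0x1020 (round up to a line)
 *
 * This is why the TX path maps qdpc_align_buf_cache(skb->data) for
 * qdpc_align_buf_cache_size() bytes: the mapped region is widened to whole
 * cache lines around the payload.
 */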
/* Net device operations structure */
static struct net_device_ops veth_ops = {
.ndo_open = qdpc_veth_open,
.ndo_stop = qdpc_veth_release,
.ndo_get_stats = qdpc_veth_stats,
.ndo_set_mac_address = qdpc_veth_set_mac_addr,
.ndo_change_mtu = qdpc_veth_change_mtu,
.ndo_start_xmit = qdpc_veth_tx,
};
static inline bool check_netlink_magic(qdpc_cmd_hdr_t *cmd_hdr)
{
return ((memcmp(cmd_hdr->dst_magic, QDPC_NETLINK_DST_MAGIC, ETH_ALEN) == 0)
&& (memcmp(cmd_hdr->src_magic, QDPC_NETLINK_SRC_MAGIC, ETH_ALEN) == 0));
}
int32_t qdpc_veth_open(struct net_device *ndev)
{
struct qdpc_priv *priv = netdev_priv(ndev);
if (!qdpc_isbootstate(priv, QDPC_BDA_FW_RUNNING)) {
netif_stop_queue(ndev); /* can't transmit any more */
return SUCCESS;
}
flush_scheduled_work();
netif_start_queue(ndev);
return SUCCESS;
}
int32_t qdpc_veth_release(struct net_device *ndev)
{
struct qdpc_priv *priv = netdev_priv(ndev);
netif_stop_queue(ndev); /* can't transmit any more */
qdpc_emac_disable(priv);
return SUCCESS;
}
#ifdef CONFIG_QVSP
void qvsp_wrapper_init(struct qvsp_ext_s *qvsp, QVSP_CHECK_FUNC_PROTOTYPE(*check_func))
{
qdpc_qvsp.qvsp = qvsp;
qdpc_qvsp.qvsp_check_func = check_func;
}
EXPORT_SYMBOL(qvsp_wrapper_init);
void qvsp_wrapper_exit(void)
{
qdpc_qvsp.qvsp_check_func = NULL;
qdpc_qvsp.qvsp = NULL;
}
EXPORT_SYMBOL(qvsp_wrapper_exit);
static __sram_text int qdpc_rx_vsp_should_drop(struct sk_buff *skb, struct ethhdr *eh)
{
u8 *data_start;
u16 ether_type = 0;
if (qvsp_is_active(qdpc_qvsp.qvsp) && qdpc_qvsp.qvsp_check_func) {
data_start = qdrv_sch_find_data_start(skb, (struct ether_header *)eh, &ether_type);
qdrv_sch_classify(skb, ether_type, data_start);
if (qdpc_qvsp.qvsp_check_func(qdpc_qvsp.qvsp, QVSP_IF_PCIE_RX, skb,
data_start, skb->len - (data_start - skb->data),
skb->priority)) {
trace_ippkt_dropped(TRACE_IPPKT_DROP_RSN_VSP, 1, 0);
return 1;
}
}
return 0;
}
#else
#define qdpc_rx_vsp_should_drop(_skb, _data_start) (0)
#endif /* CONFIG_QVSP */
static inline SRAM_TEXT void qdpc_tx_skb_recycle(struct sk_buff *skb)
{
#if defined(QDPC_USE_SKB_RECYCLE)
struct qtn_skb_recycle_list *recycle_list = qtn_get_shared_recycle_list();
if (!qtn_skb_recycle_list_push(recycle_list,
&recycle_list->stats_pcie, skb)) {
dev_kfree_skb_any(skb);
}
#else
dev_kfree_skb_any(skb);
#endif
}
/* function runs in tasklet/softirq priority */
void qdpc_netlink_rx(struct net_device *ndev, void *buf, size_t len)
{
struct qdpc_priv *priv = netdev_priv(ndev);
struct sk_buff *skb = nlmsg_new(len, 0);
struct nlmsghdr *nlh;
int ret;
if (skb == NULL) {
DBGPRINTF(DBG_LL_WARNING, QDRV_LF_WARN, "WARNING: out of netlink SKBs\n");
return;
}
nlh = nlmsg_put(skb, 0, 0, NLMSG_DONE, len, 0);
memcpy(nlmsg_data(nlh), buf, len);
NETLINK_CB(skb).dst_group = 0;
/* Send the netlink message to user application */
ret = nlmsg_unicast(priv->netlink_socket, skb, priv->netlink_pid);
if (ret < 0) {
DBGPRINTF(DBG_LL_NOTICE, QDRV_LF_TRACE,
"pid %d socket 0x%p ret %d\n",
priv->netlink_pid, priv->netlink_socket, ret);
}
}
static SRAM_TEXT size_t qdpc_rx_process_frame(struct net_device *ndev,
qdpc_desc_t *rx_desc, uint32_t dma_status, bool lastdesc)
{
qdpc_dmadesc_t *rx_hwdesc = rx_desc->dd_hwdesc;
const uint32_t buffer_size = QDPC_DMA_MAXBUF;
uint32_t dma_control = 0;
uint32_t dma_data = 0;
struct sk_buff *skb;
struct ethhdr *eth;
qdpc_cmd_hdr_t *cmd_hdr;
size_t dmalen;
skb = (struct sk_buff *)rx_desc->dd_metadata;
dmalen = QDPC_DMA_RXLEN(dma_status);
/* Validate the frame: SKB present, length within bounds, single buffer */
if (skb && (dmalen >= QDPC_DMA_MINBUF) && (dmalen <= QDPC_DMA_MAXBUF)
&& (QDPC_DMA_SINGLE_BUFFER(dma_status))){
dma_unmap_single((struct device *)ndev, rx_hwdesc->dma_data,
buffer_size, DMA_FROM_DEVICE);
eth = (struct ethhdr *)skb->data;
switch (ntohs(eth->h_proto)) {
case QDPC_APP_NETLINK_TYPE:
/* Double Check if it's netlink packet*/
cmd_hdr = (qdpc_cmd_hdr_t *)skb->data;
if (check_netlink_magic(cmd_hdr)) {
qdpc_netlink_rx(ndev, skb->data + sizeof(qdpc_cmd_hdr_t), ntohs(cmd_hdr->len));
}
break;
default:
skb->dev = ndev;
skb->len = 0;
skb_reset_tail_pointer(skb);
skb_put(skb, dmalen);
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->protocol = eth_type_trans(skb, ndev);
if (!is_multicast_ether_addr((uint8_t *)eth) &&
qdpc_rx_vsp_should_drop(skb, eth)) {
dev_kfree_skb_any(skb);
} else {
netif_rx(skb);
}
skb = NULL;
break;
}
} else {
DBGPRINTF(DBG_LL_WARNING, QDRV_LF_WARN,
"RX: Drop packet. Skb: 0x%p Status:0x%x Len:%u\n", skb, dma_status, dmalen);
}
if (skb)
dev_kfree_skb_any(skb);
skb = qdpc_get_skb(buffer_size);
if (skb == NULL) {
dma_data = 0;
dma_control = (lastdesc ? QDPC_DMA_LAST_DESC : 0);
} else {
/*
* skb->data has already cache line alignment when returned from qdpc_get_skb,
* no need to do cache line alignment again.
*/
dma_data = dma_map_single((struct device *)ndev, (void*)skb->data,
buffer_size, DMA_FROM_DEVICE);
dma_control = (buffer_size & QDPC_DMA_LEN_MASK) | (lastdesc ? QDPC_DMA_LAST_DESC : 0);
}
dma_status = (QDPC_DMA_OWN);
rx_desc->dd_metadata = (void *)skb;
arc_write_uncached_32(&rx_hwdesc->dma_control, dma_control);
arc_write_uncached_32(&rx_hwdesc->dma_data, dma_data);
arc_write_uncached_32(&rx_hwdesc->dma_status, dma_status);
return dmalen;
}
static SRAM_TEXT void qdpc_indicate_peer_rx_nfree(struct qdpc_priv *priv,
qdpc_pdring_t *rxq, qdpc_desc_t *rx_desc, uint32_t dma_status)
{
uint32_t nfree = rxq->pd_ringsize;
qdpc_desc_t *rx_last_desc;
unsigned long flags;
local_irq_save(flags);
if (unlikely(QDPC_DMA_OWNED(dma_status) == 0)) {
rx_last_desc = rx_desc;
for (rx_desc = rxq->pd_nextdesc; (rx_desc != rx_last_desc) && (nfree > 0);) {
dma_status = arc_read_uncached_32(&rx_desc->dd_hwdesc->dma_status);
if (QDPC_DMA_OWNED(dma_status) == 0) {
nfree--;
} else {
break;
}
if (rx_desc == rxq->pd_lastdesc) {
rx_desc = rxq->pd_firstdesc;
} else {
rx_desc++;
}
}
if (nfree <= QDPC_VETH_RX_LOW_WATERMARK)
nfree = 0;
}
arc_write_uncached_32(priv->host_ep2h_txd_budget, nfree);
local_irq_restore(flags);
}
SRAM_TEXT void qdpc_veth_rx(struct net_device *ndev)
{
struct qdpc_priv *priv = netdev_priv(ndev);
qdpc_pdring_t *rxq = &priv->pktq.pkt_dsq;
uint32_t budget = QDPC_DESC_RING_SIZE << 1;
uint32_t dma_status = 0;
qdpc_desc_t *rx_desc;
qdpc_dmadesc_t *rx_hwdesc;
bool lastdesc;
uint32_t pktlen;
if (rxq->pd_nextdesc == NULL)
return;
rx_desc = rxq->pd_nextdesc;
rx_hwdesc = rx_desc->dd_hwdesc;
dma_status = arc_read_uncached_32(&rx_hwdesc->dma_status);
while (budget-- > 0 && (QDPC_DMA_OWNED(dma_status) == 0)) {
rx_desc = rxq->pd_nextdesc;
lastdesc = (rxq->pd_nextdesc == rxq->pd_lastdesc);
rx_hwdesc = rx_desc->dd_hwdesc;
pktlen = qdpc_rx_process_frame(ndev, rx_desc, dma_status, lastdesc);
/* Check for end of ring, and loop around */
if (lastdesc) {
rxq->pd_nextdesc = rxq->pd_firstdesc;
} else {
rxq->pd_nextdesc++;
}
/* Update the statistics */
priv->stats.rx_packets++;
priv->stats.rx_bytes += pktlen;
dma_status = arc_read_uncached_32(&rxq->pd_nextdesc->dd_hwdesc->dma_status);
}
qdpc_indicate_peer_rx_nfree(priv, rxq, rx_desc, dma_status);
qdpc_intr_ep2host(priv, QDPC_EP_TXDONE);
}
SRAM_TEXT uint32_t qdpc_dma_state(qdpc_pdring_t *q)
{
return readl(q->pd_rx_basereg + EMAC_DMASTATUS);
}
static inline SRAM_TEXT void qdpc_start_txdma(struct qdpc_priv *priv)
{
qdpc_pdring_t *txq = &priv->pktq.pkt_usq;
/* Must hold TXQ lock before running */
//ASSERT(spin_is_locked(&txq->pd_lock));
if (txq->pd_npending) {
qdpc_emac_startdma(txq);
}
}
static SRAM_TEXT uint32_t qdpc_veth_txprocessq(struct net_device *ndev, uint32_t maxpkts)
{
struct qdpc_priv *priv = netdev_priv(ndev);
qdpc_pdring_t *txq = &priv->pktq.pkt_usq;
qdpc_desc_t *tx_desc;
uint32_t nprocessed = 0;
uint32_t dma_data = 0;
uint32_t dma_len = 0;
struct sk_buff * skb;
/* Must hold TXQ lock before running */
//ASSERT(spin_is_locked(&txq->pd_lock));
for(nprocessed = 0; nprocessed < maxpkts; nprocessed++) {
tx_desc = STAILQ_FIRST(&txq->pd_pending);
if (!tx_desc || !((arc_read_uncached_32(&tx_desc->dd_hwdesc->dma_status) & QDPC_DMA_OWN) == 0)) {
break;
}
STAILQ_REMOVE_HEAD(&txq->pd_pending, dd_entry);
skb = (struct sk_buff *)tx_desc->dd_metadata;
dma_data = tx_desc->dd_hwdesc->dma_data;
if (skb->len <= QDPC_DMA_MINBUF) {
dma_len = QDPC_DMA_MINBUF;
} else {
dma_len = skb->len & QDPC_DMA_LEN_MASK;
}
dma_unmap_single((struct device *)ndev, qdpc_align_buf_cache(dma_data),
qdpc_align_buf_cache_size(dma_data, dma_len), DMA_TO_DEVICE);
qdpc_tx_skb_recycle(skb);
tx_desc->dd_metadata = NULL;
tx_desc->dd_qtime = 0;
}
txq->pd_nfree += nprocessed;
txq->pd_npending -= nprocessed;
return nprocessed;
}
inline static SRAM_TEXT void qdpc_send_rxdone(struct qdpc_priv *priv)
{
qdpc_intr_ep2host(priv, QDPC_EP_RXDONE);
}
SRAM_DATA static int peer_rx_nfree = QDPC_DESC_RING_SIZE;
static SRAM_TEXT int32_t qdpc_update_peer_nfree(struct qdpc_priv *priv)
{
int32_t budget;
budget = arc_read_uncached_32(priv->host_h2ep_txd_budget);
if (budget < 0) {
budget = peer_rx_nfree;
} else {
peer_rx_nfree = budget;
arc_write_uncached_32(priv->host_h2ep_txd_budget, -1);
}
return budget;
}
/* TX completion routine, Runs as tasklet/softirq priority */
SRAM_TEXT void qdpc_veth_txdone(struct net_device *ndev)
{
struct qdpc_priv *priv = netdev_priv(ndev);
qdpc_pdring_t *txq = &priv->pktq.pkt_usq;
int32_t npending = (int32_t)txq->pd_npending;
spin_lock_bh(&txq->pd_lock);
qdpc_veth_txprocessq(ndev, npending >> 1);
if (netif_queue_stopped(ndev)) {
del_timer(&priv->txq_enable_timer);
qdpc_send_rxdone(priv);
netif_wake_queue(ndev);
}
spin_unlock_bh(&txq->pd_lock);
}
static SRAM_TEXT int qdpc_send_desc_check(struct qdpc_priv *priv,
qdpc_pdring_t *txq)
{
int32_t budget;
int32_t ret = 1;
unsigned long flags;
local_irq_save(flags);
if (txq->pd_nfree > QDPC_VETH_TX_LOW_WATERMARK) {
budget = qdpc_update_peer_nfree(priv);
if (budget > (txq->pd_npending + QDPC_VETH_RX_LOW_WATERMARK)) {
peer_rx_nfree--;
ret = 0;
}
} else {
qdpc_start_txdma(priv);
}
local_irq_restore(flags);
return ret;
}
SRAM_TEXT int32_t qdpc_send_packet(struct sk_buff *skb, struct net_device *ndev)
{
struct qdpc_priv *priv = netdev_priv(ndev);
qdpc_pdring_t *txq = &priv->pktq.pkt_usq;
qdpc_dmadesc_t *tx_hwdesc;
qdpc_desc_t *tx_desc;
struct sk_buff *skb_cpy = NULL;
bool lastdesc;
uint32_t dma_control = 0 ;
uint32_t dma_status = 0;
uint32_t dma_data = 0;
uint32_t dma_len = 0;
if (unlikely((skb->len == 0) || (skb->len > QDPC_MAX_MTU))) {
DBGPRINTF(DBG_LL_WARNING, QDRV_LF_WARN,
"Xmit packet invalid: len %d\n", skb->len);
dev_kfree_skb_any(skb);
priv->stats.tx_dropped++;
return NETDEV_TX_OK;
}
spin_lock_bh(&txq->pd_lock);
if (qdpc_send_desc_check(priv, txq)) {
netif_stop_queue(ndev);
mod_timer(&priv->txq_enable_timer, jiffies + msecs_to_jiffies(QDPC_STOP_QUEUE_TIMER_DELAY));
spin_unlock_bh(&txq->pd_lock);
priv->stats.tx_errors++;
return NETDEV_TX_BUSY;
}
/* Map data buffer. */
if (skb->len <= QDPC_DMA_MINBUF) {
dma_len = QDPC_DMA_MINBUF;
} else {
dma_len = skb->len & QDPC_DMA_LEN_MASK;
}
if (unlikely(dma_len >= QDPC_DMA_MAXBUF)) {
spin_unlock_bh(&txq->pd_lock);
DBGPRINTF(DBG_LL_WARNING, QDRV_LF_WARN,
"Xmit packet too big: len %d dmalen %d\n", skb->len, dma_len);
dev_kfree_skb_any(skb);
priv->stats.tx_dropped++;
return NETDEV_TX_OK;
}
if (unlikely(skb_linearize(skb) != 0)) {
DBGPRINTF(DBG_LL_WARNING, QDRV_LF_WARN,
"WARNING:%u Linearize failed\n", __LINE__);
dev_kfree_skb_any(skb);
priv->stats.tx_dropped++;
return NETDEV_TX_OK;
}
tx_desc = txq->pd_nextdesc;
tx_hwdesc = tx_desc->dd_hwdesc;
lastdesc = (tx_desc == txq->pd_lastdesc);
/* Hold onto skb. Release when we get a txdone */
tx_desc->dd_metadata = (void *)skb;
if (lastdesc) {
txq->pd_nextdesc = txq->pd_firstdesc;
} else {
txq->pd_nextdesc++;
}
dma_data = dma_map_single((struct device *)ndev, qdpc_align_buf_cache(skb->data),
qdpc_align_buf_cache_size(skb->data, dma_len), DMA_TO_DEVICE) +
qdpc_align_buf_cache_offset(skb->data);
dma_control = (QDPC_DMA_SINGLE_TXBUFFER | QDPC_DMA_TX_NOCRC
|(lastdesc ? QDPC_DMA_LAST_DESC : 0) | (QDPC_DMA_LEN_MASK & dma_len));
dma_status = QDPC_DMA_OWN;
arc_write_uncached_32(&tx_hwdesc->dma_control, dma_control);
arc_write_uncached_32(&tx_hwdesc->dma_data, dma_data);
arc_write_uncached_32(&tx_hwdesc->dma_status, dma_status);
tx_desc->dd_qtime = jiffies;
STAILQ_INSERT_TAIL(&txq->pd_pending, tx_desc, dd_entry);
txq->pd_nfree--;
txq->pd_npending++;
/* DMA engine setup for auto poll so no doorbell push needed */
spin_unlock_bh(&txq->pd_lock);
priv->stats.tx_packets++;
priv->stats.tx_bytes += skb->len;
return NETDEV_TX_OK;
}
SRAM_TEXT int32_t qdpc_veth_tx(struct sk_buff *skb, struct net_device *ndev)
{
return qdpc_send_packet(skb, ndev);
}
struct net_device_stats *qdpc_veth_stats(struct net_device *ndev)
{
struct qdpc_priv *priv = netdev_priv(ndev);
return &priv->stats;
}
int32_t qdpc_veth_change_mtu(struct net_device *ndev,
int new_mtu)
{
struct qdpc_priv *priv = netdev_priv(ndev);
spinlock_t *lock = &priv->lock;
unsigned long flags;
/* check ranges */
if ((new_mtu < QDPC_MIN_MTU) || (new_mtu > QDPC_MAX_MTU))
return -EINVAL;
/*
* Do anything you need here, then accept the new value
*/
spin_lock_irqsave(lock, flags);
ndev->mtu = new_mtu;
spin_unlock_irqrestore(lock, flags);
return SUCCESS; /* success */
}
int32_t qdpc_veth_set_mac_addr(struct net_device *ndev,
void *paddr)
{
struct sockaddr *addr = (struct sockaddr *)paddr;
if (netif_running(ndev))
return -EBUSY;
if (!is_valid_ether_addr((u8 *)addr->sa_data))
return -EINVAL;
memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
return SUCCESS;
}
int32_t qdpc_init_netdev(struct net_device **net_dev)
{
struct qdpc_priv *priv;
struct net_device *ndev;
unsigned char macaddr[ETH_ALEN];
int ret = 0;
/* Net device initialization */
/* Allocate the devices */
ndev = alloc_netdev(sizeof(struct qdpc_priv), "pcie%d",
ether_setup);
if (!ndev) {
PRINT_ERROR("Error in allocating the net device \n");
return -ENOMEM;
}
/* No interesting netdevice features in this card... */
priv = netdev_priv(ndev);
priv->ndev = ndev;
memcpy(macaddr, qdpc_basemac, ETH_ALEN);
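/*
* Derive a locally administered MAC address for the EP interface from the
* base MAC: keep the low five bits of the first octet, advance the top three
* bits by one step and set the locally-administered bit (0x02).
*/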
macaddr[0] = (macaddr[0] & 0x1F) | (((macaddr[0] & 0xE0) + 0x20) & 0xE0) | 0x02;
memcpy(ndev->dev_addr, macaddr, ETH_ALEN);
*net_dev = ndev; /* Storing ndev in global dev */
/* Assign net device operations structure */
ndev->netdev_ops = &veth_ops;
/* Register net device */
ret = register_netdev(ndev);
if (ret) {
PRINT_ERROR("veth: error %i registering net device \"%s\"\n",
ret, ndev->name);
free_netdev(ndev);
}
return ret;
}

View File

@ -0,0 +1,460 @@
/**
* Copyright (c) 2011-2013 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/version.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/kthread.h>
#include <common/topaz_platform.h>
#include <common/ruby_pcie_bda.h>
#include <qtn/mproc_sync.h>
#include <asm/hardware.h>
#include "qdpc_config.h"
#include "qdpc_debug.h"
#include "qdpc_init.h"
#include "qdpc_regs.h"
#include "qdpc_ruby.h"
#include "qdpc_emac.h"
/* Utility macros. Move ? */
#ifndef MAX
#define MAX(X,Y) ((X) > (Y) ? X : Y)
#endif
#ifndef MIN
#define MIN(X,Y) ((X) < (Y) ? X : Y)
#endif
#define REG_WRITE(x,y) (writel((y),(x)))
#define REG_READ(x) (readl(x))
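/*
* Timer callback armed when the TX queue has been stopped for too long: it
* schedules the TX-done tasklet to reap completed descriptors and logs the
* current peer budget and ring occupancy for debugging.
*/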
static void qdpc_tx_runout_func(unsigned long data)
{
struct qdpc_priv *priv = (struct qdpc_priv*)data;
qdpc_pdring_t *txq = &priv->pktq.pkt_usq;
int32_t budget = arc_read_uncached_32(priv->host_h2ep_txd_budget);
tasklet_schedule(&priv->txd_tasklet);
DBGPRINTF(DBG_LL_WARNING, QDRV_LF_WARN, "Restarting tx queue\n");
DBGPRINTF(DBG_LL_WARNING, QDRV_LF_WARN, "budget %d, free %d, pending %d\n",
budget, txq->pd_nfree, txq->pd_npending);
}
static SRAM_TEXT inline void qdpc_intr_ep2host_unlocked(struct qdpc_priv *priv, uint32_t intr)
{
intr |= arc_read_uncached_32(priv->ep2host_irqstatus);
arc_write_uncached_32(priv->ep2host_irqstatus, intr);
priv->ep2host_irq(priv);
}
SRAM_TEXT void qdpc_intr_ep2host(struct qdpc_priv *priv, uint32_t intr)
{
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
qdpc_intr_ep2host_unlocked(priv, intr);
spin_unlock_irqrestore(&priv->lock, flags);
}
static SRAM_TEXT void qdpc_irq_msi(struct qdpc_priv *priv)
{
writel(priv->host_msi_data, priv->msiaddr);
}
static SRAM_TEXT void qdpc_irq_legacy(struct qdpc_priv *priv)
{
/* Enable legacy interrupt mechanism */
unsigned long pcie_cfg0 = readl(RUBY_SYS_CTL_PCIE_CFG0);
if (!(pcie_cfg0 & BIT(9))) {
pcie_cfg0 |= BIT(9);
writel(pcie_cfg0, RUBY_SYS_CTL_PCIE_CFG0);
}
}
void qdpc_pcie_irqsetup(struct net_device *ndev)
{
struct qdpc_priv *priv = netdev_priv(ndev);
if ((arc_read_uncached_32(&priv->bda->bda_flags))& PCIE_BDA_MSI){
priv->msiaddr = arc_read_uncached_32(&priv->bda->bda_msi_addr);
priv->ep2host_irq = qdpc_irq_msi;
PRINT_INFO("MSI Enabled@0x%x\n", priv->msiaddr);
} else {
priv->msiaddr = 0;
priv->ep2host_irq = qdpc_irq_legacy;
PRINT_INFO("MSI disabled using INTA\n");
}
priv->host_msi_data = readl(RUBY_PCIE_REG_BASE + MSI_CTL_OFFSET + 0xc) & 0xffff;
return ;
}
static SRAM_TEXT irqreturn_t qdpc_dsdma_isr(int irq, void *dev)
{
struct net_device *ndev = (struct net_device *)dev;
struct qdpc_priv *priv = netdev_priv(ndev);
uint32_t dsdma_rx_base = priv->pktq.pkt_dsq.pd_rx_basereg;
uint32_t dma_status = REG_READ(dsdma_rx_base + EMAC_DMASTATUS);
uint32_t isr_status = dma_status & DmaAllInts;
if (isr_status & DmaRxDone) {
tasklet_schedule(&priv->rx_tasklet);
}
/* Check for out of descriptor condition. Issue RX demand poll if desc avail */
if (isr_status & (DmaNoRxDesc | DmaRxMissedFrame)) {
priv->dsdma_desc = REG_READ(dsdma_rx_base + EMAC_DMARDP);
priv->dsdma_status = REG_READ(0x80000000 + priv->dsdma_desc );
if (QDPC_DMA_OWNED(priv->dsdma_status)) {
REG_WRITE(dsdma_rx_base + EMAC_DMARPD, 1);
}
}
/* Restart RX if stopped */
if ((isr_status & DmaRxStopped) && (QDPC_DMA_RX_STATUS(isr_status) == 0)) {
REG_WRITE(dsdma_rx_base + EMAC_DMACTRL, DmaStartRx);
}
if (isr_status & (QDPC_DMA_DS_ERROR)) {
if (isr_status & DmaMacInterrupt) {
uint32_t macintr_status = REG_READ(dsdma_rx_base + EMAC_MACINTR);
if (macintr_status & QDPC_MAC_DS_INTR) {
priv->dsisr_status |= (QDPC_MAC_DS_INTR << 9);
REG_WRITE(dsdma_rx_base + EMAC_MACINTR, QDPC_MAC_DS_INTR);
}
}
priv->dsisr_status = dma_status;
tasklet_schedule(&priv->dmastatus_tasklet);
printk(KERN_EMERG "%d:%s: out of rx descriptors\n", __LINE__, __FUNCTION__);
}
REG_WRITE(dsdma_rx_base + EMAC_DMASTATUS , isr_status);
return IRQ_HANDLED;
}
static SRAM_TEXT irqreturn_t qdpc_usdma_isr(int irq, void *dev)
{
struct net_device *ndev = (struct net_device *)dev;
struct qdpc_priv *priv = netdev_priv(ndev);
uint32_t usdma_rx_base = priv->pktq.pkt_usq.pd_rx_basereg;
uint32_t isr_status = (REG_READ(usdma_rx_base + EMAC_DMASTATUS)) & DmaAllInts;
if (isr_status & (DmaRxDone | DmaNoRxDesc | DmaRxMissedFrame)) {
qdpc_intr_ep2host_unlocked(priv, QDPC_EP_RXDONE);
/* Issue demand poll if RX is stuck because of no descriptors */
if (isr_status & (DmaNoRxDesc | DmaRxMissedFrame)) {
REG_WRITE(usdma_rx_base + EMAC_DMARPD, 1);
}
/* Schedule the tx done anyway to clear up tx descriptors and let flow control run */
tasklet_schedule(&priv->txd_tasklet);
}
/* Restart RX if stopped */
if ((isr_status & DmaRxStopped) && (QDPC_DMA_RX_STATUS(isr_status) == 0)) {
REG_WRITE(usdma_rx_base + EMAC_DMACTRL, DmaStartRx);
}
/* Clear DMA Rx Transfer Done IRQ */
if (isr_status & (QDPC_DMA_US_MISC)) {
if (isr_status & DmaMacInterrupt) {
uint32_t macintr_status = (REG_READ(usdma_rx_base + EMAC_MACINTR) & QDPC_MAC_US_INTR);
if (macintr_status) {
priv->usisr_status |= (macintr_status << 9);
REG_WRITE(usdma_rx_base + EMAC_MACINTR, macintr_status);
}
}
priv->usisr_status |= (isr_status & QDPC_DMA_US_INTR);
tasklet_schedule(&priv->dmastatus_tasklet);
printk(KERN_EMERG "%d:%s: out of rx descriptors\n", __LINE__, __FUNCTION__);
}
REG_WRITE(usdma_rx_base + EMAC_DMASTATUS, isr_status);
return IRQ_HANDLED;
}
static SRAM_TEXT irqreturn_t qdpc_dma_isr(int irq, void *dev)
{
qdpc_usdma_isr(irq, dev);
qdpc_dsdma_isr(irq, dev);
return IRQ_HANDLED;
}
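/*
* Doorbell interrupt raised by the host: read and clear the host-to-EP IRQ
* status word, ack the IPC interrupt, then issue downstream RX/TX demand
* polls on TXREADY and schedule the TX-done or RX tasklets as requested.
*/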
static SRAM_TEXT irqreturn_t qdpc_host_isr(int irq, void *dev)
{
struct net_device *ndev = (struct net_device *)dev;
struct qdpc_priv *priv = netdev_priv(ndev);
qdpc_pdring_t *dsq = &priv->pktq.pkt_dsq;
uint32_t irqstatus = arc_read_uncached_32(priv->host2ep_irqstatus);
arc_write_uncached_32(priv->host2ep_irqstatus, 0);
qtn_mproc_sync_irq_ack(RUBY_SYS_CTL_D2L_INT, QDPC_H2EP_INTERRUPT_BIT);
if (irqstatus & QDPC_HOST_TXREADY) {
REG_WRITE(dsq->pd_rx_basereg + EMAC_DMARPD, 1);
udelay(1);
REG_WRITE(dsq->pd_tx_basereg + EMAC_DMATPD, 1);
}
if (irqstatus & QDPC_HOST_TXDONE) {
tasklet_schedule(&priv->txd_tasklet);
}
if (irqstatus & QDPC_HOST_START_RX) {
tasklet_schedule(&priv->rx_tasklet);
}
return IRQ_HANDLED;
}
static SRAM_TEXT void qdpc_dmastatus_tasklet(void *dev)
{
struct net_device *ndev = (struct net_device *)dev;
struct qdpc_priv *priv = netdev_priv(ndev);
static uint32_t count=0;
printk ("EDMA(%u) DS:0x%x/0x%x/0x%x US:0x%x\n",++count, priv->dsdma_desc, priv->dsdma_status,
priv->dsisr_status, priv->usisr_status);
priv->dsisr_status = 0;
priv->usisr_status = 0;
priv->dsdma_status = 0;
priv->usdma_status = 0;
priv->dsdma_desc = 0;
priv->usdma_desc = 0;
}
static SRAM_TEXT void qdpc_txd_tasklet(void *dev)
{
struct net_device *ndev = (struct net_device *)dev;
qdpc_veth_txdone(ndev);
return;
}
static SRAM_TEXT void qdpc_rx_tasklet(void *dev)
{
/* Data Rx function */
qdpc_veth_rx((struct net_device *)dev);
}
inline static __sram_text unsigned long qdpc_align_val_up(unsigned long val, unsigned long step)
{
return ((val + step - 1) & (~(step - 1)));
}
int qdpc_pcie_init_mem(struct net_device *ndev)
{
struct qdpc_priv *priv = netdev_priv(ndev);
const uint32_t align = MAX(dma_get_cache_alignment(), QDPC_DMA_ALIGN);
uint32_t offset;
uint32_t usdma_offset;
uint32_t dsdma_offset;
/* setup boot data area mapping */
priv->bda = (void *)(RUBY_PCIE_BDA_ADDR + 0x80000000);
/* ISR status area */
priv->host2ep_irqstatus = &priv->bda->bda_h2ep_irqstatus;
priv->ep2host_irqstatus = &priv->bda->bda_ep2h_irqstatus;
priv->host_ep2h_txd_budget = &priv->bda->bda_ep2h_txd_budget;
priv->host_h2ep_txd_budget = &priv->bda->bda_h2ep_txd_budget;
/* Setup descriptor shared memory area */
priv->shmem = (qdpc_epshmem_hdr_t *)&priv->bda[1];
priv->shmem_busaddr = (dma_addr_t)RUBY_PCIE_BDA_ADDR;
printk("Shmem: VA:0x%p PA:0x%p\n", (void *)priv->shmem, (void *)priv->shmem_busaddr);
/* Initial offset past shared memory header aligned on a cache line size boundary */
offset = qdpc_align_val_up(sizeof(qdpc_pcie_bda_t) + sizeof(qdpc_epshmem_hdr_t), align);
/* Assign to upstream DMA host descriptors */
usdma_offset = offset;
/* Recalculate and align offset past upstream descriptors */
offset += (sizeof(qdpc_dmadesc_t) * priv->ep_config.cf_usdma_ndesc);
offset = qdpc_align_val_up(offset, align);
/* Assign to downstream descriptors */
dsdma_offset = offset;
/* Recalculate and align offset past downstream descriptors */
offset += (sizeof(qdpc_dmadesc_t) * priv->ep_config.cf_dsdma_ndesc);
offset = qdpc_align_val_up(offset, align);
arc_write_uncached_32(&priv->shmem->eps_dsdma_desc, dsdma_offset);
arc_write_uncached_32(&priv->shmem->eps_usdma_desc, usdma_offset);
arc_write_uncached_32(&priv->shmem->eps_dsdma_ndesc, priv->ep_config.cf_dsdma_ndesc);
arc_write_uncached_32(&priv->shmem->eps_usdma_ndesc,priv->ep_config.cf_usdma_ndesc);
arc_write_uncached_32(&priv->shmem->eps_size, offset);
arc_write_uncached_32(&priv->shmem->eps_dma_offset, arc_read_uncached_32(&priv->bda->bda_dma_offset));
arc_write_uncached_32(&priv->shmem->eps_ver, 1);
arc_write_uncached_32(&priv->shmem->eps_maxbuf, QDPC_DMA_MAXBUF);
arc_write_uncached_32(&priv->shmem->eps_minbuf, QDPC_DMA_MINBUF);
arc_write_uncached_32(&priv->shmem->eps_align, align);
printk("%s() Sz:%u US_offset:%u DS_offset:%u\n",
__func__, offset, usdma_offset, dsdma_offset);
return SUCCESS;
}
static void qdpc_pcie_enable_tasklets(struct qdpc_priv *priv)
{
tasklet_enable(&priv->rx_tasklet);
tasklet_enable(&priv->txd_tasklet);
tasklet_enable(&priv->dmastatus_tasklet);
}
static void qdpc_pcie_disable_tasklets(struct qdpc_priv *priv)
{
tasklet_disable(&priv->rx_tasklet);
tasklet_disable(&priv->txd_tasklet);
tasklet_disable(&priv->dmastatus_tasklet);
}
int qdpc_pcie_init_intr(struct net_device *ndev)
{
struct qdpc_priv *priv = netdev_priv(ndev);
int ret = 0;
ret = request_irq(RUBY_IRQ_DSP, qdpc_host_isr, IRQF_DISABLED, "PCIe(host)", ndev);
if (ret) {
PRINT_ERROR(KERN_ERR " Host Interrupt initialization failed\n");
return FAILURE;
}
ret = request_irq(RUBY_IRQ_ENET0, qdpc_dma_isr, IRQF_DISABLED, "PCIe(dsdma)", ndev);
if (ret) {
PRINT_ERROR(KERN_ERR " PCIe(dsdma) Interrupt initialization failed\n");
return FAILURE;
}
ret = request_irq(RUBY_IRQ_ENET1, qdpc_dma_isr, IRQF_DISABLED, "PCIe(usdma)", ndev);
if (ret) {
PRINT_ERROR(KERN_ERR " PCIe(usdma) Interrupt initialization failed\n");
return FAILURE;
}
/* Initialize tasklets */
tasklet_init(&priv->rx_tasklet, (void *)qdpc_rx_tasklet, (unsigned long)ndev);
tasklet_init(&priv->txd_tasklet, (void *)qdpc_txd_tasklet, (unsigned long)ndev);
tasklet_init(&priv->dmastatus_tasklet, (void *)qdpc_dmastatus_tasklet, (unsigned long)ndev);
priv->txq_enable_timer.function = qdpc_tx_runout_func;
priv->txq_enable_timer.data = (unsigned long)priv;
init_timer(&priv->txq_enable_timer);
qdpc_pcie_disable_tasklets(priv);
ret = qdpc_pcie_init_mem(priv->ndev);
if (ret == FAILURE) {
qdpc_free_interrupt(ndev);
return FAILURE;
}
ret = qdpc_emac_init(priv);
if (ret == FAILURE) {
qdpc_free_interrupt(ndev);
return FAILURE;
}
return SUCCESS;
}
void qdpc_free_interrupt(struct net_device *ndev)
{
struct qdpc_priv *priv = netdev_priv(ndev);
free_irq(RUBY_IRQ_ENET0, ndev);
free_irq(RUBY_IRQ_ENET1, ndev);
free_irq(RUBY_IRQ_DSP, ndev);
qdpc_pcie_disable_tasklets(priv);
tasklet_kill(&priv->rx_tasklet);
tasklet_kill(&priv->dmastatus_tasklet);
tasklet_kill(&priv->txd_tasklet);
del_timer(&priv->txq_enable_timer);
}
static inline void qdpc_setbootstate(struct qdpc_priv *p, uint32_t state) {
__iomem qdpc_pcie_bda_t *bda = p->bda;
qdpc_pcie_posted_write(state, &bda->bda_bootstate);
}
static int qdpc_bootpoll(struct qdpc_priv *p, uint32_t state) {
while (!kthread_should_stop() && (qdpc_isbootstate(p,state) == 0)) {
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(HZ/20);
}
return 0;
}
void qdpc_init_work(struct work_struct *task)
{
struct qdpc_priv *priv;
unsigned char macaddr[ETH_ALEN];
priv = container_of(task, struct qdpc_priv, init_work);
PRINT_INFO("Waiting for host start signal\n");
qdpc_bootpoll(priv, QDPC_BDA_FW_START);
qdpc_pcie_irqsetup(priv->ndev);
qdpc_setbootstate(priv, QDPC_BDA_FW_CONFIG);
PRINT_INFO("Enable DMA engines\n");
qdpc_bootpoll(priv, QDPC_BDA_FW_RUN);
qdpc_pcie_enable_tasklets(priv);
qdpc_emac_enable(priv);
netif_start_queue(priv->ndev);
/* Set MAC address used by host side */
memcpy(macaddr, qdpc_basemac, ETH_ALEN);
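/*
* The host-side address is derived from the same base MAC, but the top three
* bits of the first octet are advanced by two steps so it differs from the
* EP-side address set up in qdpc_init_netdev().
*/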
macaddr[0] = (macaddr[0] & 0x1F) | (((macaddr[0] & 0xE0) + 0x40) & 0xE0) | 0x02;
/*
* The bda_pci_pre_status and bda_pci_endian fields are not used at runtime, so the
* MAC address is stored here in order to avoid updating the bootloader.
*/
memcpy(&priv->bda->bda_pci_pre_status, macaddr, ETH_ALEN);
/* Enable IRQ */
writel(QDPC_H2EP_INTERRUPT_MASK, RUBY_SYS_CTL_D2L_INT_MASK);
qdpc_setbootstate(priv, QDPC_BDA_FW_RUNNING);
PRINT_INFO("Connection established with Host\n");
}

View File

@ -0,0 +1,28 @@
/**
* Copyright (c) 2011-2013 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#ifndef __QDPC_REGS_H__
#define __QDPC_REGS_H__
#define QDPC_PCIE_REG_BASE 0xe9000000
#define QDPC_D2L_REG_BASE 0xe000003c
#define QDPC_D2L_REG_INTMASK 0xe0000040
#endif /*__QDPC_REGS_H__ */

View File

@ -0,0 +1,160 @@
/**
* Copyright (c) 2011-2013 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/version.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <common/topaz_platform.h>
#include <qtn/skb_recycle.h>
#include <qtn/qtn_global.h>
#include "qdpc_config.h"
#include "qdpc_debug.h"
#include "qdpc_init.h"
#include "qdpc_emac.h"
SRAM_TEXT struct sk_buff *qdpc_get_skb(size_t len)
{
const uint32_t align = dma_get_cache_alignment();
struct sk_buff *skb = NULL;
uint32_t off;
#if defined(QDPC_USE_SKB_RECYCLE)
struct qtn_skb_recycle_list *recycle_list = qtn_get_shared_recycle_list();
uint32_t size;
if (recycle_list) {
skb = qtn_skb_recycle_list_pop(recycle_list, &recycle_list->stats_pcie);
}
if (!skb) {
size = qtn_rx_buf_size();
if (len > size)
size = len;
skb = dev_alloc_skb(size + align);
}
#else
skb = dev_alloc_skb(len + align);
#endif
if (skb) {
off = ((uint32_t)((unsigned long)skb->data)) & (align - 1);
if (off) {
skb_reserve(skb, align - off);
}
}
return skb;
}
int qdpc_init_rxq(struct qdpc_priv *priv, size_t ringsize)
{
uint32_t i;
uint32_t dma_control = 0 ;
uint32_t dma_status = 0;
uint32_t dma_data = 0;
qdpc_dmadesc_t *rx_hwdesc = NULL;
uint32_t skb_bufsize = QDPC_DMA_MAXBUF;
uint32_t buffer_size = QDPC_DMA_MAXBUF;
qdpc_pdring_t *rxq = &priv->pktq.pkt_dsq;
spin_lock_init(&rxq->pd_lock);
rxq->pd_ringsize = ringsize;
rxq->pd_hwdesc = (qdpc_dmadesc_t*)dma_alloc_coherent(NULL,
(rxq->pd_ringsize * sizeof(qdpc_dmadesc_t)),
&rxq->pd_dst_busaddr, GFP_KERNEL | GFP_DMA);
memset(rxq->pd_desc, 0, sizeof(rxq->pd_desc));
for (i = 0 ; i < rxq->pd_ringsize ; i++ ) {
struct sk_buff *skb = qdpc_get_skb(skb_bufsize);
if (!skb)
break;
skb->len=0;
rx_hwdesc = &rxq->pd_hwdesc[i];
rxq->pd_desc[i].dd_hwdesc = &rxq->pd_hwdesc[i];
rxq->pd_desc[i].dd_metadata = (void*)skb;
memset(skb->data, 0, buffer_size);
dma_status = (QDPC_DMA_OWN);
dma_control = (buffer_size & QDPC_DMA_LEN_MASK);
dma_data = dma_map_single((struct device *)priv->ndev, (void*)skb->data, buffer_size, DMA_FROM_DEVICE);
arc_write_uncached_32(&rx_hwdesc->dma_control, dma_control);
arc_write_uncached_32(&rx_hwdesc->dma_data, dma_data);
arc_write_uncached_32(&rx_hwdesc->dma_ptr, 0);
arc_write_uncached_32(&rx_hwdesc->dma_status, dma_status);
}
/* Mark end of buffer */
arc_write_uncached_32(&rx_hwdesc->dma_control,(dma_control |QDPC_DMA_LAST_DESC));
rxq->pd_desc[rxq->pd_ringsize - 1].dd_flags |= QDPC_DMA_LAST_DESC;
rxq->pd_lastdesc = &rxq->pd_desc[rxq->pd_ringsize - 1];
rxq->pd_firstdesc = &rxq->pd_desc[0];
rxq->pd_nextdesc = rxq->pd_firstdesc;
return 0;
}
int qdpc_init_txq(struct qdpc_priv *priv, size_t ringsize)
{
uint32_t i;
qdpc_dmadesc_t *tx_hwdesc = NULL;
qdpc_pdring_t *txq = &priv->pktq.pkt_usq;
spin_lock_init(&txq->pd_lock);
STAILQ_INIT(&txq->pd_pending);
txq->pd_ringsize = ringsize;
txq->pd_nfree = txq->pd_ringsize;
txq->pd_npending = 0;
txq->pd_hwdesc = (qdpc_dmadesc_t*)dma_alloc_coherent(NULL,
(txq->pd_ringsize * sizeof(qdpc_dmadesc_t)),
&txq->pd_src_busaddr, GFP_KERNEL | GFP_DMA);
memset(txq->pd_desc, 0, sizeof(txq->pd_desc));
for (i = 0 ; i < txq->pd_ringsize ; i++ ) {
tx_hwdesc = &txq->pd_hwdesc[i];
txq->pd_desc[i].dd_hwdesc = &txq->pd_hwdesc[i];
txq->pd_desc[i].dd_metadata = NULL;
arc_write_uncached_32(&tx_hwdesc->dma_control, (QDPC_DMA_SINGLE_TXBUFFER));
arc_write_uncached_32(&tx_hwdesc->dma_data, 0);
arc_write_uncached_32(&tx_hwdesc->dma_ptr, 0);
arc_write_uncached_32(&tx_hwdesc->dma_status, 0);
}
/* Mark end of buffer */
arc_write_uncached_32(&tx_hwdesc->dma_control, QDPC_DMA_LAST_DESC);
txq->pd_desc[txq->pd_ringsize - 1].dd_flags |= QDPC_DMA_LAST_DESC;
txq->pd_lastdesc = &txq->pd_desc[txq->pd_ringsize - 1];
txq->pd_firstdesc = &txq->pd_desc[0];
txq->pd_nextdesc = txq->pd_firstdesc;
return 0;
}

View File

@ -0,0 +1,30 @@
#
# Makefile for arm platform
#
EXTRA_CFLAGS += -Wall -Wno-deprecated-declarations \
-I$(src) \
-I$(src)/../../include \
-I$(src)/../common $(DNI_KMOD_CFLAGS)
EXTRA_CFLAGS += -DQTN_TX_SKBQ_SUPPORT -DQTN_WAKEQ_SUPPORT
PWD := $(shell pwd)
default: all
COMMON_DIR := ../common
qdpc-host-objs := $(COMMON_DIR)/qdpc_init.o $(COMMON_DIR)/qdpc_pcie.o $(COMMON_DIR)/topaz_vnet.o qdpc_platform.o
obj-m := qdpc-host.o
qdpc_host.o: $(qdpc-host-objs)
ld -r $^ -o $@
all:
make -C $(KERNELDIR) $(CROSS) M=$(PWD) modules
clean:
rm -rf $(COMMON_DIR)/.*.cmd $(COMMON_DIR)/.tmp_versions
rm -rf Module.markers Module.symvers modules.order *~ $(qdpc-host-objs) *.o *.ko *.mod.o *.mod.c

View File

@ -0,0 +1,75 @@
/**
* Copyright (c) 2012-2012 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
/*
* Platform dependent implementation. Customers need to modify this file.
*/
#include <linux/interrupt.h>
#include <qdpc_platform.h>
#include <topaz_vnet.h>
#include <qdpc_regs.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
/*
* Enable MSI interrupt of PCIe.
*/
void enable_vmac_ints(struct vmac_priv *vmp)
{
volatile uint32_t *dma_wrd_imwr = QDPC_BAR_VADDR(vmp->dmareg_bar, TOPAZ_IMWR_DONE_ADDRLO_OFFSET);
writel(vmp->dma_msi_imwr, dma_wrd_imwr);
}
/*
* Disable MSI interrupt of PCIe.
*/
void disable_vmac_ints(struct vmac_priv *vmp)
{
volatile uint32_t *dma_wrd_imwr = QDPC_BAR_VADDR(vmp->dmareg_bar, TOPAZ_IMWR_DONE_ADDRLO_OFFSET);
writel(vmp->dma_msi_dummy, dma_wrd_imwr);
}
/*
* Enable interrupt for detecting EP reset.
*/
void enable_ep_rst_detection(struct net_device *ndev)
{
}
/*
* Disable interrupt for detecting EP reset.
*/
void disable_ep_rst_detection(struct net_device *ndev)
{
}
/*
* Interrupt context for detecting EP reset.
* This function should do:
* 1. check interrupt status to see if EP reset.
* 2. if EP reset, handle it.
*/
void handle_ep_rst_int(struct net_device *ndev)
{
}

View File

@ -0,0 +1,101 @@
/**
* Copyright (c) 2012-2012 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
/*
* Platform dependent implementation. Customers need to modify this file.
*/
#ifndef __QDPC_PFDEP_H__
#define __QDPC_PFDEP_H__
#include <linux/version.h>
#include <topaz_vnet.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
#define IOREMAP ioremap_wc
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) */
#define IOREMAP ioremap
#endif
/* IO functions */
#ifndef readb
#define readb(addr) (*(volatile unsigned char *) (addr))
#endif
#ifndef readw
#define readw(addr) (*(volatile unsigned short *) (addr))
#endif
#ifndef readl
#define readl(addr) (*(volatile unsigned int *) (addr))
#endif
#ifndef writeb
#define writeb(b,addr) (*(volatile unsigned char *) (addr) = (b))
#endif
#ifndef writew
#define writew(b,addr) (*(volatile unsigned short *) (addr) = (b))
#endif
#ifndef writel
#define writel(b,addr) (*(volatile unsigned int *) (addr) = (b))
#endif
#ifndef virt_to_bus
#define virt_to_bus virt_to_phys
#endif
/* Bit number and mask of MSI in the interrupt mask and status register */
#define QDPC_INTR_MSI_BIT 0
#define QDPC_INTR_MSI_MASK (1 << QDPC_INTR_MSI_BIT)
/* Enable MSI interrupt of PCIe */
extern void enable_vmac_ints(struct vmac_priv *vmp);
/* Disable MSI interrupt of PCIe */
extern void disable_vmac_ints(struct vmac_priv *vmp);
/* Enable interrupt for detecting EP reset */
extern void enable_ep_rst_detection(struct net_device *ndev);
/* Disable interrupt for detecting EP reset */
extern void disable_ep_rst_detection(struct net_device *ndev);
/* Interrupt context for detecting EP reset */
extern void handle_ep_rst_int(struct net_device *ndev);
/* Allocated buffer size for a packet */
#define SKB_BUF_SIZE 2048
/* Transmit Queue Length */
#define QDPC_TX_QUEUE_SIZE 180
/* Receive Queue Length */
#define QDPC_RX_QUEUE_SIZE 384
/* Customer defined function */
#define qdpc_platform_init() 0
#define qdpc_platform_exit() do { } while(0)
/* PCIe driver update resource in PCI configure space after EP reset */
#define qdpc_update_hw_bar(pdev, index) do { } while(0)
/* TODO: If MSI IRQ-loss issue can be fixed, remove macro below */
/*#define QDPC_PLATFORM_IRQ_FIXUP*/
#endif /* __QDPC_PFDEP_H__ */

View File

@ -0,0 +1,887 @@
/**
* Copyright (c) 2012-2012 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/version.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/netlink.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/reboot.h>
#include "qdpc_config.h"
#include "qdpc_debug.h"
#include "qdpc_init.h"
#include "qdpc_regs.h"
#include "qdpc_platform.h"
#include "topaz_vnet.h"
#define QDPC_TOPAZ_IMG "topaz-linux.lzma.img"
#define QDPC_TOPAZ_UBOOT "u-boot.bin"
#define MAX_IMG_NUM 2
#define EP_BOOT_FROM_FLASH 1
#ifndef MEMORY_START_ADDRESS
#define MEMORY_START_ADDRESS virt_to_bus((void *)PAGE_OFFSET)
#endif
static unsigned int tlp_mps = 256;
module_param(tlp_mps, uint, 0644);
MODULE_PARM_DESC(tlp_mps, "Default PCIe Max_Payload_Size");
/* Quantenna PCIE vendor and device identifiers */
static struct pci_device_id qdpc_pcie_ids[] = {
{PCI_DEVICE(QDPC_VENDOR_ID, QDPC_DEVICE_ID),},
{0,}
};
MODULE_DEVICE_TABLE(pci, qdpc_pcie_ids);
static int qdpc_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void qdpc_pcie_remove(struct pci_dev *pdev);
static int qdpc_boot_thread(void *data);
static void qdpc_nl_recv_msg(struct sk_buff *skb);
int qdpc_init_netdev(struct net_device **net_dev, struct pci_dev *pdev);
static bool is_ep_reset = false;
#ifndef PCIE_HOTPLUG_SUPPORTED
static int link_monitor(void *data);
static struct task_struct *link_monitor_thread = NULL;
#endif
char qdpc_pcie_driver_name[] = "qdpc_host";
static struct pci_driver qdpc_pcie_driver = {
.name = qdpc_pcie_driver_name,
.id_table = qdpc_pcie_ids,
.probe = qdpc_pcie_probe,
.remove = qdpc_pcie_remove,
#ifdef CONFIG_QTN_PM
.suspend = qdpc_pcie_suspend,
.resume = qdpc_pcie_resume,
#endif
};
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)
struct netlink_kernel_cfg qdpc_netlink_cfg = {
.groups = 0,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
.flags = 0,
#endif
.input = qdpc_nl_recv_msg,
.cb_mutex = NULL,
.bind = NULL,
};
#endif
struct sock *qdpc_nl_sk = NULL;
int qdpc_clntPid = 0;
unsigned int (*qdpc_pci_readl)(void *addr) = qdpc_readl;
void (*qdpc_pci_writel)(unsigned int val, void *addr) = qdpc_writel;
static int qdpc_bootpoll(struct vmac_priv *p, uint32_t state)
{
while (!kthread_should_stop() && (qdpc_isbootstate(p,state) == 0)) {
if (qdpc_booterror(p))
return -1;
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(QDPC_SCHED_TIMEOUT);
}
return 0;
}
static void booterror(qdpc_pcie_bda_t *bda)
{
if (PCIE_BDA_TARGET_FWLOAD_ERR & qdpc_pci_readl(&bda->bda_flags))
printk("EP boot from download firmware failed!\n");
else if (PCIE_BDA_TARGET_FBOOT_ERR & qdpc_pci_readl(&bda->bda_flags))
printk("EP boot from flash failed! Please check if there is usable image in Target flash.\n");
else
printk("EP boot get in error, dba flag: 0x%x\n", qdpc_pci_readl(&bda->bda_flags));
}
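/*
* Endianness handshake with the EP: write a known pattern to bda_pci_endian,
* signal it via bda_pci_pre_status, wait for the EP to confirm through
* bda_pci_post_status, then read the pattern back to select the matching
* qdpc_pci_readl/qdpc_pci_writel accessors.
*/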
static void qdpc_pci_endian_detect(struct vmac_priv *priv)
{
__iomem qdpc_pcie_bda_t *bda = priv->bda;
volatile uint32_t pci_endian;
writel(QDPC_PCI_ENDIAN_DETECT_DATA, &bda->bda_pci_endian);
mmiowb();
writel(QDPC_PCI_ENDIAN_VALID_STATUS, &bda->bda_pci_pre_status);
while (readl(&bda->bda_pci_post_status) != QDPC_PCI_ENDIAN_VALID_STATUS) {
if (kthread_should_stop())
break;
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(QDPC_SCHED_TIMEOUT);
}
pci_endian = readl(&bda->bda_pci_endian);
if (pci_endian == QDPC_PCI_LITTLE_ENDIAN) {
qdpc_pci_readl = qdpc_readl;
qdpc_pci_writel = qdpc_writel;
printk("PCI memory is little endian\n");
} else if (pci_endian == QDPC_PCI_BIG_ENDIAN) {
qdpc_pci_readl = qdpc_le32_readl;
qdpc_pci_writel = qdpc_le32_writel;
printk("PCI memory is big endian\n");
} else {
qdpc_pci_readl = qdpc_readl;
qdpc_pci_writel = qdpc_writel;
printk("PCI memory endian value:%08x is invalid - using little endian\n", pci_endian);
}
/* Clear endian flags */
writel(0, &bda->bda_pci_pre_status);
writel(0, &bda->bda_pci_post_status);
writel(0, &bda->bda_pci_endian);
}
static void qdpc_pci_dma_offset_reset(struct vmac_priv *priv)
{
__iomem qdpc_pcie_bda_t *bda = priv->bda;
uint32_t dma_offset;
/* Get EP Mapping address */
dma_offset = readl(&bda->bda_dma_offset);
if ((dma_offset & PCIE_DMA_OFFSET_ERROR_MASK) != PCIE_DMA_OFFSET_ERROR) {
printk("DMA offset : 0x%08x, no need to reset the value.\n", dma_offset);
return;
}
dma_offset &= ~PCIE_DMA_OFFSET_ERROR_MASK;
printk("EP map start addr : 0x%08x, Host memory start : 0x%08x\n",
dma_offset, (unsigned int)MEMORY_START_ADDRESS);
/* Reset DMA offset in bda */
dma_offset -= MEMORY_START_ADDRESS;
writel(dma_offset, &bda->bda_dma_offset);
}
static int qdpc_firmware_load(struct pci_dev *pdev, struct vmac_priv *priv, const char *name)
{
#define DMABLOCKSIZE (1 * 1024 * 1024)
#define NBLOCKS(size) ((size)/(DMABLOCKSIZE) + (((size)%(DMABLOCKSIZE) > 0) ? 1 : 0))
int result = SUCCESS;
const struct firmware *fw;
__iomem qdpc_pcie_bda_t *bda = priv->bda;
/* Request compressed firmware from user space */
if ((result = request_firmware(&fw, name, &pdev->dev)) == -ENOENT) {
/*
* No firmware found in the firmware directory, skip firmware downloading process
* boot from flash directly on target
*/
printk( "no firmware found skip fw downloading\n");
qdpc_pcie_posted_write((PCIE_BDA_HOST_NOFW_ERR |
qdpc_pci_readl(&bda->bda_flags)), &bda->bda_flags);
return FAILURE;
} else if (result == SUCCESS) {
uint32_t nblocks = NBLOCKS(fw->size);
uint32_t remaining = fw->size;
uint32_t count;
uint32_t dma_offset = qdpc_pci_readl(&bda->bda_dma_offset);
void *data =(void *) __get_free_pages(GFP_KERNEL | GFP_DMA,
get_order(DMABLOCKSIZE));
const uint8_t *curdata = fw->data;
dma_addr_t handle = 0;
if (!data) {
printk(KERN_ERR "Allocation failed for memory size[%u] Download firmware failed!\n", DMABLOCKSIZE);
release_firmware(fw);
qdpc_pcie_posted_write((PCIE_BDA_HOST_MEMALLOC_ERR |
qdpc_pci_readl(&bda->bda_flags)), &bda->bda_flags);
return FAILURE;
}
handle = pci_map_single(priv->pdev, data ,DMABLOCKSIZE, PCI_DMA_TODEVICE);
if (!handle) {
printk("Pci map for memory data block 0x%p error, Download firmware failed!\n", data);
free_pages((unsigned long)data, get_order(DMABLOCKSIZE));
release_firmware(fw);
qdpc_pcie_posted_write((PCIE_BDA_HOST_MEMMAP_ERR |
qdpc_pci_readl(&bda->bda_flags)), &bda->bda_flags);
return FAILURE;
}
qdpc_setbootstate(priv, QDPC_BDA_FW_HOST_LOAD);
qdpc_bootpoll(priv, QDPC_BDA_FW_EP_RDY);
/* Start loading firmware */
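/*
* The image is streamed in DMABLOCKSIZE chunks: each block is copied into the
* DMA-able buffer, synced for the device, its bus address and size are
* published in the BDA, and the host waits for the EP to acknowledge with
* QDPC_BDA_FW_BLOCK_DONE before sending the next block.
*/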
for (count = 0 ; count < nblocks; count++)
{
uint32_t size = (remaining > DMABLOCKSIZE) ? DMABLOCKSIZE : remaining;
memcpy(data, curdata, size);
/* flush dcache */
pci_dma_sync_single_for_device(priv->pdev, handle ,size, PCI_DMA_TODEVICE);
qdpc_pcie_posted_write(handle + dma_offset, &bda->bda_img);
qdpc_pcie_posted_write(size, &bda->bda_img_size);
printk("FW Data[%u]: VA:0x%p PA:0x%p Sz=%u..\n", count, (void *)curdata, (void *)handle, size);
qdpc_setbootstate(priv, QDPC_BDA_FW_BLOCK_RDY);
qdpc_bootpoll(priv, QDPC_BDA_FW_BLOCK_DONE);
remaining = (remaining < size) ? remaining : (remaining - size);
curdata += size;
printk("done!\n");
}
pci_unmap_single(priv->pdev,handle, DMABLOCKSIZE, PCI_DMA_TODEVICE);
/* Mark end of block */
qdpc_pcie_posted_write(0, &bda->bda_img);
qdpc_pcie_posted_write(0, &bda->bda_img_size);
qdpc_setbootstate(priv, QDPC_BDA_FW_BLOCK_RDY);
qdpc_bootpoll(priv, QDPC_BDA_FW_BLOCK_DONE);
qdpc_setbootstate(priv, QDPC_BDA_FW_BLOCK_END);
PRINT_INFO("Image. Sz:%u State:0x%x\n", (uint32_t)fw->size, qdpc_pci_readl(&bda->bda_bootstate));
qdpc_bootpoll(priv, QDPC_BDA_FW_LOAD_DONE);
free_pages((unsigned long)data, get_order(DMABLOCKSIZE));
release_firmware(fw);
PRINT_INFO("Image downloaded....!\n");
} else {
PRINT_ERROR("Failed to load firmware:%d\n", result);
return result;
}
return result;
}
static void qdpc_pcie_dev_init(struct vmac_priv *priv, struct pci_dev *pdev, struct net_device *ndev)
{
SET_NETDEV_DEV(ndev, &pdev->dev);
priv->pdev = pdev;
priv->ndev = ndev;
pci_set_drvdata(pdev, ndev);
}
static void qdpc_tune_pcie_mps(struct pci_dev *pdev, int pos)
{
struct pci_dev *parent = NULL;
int ppos = 0;
uint32_t dev_cap, pcap;
uint16_t dev_ctl, pctl;
unsigned int mps = tlp_mps;
#define BIT_TO_MPS(m) (1 << ((m) + 7))
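/*
* Negotiate the TLP Max_Payload_Size: start from the module parameter, clamp
* it to what both the device and its parent bridge advertise in DEVCAP, and
* then program the payload-size field of DEVCTL on both ends of the link.
*/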
if (pdev->bus && pdev->bus->self) {
parent = pdev->bus->self;
if (likely(parent)) {
ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
if (ppos) {
pci_read_config_dword(parent, ppos + PCI_EXP_DEVCAP, &pcap);
pci_read_config_dword(pdev, pos + PCI_EXP_DEVCAP, &dev_cap);
printk(KERN_INFO "parent cap:%u, dev cap:%u\n",\
BIT_TO_MPS(pcap & PCI_EXP_DEVCAP_PAYLOAD), BIT_TO_MPS(dev_cap & PCI_EXP_DEVCAP_PAYLOAD));
mps = min(BIT_TO_MPS(dev_cap & PCI_EXP_DEVCAP_PAYLOAD), BIT_TO_MPS(pcap & PCI_EXP_DEVCAP_PAYLOAD));
}
}
}
printk(KERN_INFO"Setting MPS to %u\n", mps);
/*
* Set Max_Payload_Size
* Max_Payload_Size_in_effect = 1 << ( ( (dev_ctl >> 5) & 0x07) + 7);
*/
mps = (((mps >> 7) - 1) << 5);
pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &dev_ctl);
dev_ctl = ((dev_ctl & ~PCI_EXP_DEVCTL_PAYLOAD) | mps);
pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, dev_ctl);
if (parent && ppos) {
pci_read_config_word(parent, pos + PCI_EXP_DEVCTL, &pctl);
pctl = ((pctl & ~PCI_EXP_DEVCTL_PAYLOAD) | mps);
pci_write_config_word(parent, pos + PCI_EXP_DEVCTL, pctl);
}
}
static struct net_device *g_ndev = NULL;
static int qdpc_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct vmac_priv *priv = NULL;
struct net_device *ndev = NULL;
int result = SUCCESS;
int pos;
/* Allocate device structure */
if (!(ndev = vmac_alloc_ndev()))
return -ENOMEM;
g_ndev = ndev;
priv = netdev_priv(ndev);
qdpc_pcie_dev_init(priv, pdev, ndev);
/* allocate netlink data buffer */
priv->nl_buf = kmalloc(VMAC_NL_BUF_SIZE, GFP_KERNEL);
if (!priv->nl_buf) {
result = -ENOMEM;
goto out;
}
/* Check if the device has PCI express capability */
pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
if (!pos) {
PRINT_ERROR(KERN_ERR "The device %x does not have PCI Express capability\n",
pdev->device);
result = -ENOSYS;
goto out;
} else {
PRINT_DBG(KERN_INFO "The device %x has PCI Express capability\n", pdev->device);
}
qdpc_tune_pcie_mps(pdev, pos);
/* Wake up the device if it is in suspended state and allocate IO,
* memory regions and IRQ if not
*/
if (pci_enable_device(pdev)) {
PRINT_ERROR(KERN_ERR "Failed to initialize PCI device with device ID %x\n",
pdev->device);
result = -EIO;
goto out;
} else {
PRINT_DBG(KERN_INFO "Initialized PCI device with device ID %x\n", pdev->device);
}
/*
* Check if the PCI device can support DMA addressing properly.
* The mask gives the bits that the device can address
*/
pci_set_master(pdev);
/* Initialize PCIE layer */
if (( result = qdpc_pcie_init_intr_and_mem(priv)) < 0) {
PRINT_DBG("Interrupt & Memory Initialization failed \n");
goto release_memory;
}
if (!!(result = vmac_net_init(pdev))) {
PRINT_DBG("Vmac netdev init fail\n");
goto free_mem_interrupt;
}
/* Create and start the thread to initiate the INIT Handshake*/
priv->init_thread = kthread_run(qdpc_boot_thread, priv, "qdpc_init_thread");
if (priv->init_thread == NULL) {
PRINT_ERROR("Init thread creation failed \n");
goto free_mem_interrupt;
}
/* Create netlink & register with kernel */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
priv->nl_socket = netlink_kernel_create(&init_net,
QDPC_NETLINK_RPC_PCI_CLNT, &qdpc_netlink_cfg);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)
priv->nl_socket = netlink_kernel_create(&init_net,
QDPC_NETLINK_RPC_PCI_CLNT, THIS_MODULE, &qdpc_netlink_cfg);
#else
priv->nl_socket = netlink_kernel_create(&init_net,
QDPC_NETLINK_RPC_PCI_CLNT, 0, qdpc_nl_recv_msg,
NULL, THIS_MODULE);
#endif
if (priv->nl_socket) {
return SUCCESS;
}
PRINT_ERROR(KERN_ALERT "Error creating netlink socket.\n");
result = FAILURE;
free_mem_interrupt:
qdpc_pcie_free_mem(pdev);
qdpc_free_interrupt(pdev);
release_memory:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
/* Clear bus mastering and disable the device if any error occurred */
pci_clear_master(pdev);
#endif
pci_disable_device(pdev);
out:
kfree(priv->nl_buf);
free_netdev(ndev);
/* Any failure in probe, so it can directly return in remove */
pci_set_drvdata(pdev, NULL);
return result;
}
static void qdpc_pcie_remove(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
struct vmac_priv *vmp;
if (ndev == NULL)
return;
vmp = netdev_priv(ndev);
if (vmp->init_thread)
kthread_stop(vmp->init_thread);
if (vmp->nl_socket)
netlink_kernel_release(vmp->nl_socket);
kfree(vmp->nl_buf);
vmac_clean(ndev);
qdpc_free_interrupt(pdev);
qdpc_pcie_free_mem(pdev);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
pci_clear_master(pdev);
#endif
pci_disable_device(pdev);
writel(TOPAZ_SET_INT(IPC_RESET_EP), vmp->ep_ipc_reg);
qdpc_unmap_iomem(vmp);
free_netdev(ndev);
g_ndev = NULL;
return;
}
static inline int qdpc_pcie_set_power_state(struct pci_dev *pdev, pci_power_t state)
{
uint16_t pmcsr;
pci_read_config_word(pdev, TOPAZ_PCI_PM_CTRL_OFFSET, &pmcsr);
switch (state) {
case PCI_D0:
pci_write_config_word(pdev, TOPAZ_PCI_PM_CTRL_OFFSET,(pmcsr & ~PCI_PM_CTRL_STATE_MASK) | PCI_D0);
break;
case PCI_D3hot:
pci_write_config_word(pdev, TOPAZ_PCI_PM_CTRL_OFFSET,(pmcsr & ~PCI_PM_CTRL_STATE_MASK) | (PCI_D3hot | PCI_PM_CTRL_PME_ENABLE));
break;
default:
return -EINVAL;
}
return 0;
}
int qdpc_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *ndev = pci_get_drvdata(pdev);
struct vmac_priv *priv;
if (ndev == NULL)
return -EINVAL;
priv = netdev_priv(ndev);
if (le32_to_cpu(*priv->ep_pmstate) == PCI_D3hot) {
return 0;
}
printk("%s start power management suspend\n", qdpc_pcie_driver_name);
/* Mark the EP as not ready so packets are dropped in low-power mode */
priv->ep_ready = 0;
ndev->flags &= ~IFF_RUNNING;
*priv->ep_pmstate = cpu_to_le32(PCI_D3hot);
barrier();
writel(TOPAZ_SET_INT(IPC_EP_PM_CTRL), priv->ep_ipc_reg);
msleep(100);
pci_save_state(pdev);
pci_disable_device(pdev);
qdpc_pcie_set_power_state(pdev, PCI_D3hot);
return 0;
}
int qdpc_pcie_resume(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
struct vmac_priv *priv;
int ret;
if (ndev == NULL)
return -EINVAL;
priv = netdev_priv(ndev);
if (le32_to_cpu(*priv->ep_pmstate) == PCI_D0) {
return 0;
}
printk("%s start power management resume\n", qdpc_pcie_driver_name);
ret = pci_enable_device(pdev);
if (ret) {
PRINT_ERROR("%s: pci_enable_device failed on resume\n", __func__);
return ret;
}
pci_restore_state(pdev);
qdpc_pcie_set_power_state(pdev, PCI_D0);
{
*priv->ep_pmstate = cpu_to_le32(PCI_D0);
barrier();
writel(TOPAZ_SET_INT(IPC_EP_PM_CTRL), priv->ep_ipc_reg);
msleep(5000);
}
/* Set ep_ready to resume tx traffic */
priv->ep_ready = 1;
ndev->flags |= IFF_RUNNING;
return 0;
}
static int __init qdpc_init_module(void)
{
int ret;
PRINT_DBG(KERN_INFO "Quantenna pcie driver initialization\n");
if (qdpc_platform_init()) {
PRINT_ERROR("Platform initilization failed \n");
ret = FAILURE;
return ret;
}
/* Register the pci driver with device*/
if ((ret = pci_register_driver(&qdpc_pcie_driver)) < 0 ) {
PRINT_ERROR("Could not register the driver to pci : %d\n", ret);
ret = -ENODEV;
return ret;
}
#ifndef PCIE_HOTPLUG_SUPPORTED
link_monitor_thread = kthread_run(link_monitor, NULL, "link_monitor");
#endif
return ret;
}
static void __exit qdpc_exit_module(void)
{
/* Release netlink */
qdpc_platform_exit();
#ifndef PCIE_HOTPLUG_SUPPORTED
kthread_stop(link_monitor_thread);
link_monitor_thread = NULL;
#endif
/* Unregister the pci driver with the device */
pci_unregister_driver(&qdpc_pcie_driver);
return;
}
static inline bool is_pcie_linkup(struct pci_dev *pdev)
{
uint32_t cs = 0;
pci_read_config_dword(pdev, QDPC_VENDOR_ID_OFFSET, &cs);
if (cs == QDPC_LINK_UP) {
msleep(10000);
printk("%s: PCIe link up!\n", __func__);
return true;
}
return false;
}
static inline void qdpc_pcie_print_config_space(struct pci_dev *pdev)
{
int i = 0;
uint32_t cs = 0;
/* Read PCIe configuration space header */
for (i = QDPC_VENDOR_ID_OFFSET; i <= QDPC_INT_LINE_OFFSET; i += QDPC_ROW_INCR_OFFSET) {
pci_read_config_dword(pdev, i, &cs);
printk("%s: pdev:0x%p config_space offset:0x%02x value:0x%08x\n", __func__, pdev, i, cs);
}
printk("\n");
}
static inline void qdpc_pcie_check_link(struct pci_dev *pdev, struct vmac_priv *priv)
{
__iomem qdpc_pcie_bda_t *bda = priv->bda;
uint32_t cs = 0;
pci_read_config_dword(pdev, QDPC_VENDOR_ID_OFFSET, &cs);
/* Endian value will be all 1s if link went down */
if (readl(&bda->bda_pci_endian) == QDPC_LINK_DOWN) {
is_ep_reset = true;
printk("Reset detected\n");
}
}
#ifndef PCIE_HOTPLUG_SUPPORTED
static int link_monitor(void *data)
{
struct net_device *ndev = NULL;
struct vmac_priv *priv = NULL;
__iomem qdpc_pcie_bda_t *bda = NULL;
struct pci_dev *pdev = NULL;
uint32_t cs = 0;
set_current_state(TASK_RUNNING);
while (!kthread_should_stop()) {
__set_current_state(TASK_INTERRUPTIBLE);
schedule();
set_current_state(TASK_RUNNING);
ndev = g_ndev;
priv = netdev_priv(ndev);
bda = priv->bda;
pdev = priv->pdev;
#ifdef QDPC_CS_DEBUG
qdpc_pcie_print_config_space(pdev);
msleep(5000);
#endif
/* Check if reset to EP occurred */
while (!pci_read_config_dword(pdev, QDPC_VENDOR_ID_OFFSET, &cs)) {
if (kthread_should_stop())
do_exit(0);
qdpc_pcie_check_link(pdev, priv);
if (is_ep_reset) {
is_ep_reset = false;
/* add code to reboot the whole QCA system here */
printk("%s: Attempting to reboot QCA system.\n", __func__);
machine_restart(NULL);
break;
}
msleep(500);
}
while(!is_pcie_linkup(pdev)) {
}
#ifdef QDPC_CS_DEBUG
qdpc_pcie_print_config_space(pdev);
#endif
qdpc_pcie_probe(pdev, NULL);
}
do_exit(0);
}
#endif
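/*
* Boot handshake with the EP: detect the PCI endianness, fix up the DMA
* offset, then either let the EP boot from its own flash or stream u-boot /
* the Linux image over PCIe, and, unless only u-boot was transferred, wait
* for the runtime handshake (FW_CONFIG -> FW_RUN -> FW_RUNNING) to complete.
*/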
static int qdpc_bringup_fw(struct vmac_priv *priv)
{
__iomem qdpc_pcie_bda_t *bda = priv->bda;
uint32_t bdaflg;
char *fwname;
qdpc_pci_endian_detect(priv);
qdpc_pci_dma_offset_reset(priv);
printk("Setting HOST ready...\n");
qdpc_setbootstate(priv, QDPC_BDA_FW_HOST_RDY);
qdpc_bootpoll(priv, QDPC_BDA_FW_TARGET_RDY);
#ifndef SKIP_PCI_DMA_MASK
if (qdpc_set_dma_mask(priv)){
printk("Failed to map DMA mask.\n");
priv->init_thread = NULL;
do_exit(-1);
}
#endif
bdaflg = qdpc_pci_readl(&bda->bda_flags);
if ((PCIE_BDA_FLASH_PRESENT & bdaflg) && EP_BOOT_FROM_FLASH) {
printk("EP have fw in flash, boot from flash\n");
qdpc_pcie_posted_write((PCIE_BDA_FLASH_BOOT |
qdpc_pci_readl(&bda->bda_flags)), &bda->bda_flags);
qdpc_setbootstate(priv, QDPC_BDA_FW_TARGET_BOOT);
qdpc_bootpoll(priv, QDPC_BDA_FW_FLASH_BOOT);
goto fw_start;
}
bdaflg &= PCIE_BDA_XMIT_UBOOT;
fwname = bdaflg ? QDPC_TOPAZ_UBOOT : QDPC_TOPAZ_IMG;
qdpc_setbootstate(priv, QDPC_BDA_FW_TARGET_BOOT);
printk("EP FW load request...\n");
qdpc_bootpoll(priv, QDPC_BDA_FW_LOAD_RDY);
printk("Start download Firmware %s...\n", fwname);
if (qdpc_firmware_load(priv->pdev, priv, fwname)){
printk("Failed to download firmware.\n");
priv->init_thread = NULL;
do_exit(-1);
}
fw_start:
qdpc_setbootstate(priv, QDPC_BDA_FW_START);
printk("Start booting EP...\n");
if (bdaflg != PCIE_BDA_XMIT_UBOOT) {
if (qdpc_bootpoll(priv,QDPC_BDA_FW_CONFIG)) {
booterror(bda);
priv->init_thread = NULL;
do_exit(-1);
}
printk("EP boot successful, starting config...\n");
/* Save target-side MSI address for later enable/disable irq*/
priv->dma_msi_imwr = readl(QDPC_BAR_VADDR(priv->dmareg_bar, TOPAZ_IMWR_DONE_ADDRLO_OFFSET));
priv->dma_msi_dummy = virt_to_bus(&priv->dma_msi_data) + qdpc_pci_readl(&bda->bda_dma_offset);
priv->ep_pciecfg0_val = readl(QDPC_BAR_VADDR(priv->sysctl_bar, TOPAZ_PCIE_CFG0_OFFSET));
qdpc_setbootstate(priv, QDPC_BDA_FW_RUN);
qdpc_bootpoll(priv,QDPC_BDA_FW_RUNNING);
priv->ep_ready = 1;
}
return (int)bdaflg;
}
static int qdpc_boot_done(struct vmac_priv *priv)
{
struct net_device *ndev;
ndev = priv->ndev;
PRINT_INFO("Connection established with Target BBIC4 board\n");
#ifndef PCIE_HOTPLUG_SUPPORTED
if (link_monitor_thread)
wake_up_process(link_monitor_thread);
#endif
priv->init_thread = NULL;
do_exit(0);
}
static int qdpc_boot_thread(void *data)
{
struct vmac_priv *priv = (struct vmac_priv *)data;
int i;
for (i = 0; i < MAX_IMG_NUM; i++) {
if (qdpc_bringup_fw(priv) <= 0)
break;
}
qdpc_boot_done(priv);
return 0;
}
static void qdpc_nl_recv_msg(struct sk_buff *skb)
{
struct vmac_priv *priv = netdev_priv(g_ndev);
struct nlmsghdr *nlh = (struct nlmsghdr*)skb->data;
struct sk_buff *skb2;
unsigned int data_len;
unsigned int offset;
qdpc_cmd_hdr_t *cmd_hdr;
uint16_t rpc_type;
/* Parsing the netlink message */
PRINT_DBG(KERN_INFO "%s line %d Netlink received pid:%d, size:%d, type:%d\n",
__FUNCTION__, __LINE__, nlh->nlmsg_pid, nlh->nlmsg_len, nlh->nlmsg_type);
switch (nlh->nlmsg_type) {
case QDPC_NL_TYPE_CLNT_STR_REG:
case QDPC_NL_TYPE_CLNT_LIB_REG:
if (nlh->nlmsg_type == QDPC_NL_TYPE_CLNT_STR_REG)
priv->str_call_nl_pid = nlh->nlmsg_pid;
else
priv->lib_call_nl_pid = nlh->nlmsg_pid;
return;
case QDPC_NL_TYPE_CLNT_STR_REQ:
case QDPC_NL_TYPE_CLNT_LIB_REQ:
break;
default:
PRINT_DBG(KERN_INFO "%s line %d Netlink Invalid type %d\n",
__FUNCTION__, __LINE__, nlh->nlmsg_type);
return;
}
/*
* make new skbs; Fragment if necessary.
* The original skb will be freed in netlink_unicast_kernel,
* we hold the new skbs until DMA transfer is done
*/
offset = sizeof(struct nlmsghdr);
data_len = nlh->nlmsg_len;
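/*
* Each fragment is prefixed with a qdpc_cmd_hdr_t carrying the magic values,
* the fragment length, the total netlink message length and the RPC type;
* the FRAG flag is set on every fragment except the last so the EP can
* reassemble the original message.
*/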
while (data_len > 0) {
unsigned int len = min_t(unsigned int, data_len, priv->ndev->mtu);
unsigned int skb2_len = len + sizeof(qdpc_cmd_hdr_t);
skb2 = alloc_skb(skb2_len, GFP_ATOMIC);
if (!skb2) {
printk(KERN_INFO "%s: skb alloc failed\n", __func__);
return;
}
data_len -= len;
rpc_type = nlh->nlmsg_type & QDPC_RPC_TYPE_MASK;
rpc_type |= (data_len > 0 ? QDPC_RPC_TYPE_FRAG : 0);
cmd_hdr = (qdpc_cmd_hdr_t *)skb2->data;
memcpy(cmd_hdr->dst_magic, QDPC_NETLINK_DST_MAGIC, ETH_ALEN);
memcpy(cmd_hdr->src_magic, QDPC_NETLINK_SRC_MAGIC, ETH_ALEN);
cmd_hdr->type = __constant_htons(QDPC_APP_NETLINK_TYPE);
cmd_hdr->len = htons((uint16_t)len);
cmd_hdr->rpc_type = htons(rpc_type);
cmd_hdr->total_len = htons((uint16_t)(nlh->nlmsg_len));
memcpy((uint8_t *)(cmd_hdr + 1), skb->data + offset, len);
offset += len;
skb_put(skb2, skb2_len);
skb2->dev = priv->ndev;
dev_queue_xmit(skb2);
}
}
module_init(qdpc_init_module);
module_exit(qdpc_exit_module);

View File

@ -0,0 +1,119 @@
/**
* Copyright (c) 2012-2012 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#ifndef __QDPC_INIT_H_
#define __QDPC_INIT_H_
#include <asm/io.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include "topaz_vnet.h"
#define QDPC_MODULE_NAME "qdpc_ruby"
#define QDPC_DEV_NAME "qdpc_ruby"
#define QDPC_MODULE_VERSION "1.0"
/* PCIe device information declarations */
#define QDPC_VENDOR_ID 0x1bb5
#define QDPC_DEVICE_ID 0x0008
#define QDPC_PCIE_NUM_BARS 6
/* PCIe Configuration Space Defines */
#define QDPC_LINK_UP ((QDPC_DEVICE_ID << 16) | QDPC_VENDOR_ID) /* Used to indicate CS is valid and link is up */
#define QDPC_LINK_DOWN 0xffffffff /* Used to indicate link went down */
#define QDPC_VENDOR_ID_OFFSET 0x00
#define QDPC_INT_LINE_OFFSET 0x3C
#define QDPC_ROW_INCR_OFFSET 0x04
#undef QDPC_CS_DEBUG
extern unsigned int (*qdpc_pci_readl)(void *addr);
extern void (*qdpc_pci_writel)(unsigned int val, void *addr);
/*
* The end-point (EP) is little-endian.
* These two macros are used for host-side outbound window memory access.
* "Outbound" here is from the host's point of view, so the memory accessed by
* these macros lives on the EP side.
* NOTE: On some platforms, outbound hardware swap (byte order swap) should be
* enabled for outbound memory accesses to work correctly. If it is enabled,
* the endian translation is done by hardware and the software endian
* translation should be disabled.
*/
#ifdef OUTBOUND_HW_SWAP
#define le32_readl(x) readl(x)
#define le32_writel(x, addr) writel(x, addr)
#else
#define le32_readl(x) le32_to_cpu(readl((x)))
#define le32_writel(x, addr) writel(cpu_to_le32((x)), addr)
#endif
static inline unsigned int qdpc_readl(void *addr)
{
return readl(addr);
}
static inline void qdpc_writel(unsigned int val, void *addr)
{
writel(val, addr);
}
static inline unsigned int qdpc_le32_readl(void *addr)
{
return le32_to_cpu(readl((addr)));
}
static inline void qdpc_le32_writel(unsigned int val, void *addr)
{
writel(cpu_to_le32((val)), addr);
}
static inline void qdpc_pcie_posted_write(uint32_t val, __iomem void *basereg)
{
qdpc_pci_writel(val,basereg);
/* flush posted write */
qdpc_pci_readl(basereg);
}
static inline int qdpc_isbootstate(struct vmac_priv *p, uint32_t state) {
__iomem uint32_t *status = &p->bda->bda_bootstate;
uint32_t s = qdpc_pci_readl(status);
return (s == state);
}
static inline int qdpc_booterror(struct vmac_priv *p) {
__iomem uint32_t *status = &p->bda->bda_flags;
uint32_t s = qdpc_pci_readl(status);
return (s & PCIE_BDA_ERROR_MASK);
}
static inline void qdpc_setbootstate(struct vmac_priv *p, uint32_t state) {
__iomem qdpc_pcie_bda_t *bda = p->bda;
qdpc_pcie_posted_write(state, &bda->bda_bootstate);
}
/* Function prototypes */
int qdpc_pcie_init_intr_and_mem(struct vmac_priv *priv);
void qdpc_interrupt_target(struct vmac_priv *priv, uint32_t intr);
void qdpc_disable_irqs(struct vmac_priv *priv);
void qdpc_enable_irqs(struct vmac_priv *priv);
void qdpc_free_interrupt(struct pci_dev *pdev);
void qdpc_pcie_free_mem(struct pci_dev *pdev);
void qdpc_init_target_buffers(void *data);
int qdpc_send_packet(struct sk_buff *skb, struct net_device *ndev);
void *qdpc_map_pciemem(unsigned long busaddr, size_t len);
void qdpc_unmap_pciemem(unsigned long busaddr, void *vaddr, size_t len);
int qdpc_unmap_iomem(struct vmac_priv *priv);
int32_t qdpc_set_dma_mask(struct vmac_priv *priv);
#endif

View File

@ -0,0 +1,356 @@
/**
* Copyright (c) 2012-2012 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/version.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <asm/byteorder.h>
#include <linux/pci.h>
#include <linux/moduleparam.h>
#include <asm-generic/pci-dma-compat.h>
#include <linux/module.h>
#include "qdpc_config.h"
#include "qdpc_debug.h"
#include "qdpc_init.h"
#include "qdpc_regs.h"
#include <qdpc_platform.h>
static int use_msi = 1;
module_param(use_msi, int, 0644);
MODULE_PARM_DESC(use_msi, "Set 0 to use Legacy interrupt");
static int qdpc_pcie_init_intr(struct vmac_priv *priv);
static int qdpc_pcie_init_mem(struct vmac_priv *priv);
static int g_msi = 1;
int32_t qdpc_pcie_init_intr_and_mem(struct vmac_priv *priv)
{
struct pci_dev *pdev = priv->pdev;
int result = 0;
/* Initialize interrupts */
if (( result = qdpc_pcie_init_intr(priv)) < 0) {
PRINT_ERROR("PCIe Interrupt Initialization failed \n");
return result;
}
/* Memory Initialization */
if (( result = qdpc_pcie_init_mem(priv)) < 0) {
PRINT_ERROR("PCIe Memory Initialization failed \n");
qdpc_free_interrupt(pdev);
}
return result;
}
static int32_t qdpc_pcie_init_intr(struct vmac_priv *priv)
{
struct pci_dev *pdev = priv->pdev;
priv->msi_enabled = 0; /* Set default to use Legacy INTx interrupt */
/* Check if the device has MSI capability */
if (use_msi) {
if (!pci_enable_msi(pdev)) {
PRINT_INFO("PCIe MSI Interrupt Enabled\n");
priv->msi_enabled = 1;
} else {
PRINT_ERROR("PCIe MSI Interrupt enabling failed. Fall back to Legacy IRQ\n");
}
}
if(!priv->msi_enabled) {
PRINT_INFO("PCIe Legacy Interrupt Enabled\n");
pci_intx(pdev, 1);
}
return 0;
}
static bool qdpc_bar_check(struct vmac_priv *priv, qdpc_bar_t *bar)
{
uint32_t offset = bar->b_offset;
size_t len = bar->b_len;
dma_addr_t busaddr = bar->b_busaddr;
uint8_t index = bar->b_index;
if (index > 5) {
printk("Invalid BAR index:%u. Must be between 0 and 5\n", index);
return 0;
}
if (!len) {
/* NOTE:
* Do not fall back to an implicit length such as the full BAR length:
* if the mapped length is very large (say > 16MB), the request_mem_region()
* and ioremap() below would claim far more address space than needed,
* which is asking for trouble.
*/
printk("Zero length BAR\n");
return 0;
}
if (busaddr) { /*initialized BAR */
unsigned long bar_start = pci_resource_start(priv->pdev , index);
unsigned long bar_end = pci_resource_end(priv->pdev , index);
if (!bar_start) {
printk("Invalid BAR address: 0x%p.\n", (void *)busaddr);
return 0;
}
if ((busaddr - offset) != bar_start) {
printk("Invalid BAR offset:0x%p. BAR starts at 0x%p\n",
(void *)(busaddr -offset), (void *)bar_start);
return 0;
}
/* Check the span of the BAR including the offset + length, bar_end points to the last byte of BAR */
if ((busaddr + len - 1) > bar_end) {
printk("Invalid BAR end address:0x%p. BAR ends at 0x%p\n",
(void *)(busaddr + len), (void *)bar_end);
return 0;
}
} else { /* Uninitialized BAR */
unsigned long bar_end = pci_resource_end(priv->pdev , index);
busaddr = pci_resource_start(priv->pdev , index);
if (!busaddr) {
printk("Invalid BAR address: 0x%p.\n", (void *)busaddr);
return 0;
}
/* Checks that offset area is within bar */
if ( (busaddr + offset) > bar_end) {
printk("Invalid BAR offset 0x%p, extends beyond end of BAR(0x%p).\n",
(void *)(busaddr + offset), (void *)bar_end);
return 0;
}
/* Checks that mapped area is within bar */
if ((busaddr + len + offset - 1) > bar_end) {
printk("Mapped area 0x%p, extends beyond end of BAR(0x%p).\n",
(void *)(busaddr + len + offset - 1), (void *)bar_end);
return 0;
}
}
return 1;
}
static qdpc_bar_t *qdpc_map_bar(struct vmac_priv *priv, qdpc_bar_t *bar,
uint8_t index, size_t len, uint32_t offset)
{
void *vaddr = NULL;
dma_addr_t busaddr = 0;
qdpc_bar_t temp_bar;
memset(&temp_bar, 0 ,sizeof(qdpc_bar_t));
temp_bar.b_len = len;
temp_bar.b_offset = offset;
temp_bar.b_index = index;
if (!qdpc_bar_check(priv, &temp_bar)) {
printk("Failed bar mapping sanity check in %s\n", __FUNCTION__);
return NULL;
}
/* Reserve PCIe memory region*/
busaddr = pci_resource_start(priv->pdev , index) + offset;
if (!request_mem_region(busaddr, len , QDPC_DEV_NAME)) {
printk("Failed to reserve %u bytes of PCIe memory "
"region starting at 0x%p\n", (uint32_t)len, (void *)busaddr);
return NULL;
}
#ifndef DISABLE_PCIE_UPDATA_HW_BAR
qdpc_update_hw_bar(priv->pdev, index);
#endif
vaddr = ioremap_nocache(busaddr, len);
if (!vaddr) {
printk("Failed to map %u bytes at BAR%u at bus address 0x%p.\n",
(uint32_t)len, index, (void *)busaddr);
release_mem_region(busaddr, len);
return NULL;
}
memset(&temp_bar, 0 ,sizeof(qdpc_bar_t));
bar->b_vaddr = vaddr;
bar->b_busaddr = busaddr;
bar->b_len = len;
bar->b_index = index;
bar->b_offset = offset;
printk("BAR:%u vaddr=0x%p busaddr=%p offset=%u len=%u\n",
bar->b_index, bar->b_vaddr, (void *)bar->b_busaddr,
bar->b_offset, (uint32_t)bar->b_len);
return bar;
}
static bool qdpc_unmap_bar(struct vmac_priv *priv, qdpc_bar_t *bar)
{
if (!qdpc_bar_check(priv, bar)) {
PRINT_ERROR("Failed bar mapping sanity check in %s\n", __FUNCTION__);
return 0;
}
iounmap(bar->b_vaddr);
release_mem_region(bar->b_busaddr - bar->b_offset, bar->b_len);
memset(bar, 0 , sizeof(qdpc_bar_t));
return 1;
}
static void qdpc_map_epmem(struct vmac_priv *priv)
{
printk("%s() Mapping epmem\n", __FUNCTION__);
qdpc_map_bar(priv, &priv->epmem_bar, QDPC_SHMEM_BAR,
pci_resource_len(priv->pdev, QDPC_SHMEM_BAR) , 0);
priv->bda =(qdpc_pcie_bda_t *)QDPC_BAR_VADDR(priv->epmem_bar, 0);
priv->bda->bda_rc_msi_enabled = g_msi;
}
static void qdpc_map_sysctl_regs(struct vmac_priv *priv)
{
printk("%s() Mapping sysctl\n", __FUNCTION__);
qdpc_map_bar(priv, &priv->sysctl_bar, QDPC_SYSCTL_BAR, pci_resource_len(priv->pdev, QDPC_SYSCTL_BAR) , 0);
}
static void qdpc_map_dma_regs(struct vmac_priv *priv)
{
printk("%s() Mapping dma registers\n", __FUNCTION__);
qdpc_map_bar(priv, &priv->dmareg_bar, QDPC_DMA_BAR, pci_resource_len(priv->pdev, QDPC_DMA_BAR), 0);
}
static void qdpc_unmap_epmem(struct vmac_priv *priv)
{
printk("%s() Unmapping sysctl\n", __FUNCTION__);
priv->bda = NULL;
qdpc_unmap_bar(priv, &priv->epmem_bar);
}
static void qdpc_unmap_sysctl_regs(struct vmac_priv *priv)
{
printk("%s() Unmapping sysctl\n", __FUNCTION__);
qdpc_unmap_bar(priv, &priv->sysctl_bar);
}
static void qdpc_unmap_dma_regs(struct vmac_priv *priv)
{
printk("%s() Unmapping dma regs\n", __FUNCTION__);
qdpc_unmap_bar(priv, &priv->dmareg_bar);
}
int32_t qdpc_set_dma_mask(struct vmac_priv *priv) {
int result = 0;
uint64_t dma_mask = qdpc_pci_readl(&priv->bda->bda_dma_mask);
printk("Requested DMA mask:0x%llx\n", dma_mask);
result = pci_set_dma_mask(priv->pdev, dma_mask);
if (!result) {
result = pci_set_consistent_dma_mask(priv->pdev, dma_mask);
if (result) {
printk(" pci_set_consistent_dma_mask() error %d. Mask:0x%llx\n", result, dma_mask);
return 1;
}
} else {
printk(" pci_set_dma_mask() error %d. Mask:0x%llx\n", result, dma_mask);
return 1;
}
return 0;
}
static int32_t qdpc_pcie_init_mem(struct vmac_priv *priv)
{
int ret = 0;
/* Map the SysCtl registers, the EP shared memory (BDA) area and the DMA registers */
qdpc_map_sysctl_regs(priv);
qdpc_map_epmem(priv);
qdpc_map_dma_regs(priv);
return ret;
}
int qdpc_unmap_iomem(struct vmac_priv *priv)
{
qdpc_unmap_dma_regs(priv);
qdpc_unmap_epmem(priv);
qdpc_unmap_sysctl_regs(priv);
return SUCCESS;
}
void qdpc_free_interrupt(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
struct vmac_priv *priv;
if (ndev == NULL)
return;
priv = netdev_priv(ndev);
if(priv->msi_enabled)
pci_disable_msi(pdev);
else
pci_intx(pdev, 0);
}
void qdpc_pcie_free_mem(struct pci_dev *pdev)
{
return;
}
void *qdpc_map_pciemem(unsigned long busaddr, size_t len)
{
/* Reserve PCIe memory region*/
if (!request_mem_region(busaddr, len, QDPC_DEV_NAME)) {
PRINT_ERROR("Failed to reserve %u bytes of "
"PCIe memory region starting at 0x%lx\n", (uint32_t)len, busaddr);
return NULL;
}
return ioremap_nocache(busaddr, len);
}
void qdpc_unmap_pciemem(unsigned long busaddr, void *vaddr, size_t len)
{
if (!vaddr || !busaddr)
return;
iounmap(vaddr);
release_mem_region(busaddr, len);
}
void qdpc_deassert_intx(struct vmac_priv *priv)
{
void *basereg = QDPC_BAR_VADDR(priv->sysctl_bar, TOPAZ_PCIE_CFG0_OFFSET);
qdpc_pcie_posted_write(priv->ep_pciecfg0_val & ~TOPAZ_ASSERT_INTX, basereg);
}
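/*
 * Illustrative sketch (not part of the driver): the intended pairing of
 * qdpc_map_pciemem()/qdpc_unmap_pciemem(). The bus address and length are
 * placeholders; real callers derive them from the BAR layout.
 */
#if 0	/* example only */
static int qdpc_example_peek(unsigned long busaddr, size_t len)
{
	void *vaddr = qdpc_map_pciemem(busaddr, len);

	if (!vaddr)
		return -ENOMEM;
	PRINT_INFO("first word at 0x%lx: 0x%08x\n", busaddr, readl(vaddr));
	qdpc_unmap_pciemem(busaddr, vaddr, len);
	return 0;
}
#endif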

View File

@ -0,0 +1,56 @@
/**
* Copyright (c) 2012-2012 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#ifndef __QDPC_REGS_H__
#define __QDPC_REGS_H__
#include <linux/bitops.h>
#include <qdpc_platform.h>
#define QDPC_SYSCTL_BAR 0
#define QDPC_SHMEM_BAR 2
#define QDPC_DMA_BAR 3
/*
* NOTE: The registers below live on the EP but are accessed and written by the RC.
* Make sure EP code does not write them, otherwise we get race conditions.
*/
/*
* This is an Endpoint register. The Root Complex uses it to interrupt
* the Endpoint so that it transmits packets.
*/
#define TOPAZ_IPC_OFFSET (0x13C)
/* Used to deassert Legacy INTx */
#define TOPAZ_PCIE_CFG0_OFFSET (0x6C)
#define TOPAZ_ASSERT_INTX BIT(9)
/* This macro is used to set interrupt bit of register QDPC_EP_SYS_CTL_IPC4_INT */
#define TOPAZ_SET_INT(x) ((x) | ((x) << 16))
/* "DMA Write Done IMWr Address Low" register at EP side*/
#define TOPAZ_IMWR_DONE_ADDRLO_OFFSET (0x700 + 0x2D0)
#define TOPAZ_IMWR_ABORT_ADDRLO_OFFSET (0x700 + 0x2D8)
/* Power management control status register */
#define TOPAZ_PCI_PM_CTRL_OFFSET (0x44)
#endif //__QDPC_REGS_H__
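/*
 * Illustrative sketch (not part of this header): ringing the EP "doorbell"
 * through the IPC register defined above. The register address would
 * typically be priv->ep_ipc_reg (sysctl BAR + TOPAZ_IPC_OFFSET); which
 * interrupt bit to set is an assumption here.
 */
#if 0	/* example only */
static inline void qdpc_example_ring_ep(void *ipc_reg, unsigned int bit)
{
	/* TOPAZ_SET_INT() sets the bit together with its write-enable mirror */
	writel(TOPAZ_SET_INT(BIT(bit)), ipc_reg);
}
#endif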

File diff suppressed because it is too large

View File

@ -0,0 +1,242 @@
/**
* Copyright (c) 2012-2012 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#ifndef __DRIVERS_NET_TOPAZ_VNET_H
#define __DRIVERS_NET_TOPAZ_VNET_H 1
#define ETH_TX_TIMEOUT (100*HZ)
#define MULTICAST_FILTER_LIMIT 64
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <qdpc_config.h>
#include <topaz_netcom.h>
#define PROC_NAME_SIZE (32)
#define VMAC_BD_EMPTY ((uint32_t)0x00000001)
#define VMAC_BD_WRAP ((uint32_t)0x00000002)
#define VMAC_BD_MASK_LEN ((uint32_t)0xFFFF0000)
#define VMAC_BD_MASK_OFFSET ((uint32_t)0x0000FF00)
#define VMAC_GET_LEN(x) (((x) >> 16) & 0xFFFF)
#define VMAC_GET_OFFSET(x) (((x) >> 8) & 0xFF)
#define VMAC_SET_LEN(len) (((len) & 0xFFFF) << 16)
#define VMAC_SET_OFFSET(of) (((of) & 0xFF) << 8)
#define VMAC_INDX_MINUS(x, y, m) (((x) + (m) - (y)) % (m))
#define VMAC_INDX_INC(index, m) do { \
if (++(index) >= (m)) \
(index) = 0; \
} while(0)
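/*
 * Illustrative sketch (not part of this header): packing a buffer-descriptor
 * info word with the macros above. The field layout (length in bits 31:16,
 * offset in bits 15:8) follows the masks defined above.
 */
#if 0	/* example only */
static inline uint32_t vmac_example_pack(uint16_t len, uint8_t offset)
{
	/* VMAC_GET_LEN()/VMAC_GET_OFFSET() recover the two fields again;
	 * VMAC_INDX_INC(index, ring_size) advances a ring index and wraps it. */
	return VMAC_SET_LEN(len) | VMAC_SET_OFFSET(offset);
}
#endif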
/*
* Helper macros handling memory mapped area access
*/
#define VMAC_REG_TST(reg, val) ( *((volatile unsigned int *)(reg)) & (val) )
#define VMAC_REG_SET(reg, val) { volatile unsigned int *r = (unsigned int *)(reg); *r = (*r | (val)); }
#define VMAC_REG_CLR(reg, val) { volatile unsigned int *r = (unsigned int *)(reg); *r = (*r & ~(val)); }
#define VMAC_REG_WRITE(reg, val) { *(volatile unsigned int *)reg = (val); }
#define VMAC_REG_READ(reg) (*(volatile unsigned int *)(reg))
#define QTN_RC_TX_BUDGET (16)
#define QTN_RC_TX_TASKLET_BUDGET (32)
#define QTN_RX_SKB_FREELIST_FILL_SIZE (1024)
#define QTN_RX_SKB_FREELIST_MAX_SIZE (8192)
#define QTN_RX_BUF_MIN_SIZE (1536)
#define VMAC_NL_BUF_SIZE USHRT_MAX
typedef struct qdpc_bar {
void *b_vaddr; /* PCIe bar virtual address */
dma_addr_t b_busaddr; /* PCIe bar physical address */
size_t b_len; /* Bar resource length */
uint32_t b_offset; /* Offset from start of map */
uint8_t b_index; /* Bar Index */
} qdpc_bar_t;
#define QDPC_BAR_VADDR(bar, off) ((bar).b_vaddr +(off))
struct vmac_cfg {
uint16_t rx_bd_num;
uint16_t tx_bd_num;
char ifname[PROC_NAME_SIZE];
struct net_device *dev;
};
#if defined(QTN_RC_ENABLE_HDP)
enum pkt_type {
PKT_SKB = 0,
PKT_TQE
};
#endif
struct vmac_tx_buf {
uint32_t handle;
uint16_t len;
#if defined(QTN_RC_ENABLE_HDP)
uint8_t type; /* 1 payload only, 0 skb */
uint8_t rsv;
#else
uint16_t rsv;
#endif
};
struct vmac_priv {
struct sk_buff **tx_skb; /* skbs that have been posted to PCIe DMA */
volatile struct vmac_bd *tx_bd_base; /* Tx buffer descriptor */
volatile uint32_t *ep_next_rx_pkt;
uint16_t tx_bd_index;
uint16_t tx_reclaim_start;
uint16_t tx_bd_num;
uint8_t txqueue_stopped;
volatile uint32_t *txqueue_wake; /* shared variable with EP */
spinlock_t txqueue_op_lock;
unsigned long ep_ipc_reg;
uint32_t tx_bd_busy_cnt; /* tx BD unavailable */
uint32_t tx_stop_queue_cnt;
#ifdef RC_TXDONE_TIMER
struct timer_list tx_timer;
spinlock_t tx_lock;
#endif
uint32_t vmac_tx_queue_len;
struct sk_buff **rx_skb;
volatile struct vmac_bd *rx_bd_base; /* Rx buffer descriptor */
uint16_t rx_bd_index;
uint16_t rx_bd_num;
uint32_t rx_skb_alloc_failures;
uint32_t intr_cnt; /* msi/legacy interrupt counter */
uint32_t vmac_xmit_cnt;
uint32_t vmac_skb_free;
struct sock *nl_socket;
uint32_t str_call_nl_pid;
uint32_t lib_call_nl_pid;
struct napi_struct napi;
uint32_t dbg_flg;
struct net_device *ndev;
struct pci_dev *pdev;
int mac_id;
uint32_t dma_msi_imwr;
uint32_t dma_msi_data;
uint32_t dma_msi_dummy;
uint32_t ep_pciecfg0_val; /* used to deassert Legacy irq from RC */
/* The following members aren't related to datapath */
struct vmac_cfg *pcfg;
uint8_t show_item;
uint32_t addr_uncache;
uint32_t uncache_len;
struct task_struct *init_thread; /* Initialization thread */
uint8_t msi_enabled; /* PCIe MSI: 1 - Enabled, 0 - Disabled */
qdpc_bar_t sysctl_bar;
qdpc_bar_t epmem_bar;
qdpc_bar_t dmareg_bar;
uint32_t dma_imwr;
/* io memory pointers */
__iomem qdpc_pcie_bda_t *bda;
uint32_t ep_ready;
#ifdef QCA_NSS_PLATFORM
void *nssctx; /* QCA NSS platform specific handler*/
uint32_t nss_enable;
struct ctl_table nss_sysctls[14]; /* sysctl entries for control and statistics */
struct ctl_table_header *nss_sysctl_header;
#endif
#ifdef DNI_EXTRA_FUNCTIONS
#ifdef DNI_5G_LED
uint32_t led_enable;
struct ctl_table dni_sysctls[14];
struct ctl_table_header *dni_sysctl_header;
#endif
#endif
#ifdef QTN_TX_SKBQ_SUPPORT
struct sk_buff_head tx_skb_queue;
spinlock_t tx_skbq_lock;
struct tasklet_struct tx_skbq_tasklet;
uint32_t tx_skbq_budget;
uint32_t tx_skbq_tasklet_budget;
uint32_t tx_skbq_max_size;
#endif
#ifdef QTN_SKB_RECYCLE_SUPPORT
struct sk_buff_head rx_skb_freelist;
spinlock_t rx_skb_freelist_lock;
uint32_t rx_skb_freelist_fill_level;
uint32_t skb_recycle_cnt;
uint32_t skb_recycle_failures;
#endif
volatile uint32_t *ep_pmstate;
uint8_t *nl_buf;
size_t nl_len;
};
#define QTN_DISABLE_SOFTIRQ (0xABCD)
static inline void qtn_spin_lock_bh_save(spinlock_t *lock, unsigned long *flag)
{
if (likely(irqs_disabled() || in_softirq())) {
spin_lock(lock);
*flag = 0;
} else {
spin_lock_bh(lock);
*flag = QTN_DISABLE_SOFTIRQ;
}
}
static inline void qtn_spin_unlock_bh_restore(spinlock_t *lock, unsigned long *flag)
{
if (unlikely(*flag == QTN_DISABLE_SOFTIRQ)) {
*flag = 0;
spin_unlock_bh(lock);
} else {
spin_unlock(lock);
}
}
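/*
 * Illustrative sketch (not part of this header): the intended pairing of
 * qtn_spin_lock_bh_save()/qtn_spin_unlock_bh_restore(). The lock used here
 * (txqueue_op_lock) is only an example; the pattern fits any spinlock
 * shared between process and softirq context.
 */
#if 0	/* example only */
static inline void vmac_example_txqueue_op(struct vmac_priv *priv)
{
	unsigned long flag;

	qtn_spin_lock_bh_save(&priv->txqueue_op_lock, &flag);
	/* ... touch state shared with the tx tasklet here ... */
	qtn_spin_unlock_bh_restore(&priv->txqueue_op_lock, &flag);
}
#endif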
extern struct net_device *vmac_alloc_ndev(void);
extern int vmac_net_init(struct pci_dev *pdev);
extern void vmac_clean(struct net_device *ndev);
extern int vmac_tx(void *pkt_handle, struct net_device *ndev);
#define PCIE_REG_CFG_BASE 0x0
#define PCIE_LOGIC_PORT_CFG_BASE (PCIE_REG_CFG_BASE + 0x700)
#define PCIE_DMA_WR_INTR_MASK 0x2c4
void vmac_pcie_edma_enable(struct vmac_priv *priv);
void qdpc_deassert_intx(struct vmac_priv *priv);
void qdpc_pcie_edma_enable(struct vmac_priv *priv);
int qdpc_pcie_suspend(struct pci_dev *pdev, pm_message_t state);
int qdpc_pcie_resume(struct pci_dev *pdev);
#endif

View File

@ -0,0 +1,33 @@
#
# Makefile for Intel platform
#
EXTRA_CFLAGS += -Wall \
-I$(src) \
-I$(src)/../../include \
-I$(src)/../common
EXTRA_CFLAGS += -DRC_TXDONE_TIMER -DQTN_WAKEQ_SUPPORT
ifneq ($(CONFIG_HOTPLUG_PCI_PCIE),)
EXTRA_CFLAGS += -DPCIE_HOTPLUG_SUPPORTED
endif
KVERSION = $(shell uname -r)
default: all
COMMON_DIR := ../common
qdpc-host-objs := $(COMMON_DIR)/qdpc_init.o $(COMMON_DIR)/qdpc_pcie.o $(COMMON_DIR)/topaz_vnet.o qdpc_platform.o
obj-m := qdpc-host.o
qdpc_host.o: $(qdpc-host-objs)
ld -r $^ -o $@
all:
make -C /lib/modules/$(KVERSION)/build M=$(PWD) modules
clean:
make -C /lib/modules/$(KVERSION)/build M=$(PWD) clean
rm -rf $(COMMON_DIR)/.*.cmd $(COMMON_DIR)/.tmp_versions
rm -rf Module.markers modules.order *~ $(qdpc-host-objs)

View File

@ -0,0 +1,102 @@
/**
* Copyright (c) 2012-2012 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
/*
* Platform dependent implementation. The customer needs to modify this file.
*/
#include <linux/interrupt.h>
#include <qdpc_platform.h>
#include <topaz_vnet.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
/*
* Enable MSI interrupt of PCIe.
*/
void enable_vmac_ints(struct vmac_priv *vmp)
{
enable_irq(vmp->pdev->irq);
}
/*
* Disable MSI interrupt of PCIe.
*/
void disable_vmac_ints(struct vmac_priv *vmp)
{
disable_irq_nosync(vmp->pdev->irq);
}
/*
* Enable interrupt for detecting EP reset.
*/
void enable_ep_rst_detection(struct net_device *ndev)
{
}
/*
* Disable interrupt for detecting EP reset.
*/
void disable_ep_rst_detection(struct net_device *ndev)
{
}
/*
* Interrupt context for detecting EP reset.
* This function should do:
* 1. check interrupt status to see if EP reset.
* 2. if EP reset, handle it.
*/
void handle_ep_rst_int(struct net_device *ndev)
{
}
/*
* PCIe driver update resource in PCI configure space after EP reset.
* This function should be called in such case:
* 1. The PCI configure space can be accessed after EP reset;
* 2. Kernel does not support PCIe hot-plug.
*/
void qdpc_update_hw_bar(struct pci_dev *pdev, uint8_t index)
{
struct pci_bus_region region;
uint32_t addr, new;
int offset = PCI_BASE_ADDRESS_0 + 4 * index;
struct resource *res = pdev->resource + index;
if (!res->flags)
return;
pcibios_resource_to_bus(pdev, &region, res);
new = region.start | (res->flags & PCI_REGION_FLAG_MASK);
pci_read_config_dword(pdev, offset, &addr);
if (addr != new) {
printk("PCI region %d: reset to PCI address %#llx", index, (unsigned long long)region.start);
pci_write_config_dword(pdev, offset, new);
if ((new & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) ==
(PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64)) {
printk(" (64bit address)");
new = region.start >> 16 >> 16;
pci_write_config_dword(pdev, offset + 4, new);
}
printk("\n");
}
}

View File

@ -0,0 +1,91 @@
/**
* Copyright (c) 2012-2012 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#ifndef __QDPC_PFDEP_H__
#define __QDPC_PFDEP_H__
#include <linux/version.h>
#include <topaz_vnet.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
#define IOREMAP ioremap_wc
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) */
#define IOREMAP ioremap
#endif
/* IO functions */
#ifndef readb
#define readb(addr) (*(volatile unsigned char *) (addr))
#endif
#ifndef readw
#define readw(addr) (*(volatile unsigned short *) (addr))
#endif
#ifndef readl
#define readl(addr) (*(volatile unsigned int *) (addr))
#endif
#ifndef writeb
#define writeb(b,addr) (*(volatile unsigned char *) (addr) = (b))
#endif
#ifndef writew
#define writew(b,addr) (*(volatile unsigned short *) (addr) = (b))
#endif
#ifndef writel
#define writel(b,addr) (*(volatile unsigned int *) (addr) = (b))
#endif
/* Bit number and mask of MSI in the interrupt mask and status register */
#define QDPC_INTR_MSI_BIT 0
#define QDPC_INTR_MSI_MASK (1 << QDPC_INTR_MSI_BIT)
/* Enable MSI interrupt of PCIe */
extern void enable_vmac_ints(struct vmac_priv *vmp);
/* Disable MSI interrupt of PCIe */
extern void disable_vmac_ints(struct vmac_priv *vmp);
/* Enable interrupt for detecting EP reset */
extern void enable_ep_rst_detection(struct net_device *ndev);
/* Disable interrupt for detecting EP reset */
extern void disable_ep_rst_detection(struct net_device *ndev);
/* Interrupt context for detecting EP reset */
extern void handle_ep_rst_int(struct net_device *ndev);
/* PCIe driver update resource in PCI configure space after EP reset */
extern void qdpc_update_hw_bar(struct pci_dev *pdev, uint8_t index);
/* Allocated buffer size for a packet */
#define SKB_BUF_SIZE 2048
/* Transmit Queue Length */
#define QDPC_TX_QUEUE_SIZE 180
/* Receive Queue Length */
#define QDPC_RX_QUEUE_SIZE 384
/* Customer defined function */
#define qdpc_platform_init() 0
#define qdpc_platform_exit() do { } while(0)
#endif /* __QDPC_PFDEP_H__ */

View File

@ -0,0 +1,30 @@
#
# Makefile for mipsr2 platform
#
EXTRA_CFLAGS += -Wall \
-I$(src) \
-I$(src)/../../include \
-I$(src)/../common \
-D__BIG_ENDIAN
EXTRA_CFLAGS += -DQTN_TX_SKBQ_SUPPORT -DQTN_WAKEQ_SUPPORT
PWD := $(shell pwd)
default: all
COMMON_DIR := ../common
qdpc-host-objs := $(COMMON_DIR)/qdpc_init.o $(COMMON_DIR)/qdpc_pcie.o $(COMMON_DIR)/topaz_vnet.o qdpc_platform.o
obj-m := qdpc-host.o
qdpc_host.o: $(qdpc-host-objs)
ld -r $^ -o $@
all:
make -C $(KERNELDIR) $(CROSS) M=$(PWD) modules
clean:
rm -rf $(COMMON_DIR)/.*.cmd $(COMMON_DIR)/.tmp_versions
rm -rf Module.markers Module.symvers modules.order *~ $(qdpc-host-objs) *.o *.ko *.mod.o *.mod.c

View File

@ -0,0 +1,74 @@
/**
* Copyright (c) 2012-2012 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
/*
* Platform dependent implementation. The customer needs to modify this file.
*/
#include <linux/interrupt.h>
#include <qdpc_platform.h>
#include <topaz_vnet.h>
#include <qdpc_regs.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
/*
* Enable MSI interrupt of PCIe.
*/
void enable_vmac_ints(struct vmac_priv *vmp)
{
volatile uint32_t *dma_wrd_imwr = QDPC_BAR_VADDR(vmp->dmareg_bar, TOPAZ_IMWR_DONE_ADDRLO_OFFSET);
writel(vmp->dma_msi_imwr, dma_wrd_imwr);
}
/*
* Disable MSI interrupt of PCIe.
*/
void disable_vmac_ints(struct vmac_priv *vmp)
{
volatile uint32_t *dma_wrd_imwr = QDPC_BAR_VADDR(vmp->dmareg_bar, TOPAZ_IMWR_DONE_ADDRLO_OFFSET);
writel(vmp->dma_msi_dummy, dma_wrd_imwr);
}
/*
* Enable interrupt for detecting EP reset.
*/
void enable_ep_rst_detection(struct net_device *ndev)
{
}
/*
* Disable interrupt for detecting EP reset.
*/
void disable_ep_rst_detection(struct net_device *ndev)
{
}
/*
* Interrupt context for detecting EP reset.
* This function should do:
* 1. check interrupt status to see if EP reset.
* 2. if EP reset, handle it.
*/
void handle_ep_rst_int(struct net_device *ndev)
{
}

View File

@ -0,0 +1,101 @@
/**
* Copyright (c) 2012-2012 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
/*
* Platform dependent implementation. The customer needs to modify this file.
*/
#ifndef __QDPC_PFDEP_H__
#define __QDPC_PFDEP_H__
#include <linux/version.h>
#include <topaz_vnet.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
#define IOREMAP ioremap_wc
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) */
#define IOREMAP ioremap
#endif
/* IO functions */
#ifndef readb
#define readb(addr) (*(volatile unsigned char *) (addr))
#endif
#ifndef readw
#define readw(addr) (*(volatile unsigned short *) (addr))
#endif
#ifndef readl
#define readl(addr) (*(volatile unsigned int *) (addr))
#endif
#ifndef writeb
#define writeb(b,addr) (*(volatile unsigned char *) (addr) = (b))
#endif
#ifndef writew
#define writew(b,addr) (*(volatile unsigned short *) (addr) = (b))
#endif
#ifndef writel
#define writel(b,addr) (*(volatile unsigned int *) (addr) = (b))
#endif
#ifndef virt_to_bus
#define virt_to_bus virt_to_phys
#endif
/* Bit number and mask of MSI in the interrupt mask and status register */
#define QDPC_INTR_MSI_BIT 0
#define QDPC_INTR_MSI_MASK (1 << QDPC_INTR_MSI_BIT)
/* Enable MSI interrupt of PCIe */
extern void enable_vmac_ints(struct vmac_priv *vmp);
/* Disable MSI interrupt of PCIe */
extern void disable_vmac_ints(struct vmac_priv *vmp);
/* Enable interrupt for detecting EP reset */
extern void enable_ep_rst_detection(struct net_device *ndev);
/* Disable interrupt for detecting EP reset */
extern void disable_ep_rst_detection(struct net_device *ndev);
/* Interrupt context for detecting EP reset */
extern void handle_ep_rst_int(struct net_device *ndev);
/* Allocated buffer size for a packet */
#define SKB_BUF_SIZE 2048
/* Transmit Queue Length */
#define QDPC_TX_QUEUE_SIZE 180
/* Receive Queue Length */
#define QDPC_RX_QUEUE_SIZE 384
/* Customer defined function */
#define qdpc_platform_init() 0
#define qdpc_platform_exit() do { } while(0)
/* PCIe driver update resource in PCI configure space after EP reset */
#define qdpc_update_hw_bar(pdev, index) do { } while(0)
/* TODO: If MSI IRQ-loss issue can be fixed, remove macro below */
/*#define QDPC_PLATFORM_IRQ_FIXUP*/
#endif /* __QDPC_PFDEP_H__ */

View File

@ -0,0 +1,67 @@
#
# Makefile for Quantenna RC platform
#
#
EXTRA_CFLAGS += -Wall -Werror -Wno-unknown-pragmas \
-I$(src) \
-I$(src)/../../include \
-I../drivers/include/shared \
-I../drivers/include/kernel \
-I$(src)/../common
EXTRA_CFLAGS += -mlong-calls -DQTN_WAKEQ_SUPPORT
ifeq ($(board_config),topaz_host_realign_config)
EXTRA_CFLAGS += -DQTN_BYTEALIGN
endif
ifneq ($(CONFIG_HOTPLUG_PCI_PCIE),)
EXTRA_CFLAGS += -DPCIE_HOTPLUG_SUPPORTED
endif
ifeq (${PCIE_HOST_CRUMBS},1)
EXTRA_CFLAGS += -finstrument-functions
endif
#EXTRA_CFLAGS += -DDEBUG
ifneq ($(KERNELRELEASE),)
COMMON_DIR := ../common
TQE_DIR_TO_WORK := ../../tqe
TQE_DIR_TO_LINUX:= ../drivers/pcie2/tqe
EXTRA_CFLAGS += -I.
qdpc-host-objs := $(COMMON_DIR)/topaz_vnet.o $(COMMON_DIR)/qdpc_init.o \
$(COMMON_DIR)/qdpc_pcie.o qdpc_platform.o
qdpc-host-objs += $(if $(wildcard $(TQE_DIR_TO_LINUX)), $(TQE_DIR_TO_WORK)/topaz_pcie_tqe.o)
qdpc-host-objs += qdpc_dspload.o
obj-m := qdpc-host.o
else
KERNELDIR ?= ../../../../linux
INSTALL = INSTALL_MOD_PATH=../linux/modules
CROSS = ARCH=arc CROSS_COMPILE=/usr/local/ARC/gcc/bin/arc-linux-uclibc-
PWD := $(shell pwd)
default:
$(MAKE) -C $(KERNELDIR) $(CROSS) M=$(PWD) modules
install:
$(MAKE) -C $(KERNELDIR) $(CROSS) $(INSTALL) M=$(PWD) modules_install
endif
clean:
rm -rf *.o *~ core .depend .*.cmd *.ko *.mod.c .tmp_versions Module.symvers modules.order
rm -rf $(COMMON_DIR)/.*.cmd $(COMMON_DIR)/.tmp_versions $(COMMON_DIR)/*.o
depend .depend dep:
$(CC) $(CFLAGS) -M *.c > .depend
ifeq (.depend,$(wildcard .depend))
include .depend
endif

View File

@ -0,0 +1,169 @@
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/syscalls.h>
#include <asm/unistd.h>
#include <asm/uaccess.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <asm/io.h>
#include <qtn/registers.h>
#include <qtn/shared_params.h>
#include <qtn/topaz_fwt_sw.h>
#include "qdpc_dspload.h"
static inline unsigned long
qdpc_dsp_to_host_addr(unsigned long dsp_addr)
{
void *ret = bus_to_virt(dsp_addr);
if (RUBY_BAD_VIRT_ADDR == ret) {
panic("Converting out of range DSP address 0x%lx to host address\n", dsp_addr);
}
return virt_to_phys(ret);
}
static char* qdpc_dsp_read(struct file *file, int buflen)
{
char *p = NULL;
if (!file)
return NULL;
p = kmalloc(buflen*sizeof(unsigned char), GFP_ATOMIC);
if (p)
file->f_op->read(file, p, buflen, &file->f_pos);
return p;
}
static int qdpc_dsp_install_firmware(struct file *file, u32 *dsp_start_addr)
{
Elf32_Ehdr *ehdr;
Elf32_Phdr *phdr, *phdr_o;
u8* vaddr;
int i, buflen;
unsigned long p_muc;
char *pdata = NULL;
int e_phnum = 0;
buflen = sizeof(Elf32_Ehdr);
pdata = qdpc_dsp_read(file, buflen);
if (!pdata) {
return -1;
}
ehdr = (Elf32_Ehdr *)pdata;
e_phnum = ehdr->e_phnum;
kfree(pdata);
buflen = e_phnum * sizeof(Elf32_Phdr);
pdata = qdpc_dsp_read(file, buflen);
if (!pdata) {
return -1;
}
phdr = (Elf32_Phdr *)pdata;
phdr_o = (Elf32_Phdr *)pdata;
for(i = 0; i < e_phnum; i++, phdr++)
{
pdata = qdpc_dsp_read(file, phdr->p_filesz);
if (!pdata) {
return -1;
}
/* Skip blocks for DSP X/Y memory */
if ((phdr->p_vaddr >= RUBY_DSP_XYMEM_BEGIN) && (phdr->p_vaddr <= RUBY_DSP_XYMEM_END)) {
if (pdata)
kfree(pdata);
continue;
}
p_muc = qdpc_dsp_to_host_addr(phdr->p_vaddr);
printk("p_vaddr in ELF header is %p, "
"remapping to 0x%lx\n", (void *)phdr->p_vaddr, p_muc);
/* Copy segment to right location */
vaddr = ioremap_nocache(p_muc, phdr->p_memsz);
/* Copy data */
memcpy(vaddr, pdata, phdr->p_filesz);
/* Clear BSS */
memset(vaddr + phdr->p_filesz, 0, phdr->p_memsz - phdr->p_filesz);
iounmap(vaddr);
if (pdata)
kfree(pdata);
}
if (phdr_o)
kfree(phdr_o);
*dsp_start_addr = ehdr->e_entry;
return(0);
}
static void hal_dsp_start(u32 dsp_start_addr)
{
#ifdef CONFIG_ARCH_ARC
/* Check that we can start this address */
if (dsp_start_addr & ((1 << RUBY_SYS_CTL_DSP_REMAP_SHIFT) - 1)) {
panic("DSP address 0x%x cannot be used as entry point\n", (unsigned)dsp_start_addr);
}
/* Tells DSP from which address start execution */
writel(RUBY_SYS_CTL_DSP_REMAP_VAL(dsp_start_addr), RUBY_SYS_CTL_DSP_REMAP);
#else
/* Swap upper and lower half words for DSP instruction */
dsp_start_addr = ((dsp_start_addr >> 16) & 0xFFFF) | (dsp_start_addr << 16);
/* Push the jump instr and location into the mbx */
*(volatile u32*)IO_ADDRESS(UMS_REGS_MB + UMS_MBX_DSP_PUSH)
= DSP_JUMP_INSTR_SWAP;
*(volatile u32*)IO_ADDRESS(UMS_REGS_MB + UMS_MBX_DSP_PUSH)
= dsp_start_addr;
#endif
}
void hal_enable_dsp(void)
{
#ifdef CONFIG_ARCH_ARC
const unsigned long reset = RUBY_SYS_CTL_RESET_DSP_ALL;
qtn_txbf_lhost_init();
writel(reset, RUBY_SYS_CTL_CPU_VEC_MASK);
writel(reset, RUBY_SYS_CTL_CPU_VEC);
writel(0, RUBY_SYS_CTL_CPU_VEC_MASK);
#else
/* Bring the DSP out of reset */
*(volatile u32 *)IO_ADDRESS(SYS_RESET_VECTOR_MASK) = DSP_RESET;
*(volatile u32 *)IO_ADDRESS(SYS_RESET_VECTOR) = DSP_RESET;
#endif
}
int qdpc_dsp_open(void)
{
struct file *file = NULL;
mm_segment_t fs;
u32 dsp_start_addr = 0;
file = filp_open(QDCP_DSP_FILE_NAME, O_RDONLY, 0);
if(IS_ERR(file)) {
printk("error occured while opening file %s, exiting...\n", QDCP_DSP_FILE_NAME);
return -1;
}
fs = get_fs();
set_fs(KERNEL_DS);
qdpc_dsp_install_firmware(file, &dsp_start_addr);
hal_dsp_start(dsp_start_addr);
hal_enable_dsp();
filp_close(file, NULL);
set_fs(fs);
return 0;
}

View File

@ -0,0 +1,62 @@
/**
* Copyright (c) 2012-2012 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#ifndef __QDPC_DSP_H__
#define __QDPC_DSP_H__
#include <linux/version.h>
#include <topaz_vnet.h>
#include <qtn/topaz_tqe_cpuif.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
#define IOREMAP ioremap_nocache
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) */
#define IOREMAP ioremap
#endif
#define QTN_TXBF_MUC_TO_DSP_MBOX_INT (0)
#define QTN_TXBF_DSP_TO_HOST_MBOX_INT (0)
#define QDCP_DSP_FILE_NAME "/etc/firmware/rdsp_driver.0.bin"
RUBY_INLINE void
qtn_txbf_lhost_init(void)
{
#if CONFIG_USE_SPI1_FOR_IPC
/* Initialize SPI controller, keep IRQ disabled */
qtn_mproc_sync_mem_write(RUBY_SPI1_SPCR,
RUBY_SPI1_SPCR_SPE | RUBY_SPI1_SPCR_MSTR |
RUBY_SPI1_SPCR_SPR(0));
qtn_mproc_sync_mem_write(RUBY_SPI1_SPER,
RUBY_SPI1_SPER_ESPR(0));
#else
/* Ack, and keep IRQ disabled */
qtn_mproc_sync_mem_write(RUBY_SYS_CTL_D2L_INT,
qtn_mproc_sync_mem_read(RUBY_SYS_CTL_D2L_INT));
qtn_mproc_sync_mem_write(RUBY_SYS_CTL_D2L_INT_MASK,
~(1 << QTN_TXBF_DSP_TO_HOST_MBOX_INT));
#endif
}
extern int qdpc_dsp_open(void);
#endif /* __QDPC_PFDEP_H__ */

View File

@ -0,0 +1,360 @@
/**
* Copyright (c) 2012-2013 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
/*
* Platform dependent implementation. The customer needs to modify this file.
*/
#include <qdpc_platform.h>
#include <topaz_vnet.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>
#include <asm/gpio.h>
#include <../drivers/pcie2/host/common/qdpc_regs.h>
/* WPS button event reported to user space process */
typedef enum {
MODE_LED_INIT = 0,
MODE_LED_EXIT,
MODE_LED_FLASH,
} MODE_LED_OPS;
#define MODE_LED_GPIO 6
typedef enum {
WPS_BUTTON_NONE_EVENT = 0,
WPS_BUTTON_WIRELESS_EVENT,
WPS_BUTTON_DBGDUMP_EVENT,
WPS_BUTTON_INVALIDE_EVENT
} WPS_Button_Event;
#define WPS_BUTTON_VALID(e) (WPS_BUTTON_NONE_EVENT < (e) && (e) < WPS_BUTTON_INVALIDE_EVENT)
#define WPS_BUTTON_GPIO 4
#define QDPC_WPS_BUTTON_ACTIVE_LEVEL 0
#define WPS_BUTTON_TIMER_INTERVAL ((3 * HZ) / 10) /* timer interval */
/*
* Queue of processes who access wps_button file
*/
DECLARE_WAIT_QUEUE_HEAD(WPS_Button_WaitQ);
static WPS_Button_Event wps_button_event = WPS_BUTTON_NONE_EVENT;
struct timer_list qdpc_wps_button_timer;
static u32 qdpc_wps_button_last_level = ~QDPC_WPS_BUTTON_ACTIVE_LEVEL;
static u32 qdpc_wps_button_down_jiffies = 0; /* jiffies when the button went down; reset to 0 after the button is released */
static int vmac_rst_rc_en = 1;
struct work_struct detect_ep_rst_work;
void enable_vmac_ints(struct vmac_priv *vmp)
{
uint32_t temp = readl(QDPC_RC_SYS_CTL_PCIE_INT_MASK);
if(vmp->msi_enabled) {
temp |= BIT(10); /* MSI */
} else {
temp |= BIT(11); /* Legacy INTx */
}
writel(temp, QDPC_RC_SYS_CTL_PCIE_INT_MASK);
}
void disable_vmac_ints(struct vmac_priv *vmp)
{
uint32_t temp = readl(QDPC_RC_SYS_CTL_PCIE_INT_MASK);
if(vmp->msi_enabled) {
temp &= ~BIT(10); /* MSI */
} else {
temp &= ~BIT(11); /* Legacy INTx */
}
writel(temp, QDPC_RC_SYS_CTL_PCIE_INT_MASK);
}
static ssize_t vmac_reset_get(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", vmac_rst_rc_en);
}
static ssize_t vmac_reset_set(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
uint8_t cmd;
cmd = (uint8_t)simple_strtoul(buf, NULL, 10);
if (cmd == 0)
vmac_rst_rc_en = 0;
else
vmac_rst_rc_en = 1;
return count;
}
DEVICE_ATTR(enable_reset, S_IWUSR | S_IRUSR, vmac_reset_get, vmac_reset_set);
static void detect_ep_rst(struct work_struct *data)
{
kernel_restart(NULL);
}
void enable_ep_rst_detection(struct net_device *ndev)
{
uint32_t temp = readl(QDPC_RC_SYS_CTL_PCIE_INT_MASK);
temp |= QDPC_INTR_EP_RST_MASK;
writel(temp, QDPC_RC_SYS_CTL_PCIE_INT_MASK);
device_create_file(&ndev->dev, &dev_attr_enable_reset);
INIT_WORK(&detect_ep_rst_work, detect_ep_rst);
}
void disable_ep_rst_detection(struct net_device *ndev)
{
uint32_t temp = readl(QDPC_RC_SYS_CTL_PCIE_INT_MASK);
temp &= ~QDPC_INTR_EP_RST_MASK;
writel(temp, QDPC_RC_SYS_CTL_PCIE_INT_MASK);
device_remove_file(&ndev->dev, &dev_attr_enable_reset);
}
void handle_ep_rst_int(struct net_device *ndev)
{
uint32_t status = readl(QDPC_RC_SYS_CTL_PCIE_INT_STAT);
if ((status & QDPC_INTR_EP_RST_MASK) == 0)
return;
/* Clear pending interrupt */
writel(QDPC_INTR_EP_RST_MASK, QDPC_RC_SYS_CTL_PCIE_INT_STAT);
printk("Detected reset of Endpoint\n");
if (vmac_rst_rc_en == 1) {
netif_stop_queue(ndev);
schedule_work(&detect_ep_rst_work);
}
}
static void qdpc_mode_led(struct net_device *ndev, MODE_LED_OPS op)
{
static int inited = 0;
static u32 led_bk = 0;
switch(op) {
case MODE_LED_INIT:
if (gpio_request(MODE_LED_GPIO, ndev->name) < 0)
printk(KERN_INFO "%s: Failed to request GPIO%d for GPIO reset\n",
ndev->name, MODE_LED_GPIO);
led_bk = gpio_get_value(MODE_LED_GPIO);
gpio_direction_output(MODE_LED_GPIO, led_bk);
inited = 1;
break;
case MODE_LED_EXIT:
if (inited) {
gpio_set_value(MODE_LED_GPIO, led_bk);
gpio_free(MODE_LED_GPIO);
inited = 0;
}
break;
case MODE_LED_FLASH:
if (inited)
gpio_set_value(MODE_LED_GPIO, ~gpio_get_value(MODE_LED_GPIO) & 0x01);
break;
}
}
static void qdpc_wps_button_event_wakeup(struct net_device *ndev, WPS_Button_Event event)
{
struct vmac_priv *priv = netdev_priv(ndev);
if (!WPS_BUTTON_VALID(event))
return;
/* notify local watcher */
wps_button_event = event;
wake_up_all(&WPS_Button_WaitQ);
/* notify the EP of the offline debug event, if the EP is ready */
if (priv->ep_ready && event == WPS_BUTTON_DBGDUMP_EVENT)
writel(TOPAZ_SET_INT(IPC_OFFLINE_DBG), priv->ep_ipc_reg);
}
static ssize_t qdpc_wps_button_read(struct device *dev,
struct device_attribute *attr,
char *buff)
{
int i = 0;
/* This read always blocks until the WPS button is pressed,
* so increase the module reference count to prevent the module
* from being unloaded during the blocking read
*/
if (!try_module_get(THIS_MODULE))
return 0;
/* wait for valid WPS button event */
wait_event_interruptible(WPS_Button_WaitQ, WPS_BUTTON_VALID(wps_button_event));
/* read back empty string in signal wakeup case */
for (i = 0; i < _NSIG_WORDS; i++) {
if (current->pending.signal.sig[i] & ~current->blocked.sig[i]) {
module_put(THIS_MODULE);
return 0;
}
}
sprintf(buff, "%d\n", wps_button_event);
/* after new event been handled, reset to none event */
wps_button_event = WPS_BUTTON_NONE_EVENT;
module_put(THIS_MODULE);
return strlen(buff);
}
static ssize_t qdpc_wps_button_write(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
int input = 0;
struct net_device *ndev;
input = simple_strtoul(buf, NULL, 10);
ndev = (struct net_device*)dev_get_drvdata(dev);
switch (input) {
case 1:
qdpc_mode_led(ndev, MODE_LED_INIT);
qdpc_mode_led(ndev, MODE_LED_FLASH);
msleep(300);
qdpc_mode_led(ndev, MODE_LED_FLASH);
msleep(300);
qdpc_mode_led(ndev, MODE_LED_FLASH);
msleep(300);
qdpc_mode_led(ndev, MODE_LED_EXIT);
break;
default:
printk(KERN_INFO "WPS button: unknow cmd (%d)\n", input);
}
return count;
}
DEVICE_ATTR(wps_button, S_IWUSR | S_IRUSR, qdpc_wps_button_read, qdpc_wps_button_write); /* dev_attr_wps_button */
static void qdpc_wps_button_device_file_create(struct net_device *ndev)
{
device_create_file(&(ndev->dev), &dev_attr_wps_button);
}
static void qdpc_wps_polling_button_notifier(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
u32 current_level;
current_level = gpio_get_value(WPS_BUTTON_GPIO);
/* records the falling edge jiffies */
if ((current_level == QDPC_WPS_BUTTON_ACTIVE_LEVEL)
&& (qdpc_wps_button_last_level != QDPC_WPS_BUTTON_ACTIVE_LEVEL)) {
qdpc_mode_led(dev, MODE_LED_INIT);
qdpc_wps_button_down_jiffies = jiffies;
}
/* at rising edge */
if ((current_level != QDPC_WPS_BUTTON_ACTIVE_LEVEL)
&& (qdpc_wps_button_last_level == QDPC_WPS_BUTTON_ACTIVE_LEVEL)) {
/* The WPS button event is triggered on the rising edge, i.e. when
* the button changes from the active to the inactive level.
*
* Different press durations trigger different events.
*/
if ((jiffies - qdpc_wps_button_down_jiffies) >= 10 * HZ) {
/* wakeup the event waiting processes */
qdpc_wps_button_event_wakeup(dev, WPS_BUTTON_DBGDUMP_EVENT);
printk(KERN_INFO "WPS: button long press polling at %u\n", (unsigned int) jiffies);
} else {
/* wakeup the event waiting processes */
qdpc_wps_button_event_wakeup(dev, WPS_BUTTON_WIRELESS_EVENT);
printk(KERN_INFO "WPS: button short press polling at %u\n", (unsigned int) jiffies);
}
/* back to 0 after rising edge */
qdpc_wps_button_down_jiffies = 0;
qdpc_mode_led(dev, MODE_LED_EXIT);
}
/* after the button has been held for more than 10s, start toggling the mode LED to tell the user to release the button */
if (qdpc_wps_button_down_jiffies != 0 && ((jiffies - qdpc_wps_button_down_jiffies) >= 10 * HZ)) {
qdpc_mode_led(dev, MODE_LED_FLASH);
}
/* Restart the timer */
mod_timer(&qdpc_wps_button_timer, jiffies + WPS_BUTTON_TIMER_INTERVAL);
qdpc_wps_button_last_level = current_level;
return;
}
int qdpc_wps_button_init(struct net_device *dev)
{
/*
* Set up timer to poll the button.
* Request the GPIO resource and export it for userspace
*/
if (gpio_request(WPS_BUTTON_GPIO, dev->name) < 0)
printk(KERN_INFO "%s: Failed to request GPIO%d for GPIO reset\n",
dev->name, WPS_BUTTON_GPIO);
init_timer(&qdpc_wps_button_timer);
qdpc_wps_button_timer.function = qdpc_wps_polling_button_notifier;
qdpc_wps_button_timer.data = (unsigned long)dev;
qdpc_wps_button_timer.expires = jiffies + WPS_BUTTON_TIMER_INTERVAL;
add_timer(&qdpc_wps_button_timer);
/* create the device file for user space use */
qdpc_wps_button_device_file_create(dev);
return 0;
}
void qdpc_wps_button_exit(void)
{
del_timer(&qdpc_wps_button_timer);
}
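/*
 * Illustrative sketch (not part of the driver): a user-space consumer of
 * the wps_button attribute created above. The sysfs path and interface
 * name are assumptions based on device_create_file(&ndev->dev, ...);
 * the read blocks until a button event is reported.
 */
#if 0	/* example only, user space */
#include <stdio.h>

int main(void)
{
	char buf[16];
	FILE *f = fopen("/sys/class/net/host0/wps_button", "r"); /* path assumed */

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))	/* blocks until an event arrives */
		printf("wps event: %s", buf);
	fclose(f);
	return 0;
}
#endif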

View File

@ -0,0 +1,110 @@
/**
* Copyright (c) 2012-2012 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#ifndef __QDPC_PFDEP_H__
#define __QDPC_PFDEP_H__
#include <linux/version.h>
#include <topaz_vnet.h>
#include <qtn/topaz_tqe_cpuif.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
#define IOREMAP ioremap_nocache
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) */
#define IOREMAP ioremap
#endif
/* IO functions */
#ifndef readb
#define readb(addr) (*(volatile unsigned char *) (addr))
#endif
#ifndef readw
#define readw(addr) (*(volatile unsigned short *) (addr))
#endif
#ifndef readl
#define readl(addr) (*(volatile unsigned int *) (addr))
#endif
#ifndef writeb
#define writeb(b,addr) (*(volatile unsigned char *) (addr) = (b))
#endif
#ifndef writew
#define writew(b,addr) (*(volatile unsigned short *) (addr) = (b))
#endif
#ifndef writel
#define writel(b,addr) (*(volatile unsigned int *) (addr) = (b))
#endif
/*
* Interrupt
*/
/* Interrupt Mask and Status Register */
#define QDPC_RC_SYS_CTL_BASE 0xe0000000
#define QDPC_RC_SYS_CTL_PCIE_INT_MASK (QDPC_RC_SYS_CTL_BASE + 0xC0)
#define QDPC_RC_SYS_CTL_PCIE_INT_STAT (QDPC_RC_SYS_CTL_BASE + 0x17C)
/* Bit number and mask of MSI in the interrupt mask and status register */
#define QDPC_INTR_MSI_BIT 12
#define QDPC_INTR_MSI_MASK (1 << QDPC_INTR_MSI_BIT)
/* Bit number and mask of EP-reset-detect Interrupt in the mask and status register */
#define QDPC_INTR_EP_RST_BIT 3
#define QDPC_INTR_EP_RST_MASK (1 << QDPC_INTR_EP_RST_BIT)
extern void enable_vmac_ints(struct vmac_priv *vmp);
extern void disable_vmac_ints(struct vmac_priv *vmp);
extern void enable_ep_rst_detection(struct net_device *ndev);
extern void disable_ep_rst_detection(struct net_device *ndev);
extern void handle_ep_rst_int(struct net_device *ndev);
extern int qdpc_wps_button_init(struct net_device *dev);
extern void qdpc_wps_button_exit(void);
/* Allocated buffer size for a packet */
#define SKB_BUF_SIZE RX_BUF_SIZE
/* Transmit Queue Length */
#if defined(QTN_BYTEALIGN)
#define QDPC_TX_QUEUE_SIZE 180
#else
#define QDPC_TX_QUEUE_SIZE 200
#endif
/* Receive Queue Length */
#define QDPC_RX_QUEUE_SIZE 384
/* SDP requires packets to show up at the Lhost */
#define QDPC_PLATFORM_IFPORT TOPAZ_TQE_LHOST_PORT
/* Customer defined function */
#define qdpc_platform_init() 0
#define qdpc_platform_exit() do { } while(0)
/* PCIe driver update resource in PCI configure space after EP reset */
#define qdpc_update_hw_bar(pdev, index) do { } while(0)
#endif /* __QDPC_PFDEP_H__ */
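/*
 * Illustrative sketch (not part of this header): telling the MSI and
 * EP-reset interrupt causes apart with the status register and masks
 * above. The handler name and the datapath action are assumptions; the
 * real EP-reset handling is handle_ep_rst_int() in qdpc_platform.c.
 */
#if 0	/* example only */
static inline void qdpc_example_classify_irq(struct net_device *ndev)
{
	uint32_t status = readl(QDPC_RC_SYS_CTL_PCIE_INT_STAT);

	if (status & QDPC_INTR_EP_RST_MASK)
		handle_ep_rst_int(ndev);	/* clears the bit, reboots if enabled */
	if (status & QDPC_INTR_MSI_MASK)
		;	/* normal datapath interrupt: ack it and schedule NAPI */
}
#endif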

View File

@ -0,0 +1,28 @@
#
# Makefile for ST platform
#
EXTRA_CFLAGS += -Wall \
-I$(src) \
-I$(src)/../../include \
-I$(src)/../common $(DNI_KMOD_CFLAGS)
EXTRA_CFLAGS += -DRC_TXDONE_TIMER -DQTN_WAKEQ_SUPPORT
default: all
COMMON_DIR := ../common
qdpc-host-objs := $(COMMON_DIR)/qdpc_init.o $(COMMON_DIR)/qdpc_pcie.o $(COMMON_DIR)/topaz_vnet.o qdpc_platform.o
obj-m := qdpc-host.o
qdpc_host.o: $(qdpc-host-objs)
ld -r $^ -o $@
all:
make -C /lib/modules/$(KVERSION)/build M=$(PWD) modules
clean:
make -C /lib/modules/$(KVERSION)/build M=$(PWD) clean
rm -rf $(COMMON_DIR)/.*.cmd $(COMMON_DIR)/.tmp_versions
rm -rf Module.markers modules.order *~ $(qdpc-host-objs)

View File

@ -0,0 +1,72 @@
/**
* Copyright (c) 2012-2012 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
/*
* Platform dependent implementation. The customer needs to modify this file.
*/
#include <linux/interrupt.h>
#include <qdpc_platform.h>
#include <topaz_vnet.h>
#include <qdpc_regs.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
/*
* Enable MSI interrupt of PCIe.
*/
void enable_vmac_ints(struct vmac_priv *vmp)
{
volatile uint32_t *dma_wrd_imwr = QDPC_BAR_VADDR(vmp->dmareg_bar, TOPAZ_IMWR_DONE_ADDRLO_OFFSET);
writel(vmp->dma_msi_imwr, dma_wrd_imwr);
}
/*
* Disable MSI interrupt of PCIe.
*/
void disable_vmac_ints(struct vmac_priv *vmp)
{
volatile uint32_t *dma_wrd_imwr = QDPC_BAR_VADDR(vmp->dmareg_bar, TOPAZ_IMWR_DONE_ADDRLO_OFFSET);
writel(vmp->dma_msi_dummy, dma_wrd_imwr);
}
/*
* Enable interrupt for detecting EP reset.
*/
void enable_ep_rst_detection(struct net_device *ndev)
{
}
/*
* Disable interrupt for detecting EP reset.
*/
void disable_ep_rst_detection(struct net_device *ndev)
{
}
/*
* Interrupt context for detecting EP reset.
* This function should do:
* 1. check interrupt status to see if EP reset.
* 2. if EP reset, handle it.
*/
void handle_ep_rst_int(struct net_device *ndev)
{
}

View File

@ -0,0 +1,101 @@
/**
* Copyright (c) 2012-2012 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
/*
* Platform dependent implementation. The customer needs to modify this file.
*/
#ifndef __QDPC_PFDEP_H__
#define __QDPC_PFDEP_H__
#include <linux/version.h>
#include <topaz_vnet.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
#define IOREMAP ioremap_wc
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) */
#define IOREMAP ioremap
#endif
/* IO functions */
#ifndef readb
#define readb(addr) (*(volatile unsigned char *) (addr))
#endif
#ifndef readw
#define readw(addr) (*(volatile unsigned short *) (addr))
#endif
#ifndef readl
#define readl(addr) (*(volatile unsigned int *) (addr))
#endif
#ifndef writeb
#define writeb(b,addr) (*(volatile unsigned char *) (addr) = (b))
#endif
#ifndef writew
#define writew(b,addr) (*(volatile unsigned short *) (addr) = (b))
#endif
#ifndef writel
#define writel(b,addr) (*(volatile unsigned int *) (addr) = (b))
#endif
#ifndef virt_to_bus
#define virt_to_bus virt_to_phys
#endif
/* Bit number and mask of MSI in the interrupt mask and status register */
#define QDPC_INTR_MSI_BIT 0
#define QDPC_INTR_MSI_MASK (1 << QDPC_INTR_MSI_BIT)
/* Enable MSI interrupt of PCIe */
extern void enable_vmac_ints(struct vmac_priv *vmp);
/* Disable MSI interrupt of PCIe */
extern void disable_vmac_ints(struct vmac_priv *vmp);
/* Enable interrupt for detecting EP reset */
extern void enable_ep_rst_detection(struct net_device *ndev);
/* Disable interrupt for detecting EP reset */
extern void disable_ep_rst_detection(struct net_device *ndev);
/* Interrupt context for detecting EP reset */
extern void handle_ep_rst_int(struct net_device *ndev);
/* Allocated buffer size for a packet */
#define SKB_BUF_SIZE 2048
/* Transmit Queue Length */
#define QDPC_TX_QUEUE_SIZE 180
/* Receive Queue Length */
#define QDPC_RX_QUEUE_SIZE 384
/* Customer defined function */
#define qdpc_platform_init() 0
#define qdpc_platform_exit() do { } while(0)
/* PCIe driver update resource in PCI configure space after EP reset */
#define qdpc_update_hw_bar(pdev, index) do { } while(0)
/* TODO: If IRQ-loss issue can be fixed, remove macro below */
#define QDPC_PLATFORM_IRQ_FIXUP
#endif /* __QDPC_PFDEP_H__ */

View File

@ -0,0 +1,66 @@
/**
* Copyright (c) 2012-2012 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#ifndef __QDPC_CONFIG_H__
#define __QDPC_CONFIG_H__
#include "ruby_pcie_bda.h"
#define QDPC_MAC_ADDR_SIZE 6
/*
* The Type/Length field is used to check whether a frame is a data
* packet or a netlink packet (call_qcsapi remote interface).
* 0x0601 is used as the netlink packet type, together with a MAC magic
* number (Quantenna OUI), to distinguish netlink packets.
*/
#define QDPC_APP_NETLINK_TYPE 0x0601
#define QDPC_NETLINK_DST_MAGIC "\x00\x26\x86\x00\x00\x00"
#define QDPC_NETLINK_SRC_MAGIC "\x00\x26\x86\x00\x00\x00"
#define QDPC_RPC_TYPE_MASK 0x0f00
#define QDPC_RPC_TYPE_STRCALL 0x0100
#define QDPC_RPC_TYPE_LIBCALL 0x0200
#define QDPC_RPC_TYPE_FRAG_MASK 0x8000
#define QDPC_RPC_TYPE_FRAG 0x8000
/* Used on RC side */
#define QDPC_NETLINK_RPC_PCI_CLNT 30
#define QDPC_NL_TYPE_CLNT_STR_REG (QDPC_RPC_TYPE_STRCALL | 0x0010)
#define QDPC_NL_TYPE_CLNT_STR_REQ (QDPC_RPC_TYPE_STRCALL | 0x0011)
#define QDPC_NL_TYPE_CLNT_LIB_REG (QDPC_RPC_TYPE_LIBCALL | 0x0010)
#define QDPC_NL_TYPE_CLNT_LIB_REQ (QDPC_RPC_TYPE_LIBCALL | 0x0011)
/* Used on EP side */
#define QDPC_NETLINK_RPC_PCI_SVC 31
#define QDPC_NL_TYPE_SVC_STR_REG (QDPC_RPC_TYPE_STRCALL | 0x0010)
#define QDPC_NL_TYPE_SVC_STR_REQ (QDPC_RPC_TYPE_STRCALL | 0x0011)
#define QDPC_NL_TYPE_SVC_LIB_REG (QDPC_RPC_TYPE_LIBCALL | 0x0010)
#define QDPC_NL_TYPE_SVC_LIB_REQ (QDPC_RPC_TYPE_LIBCALL | 0x0011)
typedef struct qdpc_cmd_hdr {
uint8_t dst_magic[ETH_ALEN];
uint8_t src_magic[ETH_ALEN];
__be16 type;
__be16 len;
__be16 rpc_type;
__be16 total_len;
} qdpc_cmd_hdr_t;
#endif /* __QDPC_CONFIG_H__ */
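/*
 * Illustrative sketch (not part of this header): recognizing an RPC
 * netlink frame by the magic MACs and type defined above. Casting the
 * frame start to qdpc_cmd_hdr_t follows the definition here; the helper
 * name is an assumption.
 */
#if 0	/* example only */
static inline int qdpc_example_is_rpc_frame(const void *data, size_t len)
{
	const qdpc_cmd_hdr_t *hdr = data;

	if (len < sizeof(*hdr))
		return 0;
	return hdr->type == htons(QDPC_APP_NETLINK_TYPE) &&
		memcmp(hdr->dst_magic, QDPC_NETLINK_DST_MAGIC, ETH_ALEN) == 0 &&
		memcmp(hdr->src_magic, QDPC_NETLINK_SRC_MAGIC, ETH_ALEN) == 0;
}
#endif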

View File

@ -0,0 +1,65 @@
/**
* Copyright (c) 2012-2012 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#ifndef __QDPC_DEBUG_H__
#define __QDPC_DEBUG_H__
/* Debug macros */
#define SUCCESS 0
#define FAILURE -1
#ifdef DEBUG
#define PRINT_DBG(format, ...) printk(KERN_DEBUG format, ##__VA_ARGS__)
#else
#define PRINT_DBG(format, ...) do { } while(0)
#endif
#define PRINT_ERROR(format, ...) printk(KERN_ERR format, ##__VA_ARGS__)
#define PRINT_INFO(format, ...) printk(KERN_INFO format, ##__VA_ARGS__)
#define DBGFMT "%s-%d: "
#define DBGARG __func__, __LINE__
#define DBGPRINTF(fmt, ...) \
do { \
if(printk_ratelimit()) { \
printk(DBGFMT fmt, DBGARG, ##__VA_ARGS__); \
} \
} while(0)
#ifdef DEBUG
#define qdpc_print_dump(str_, buf_, len_) \
{ \
u32 i = 0; \
printk("%s\n", str_); \
printk("0x%04X : ", i*8); \
for (i=0; i < (u32)(len_); i++) { \
if (i && ((i%8) == 0)) { \
printk( "%s", "\n"); \
printk("0x%04X : ", (i));\
} \
printk("%02x ", (buf_)[i]); \
} \
printk("\n%s\n", str_); \
}
#else
#define qdpc_print_dump(str_, buf_, len_)
#endif
#endif
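/*
 * Illustrative sketch (not part of this header): typical use of the debug
 * helpers above. PRINT_DBG() and qdpc_print_dump() only produce output
 * when the module is built with DEBUG defined.
 */
#if 0	/* example only */
static void qdpc_example_dump(const u8 *buf, u32 len)
{
	PRINT_DBG("dumping %u bytes\n", len);
	qdpc_print_dump("rx frame", buf, len);
	DBGPRINTF("dump done (%u bytes)\n", len);
}
#endif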

View File

@ -0,0 +1 @@
#define DRV_VERSION "v37.3.2.44"

View File

@ -0,0 +1,136 @@
/*
* (C) Copyright 2011 Quantenna Communications Inc.
*
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
/*
* Header file which describes Ruby PCI Express Boot Data Area
* Has to be used by both kernel and bootloader.
*/
#ifndef RUBY_PCIE_BDA_H
#define RUBY_PCIE_BDA_H
/* Area mapped via the BAR that is visible to the host */
#define RUBY_PCIE_BDA_ADDR CONFIG_ARC_PCIE_BASE
#define RUBY_PCIE_BDA_SIZE CONFIG_ARC_PCIE_SIZE
#define RUBY_BDA_VADDR (RUBY_PCIE_BDA_ADDR + 0x80000000)
#define QDPC_PCIE_BDA_VERSION 0x1000
#define QDPC_BDA_PCIE_INIT 0x01
#define QDPC_BDA_PCIE_RDY 0x02
#define QDPC_BDA_FW_LOAD_RDY 0x03
#define QDPC_BDA_FW_LOAD_DONE 0x04
#define QDPC_BDA_FW_START 0x05
#define QDPC_BDA_FW_RUN 0x06
#define QDPC_BDA_FW_HOST_RDY 0x07
#define QDPC_BDA_FW_TARGET_RDY 0x11
#define QDPC_BDA_FW_TARGET_BOOT 0x12
#define QDPC_BDA_FW_FLASH_BOOT 0x13
#define QDPC_BDA_FW_HOST_LOAD 0x08
#define QDPC_BDA_FW_BLOCK_DONE 0x09
#define QDPC_BDA_FW_BLOCK_RDY 0x0A
#define QDPC_BDA_FW_EP_RDY 0x0B
#define QDPC_BDA_FW_BLOCK_END 0x0C
#define QDPC_BDA_FW_CONFIG 0x0D
#define QDPC_BDA_FW_RUNNING 0x0E
#define QDPC_BDA_PCIE_FAIL 0x82
#define QDPC_BDA_FW_LOAD_FAIL 0x85
#define PCIE_BDA_RCMODE BIT(1)
#define PCIE_BDA_MSI BIT(2)
#define PCIE_BDA_BAR64 BIT(3)
#define PCIE_BDA_FLASH_PRESENT BIT(4) /* Tell the host whether the EP has flash containing firmware */
#define PCIE_BDA_FLASH_BOOT BIT(5) /* Tell the TARGET to boot from flash */
#define PCIE_BDA_XMIT_UBOOT BIT(6) /* EP asks for u-boot.bin */
#define PCIE_BDA_TARGET_FBOOT_ERR BIT(8) /* TARGET flash boot failed */
#define PCIE_BDA_TARGET_FWLOAD_ERR BIT(9) /* TARGET firmware load failed */
#define PCIE_BDA_HOST_NOFW_ERR BIT(12) /* Host could not find any firmware */
#define PCIE_BDA_HOST_MEMALLOC_ERR BIT(13) /* Host failed to allocate the firmware download memory block */
#define PCIE_BDA_HOST_MEMMAP_ERR BIT(14) /* Host failed to PCI-map the firmware download memory block */
#define PCIE_BDA_VER(x) (((x) >> 4) & 0xFF)
#define PCIE_BDA_ERROR_MASK 0xFF00 /* the upper 8 bits are used as error flags */
#define PCIE_DMA_OFFSET_ERROR 0xFFFF
#define PCIE_DMA_OFFSET_ERROR_MASK 0xFFFF
#define PCIE_BDA_NAMELEN 32
#define QDPC_PCI_ENDIAN_DETECT_DATA 0x12345678
#define QDPC_PCI_ENDIAN_REVERSE_DATA 0x78563412
#define QDPC_PCI_ENDIAN_VALID_STATUS 0x3c3c3c3c
#define QDPC_PCI_ENDIAN_INVALID_STATUS 0
#define QDPC_PCI_LITTLE_ENDIAN 0
#define QDPC_PCI_BIG_ENDIAN 0xffffffff
#define QDPC_SCHED_TIMEOUT (HZ / 20)
#define PCIE_DMA_ISSUE_LOG_NUM 128
#define PCIE_RC_TX_QUEUE_LEN 256
#define PCIE_TX_VALID_PKT 0x80000000
#define PCIE_PKT_LEN_MASK 0xffff
struct vmac_pkt_info {
uint32_t addr;
uint32_t info;
};
typedef struct qdpc_pcie_bda {
uint16_t bda_len; /* Size of BDA block */
uint16_t bda_version; /* BDA version */
uint32_t bda_bootstate; /* Boot state of device */
uint32_t bda_dma_mask; /* Number of addressable DMA bits */
uint32_t bda_dma_offset; /* HW specific offset for DMA engine */
uint32_t bda_flags;
uint32_t bda_img; /* Current load image block */
uint32_t bda_img_size; /* Current load image block size */
uint32_t bda_ep2h_irqstatus; /* Added here to allow boot loader to use irqs if desired */
uint32_t bda_h2ep_irqstatus; /* Added here to allow boot loader to use irqs if desired */
uint32_t bda_msi_addr;
uint8_t reserved1[56]; /* Reserve 56 bytes to make it compatible with older version */
uint32_t bda_flashsz;
char bda_boardname[PCIE_BDA_NAMELEN];
uint32_t bda_pci_pre_status; /* PCI endian check previous status */
uint32_t bda_pci_endian; /* Check pci memory endian format */
uint32_t bda_pci_post_status; /* PCI endian check post status */
int32_t bda_h2ep_txd_budget; /* txdone replenish budget for ep */
int32_t bda_ep2h_txd_budget; /* txdone replenish budget for host */
uint32_t bda_rc_rx_bd_base; /* EP rx buffer descriptors base address */
uint32_t bda_rc_rx_bd_num;
uint32_t bda_rc_tx_bd_base; /* RC rx buffer descriptors base address */
uint32_t bda_rc_tx_bd_num;
uint8_t bda_ep_link_state;
uint8_t bda_rc_link_state;
uint8_t bda_rc_msi_enabled;
uint8_t reserved2;
uint32_t bda_ep_next_pkt; /* A pointer to RC's memory specifying next packet to be handled by EP */
struct vmac_pkt_info request[PCIE_RC_TX_QUEUE_LEN];
} qdpc_pcie_bda_t;
#endif
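The bda_bootstate values above form a simple host/EP handshake during firmware download. A hedged host-side sketch, assuming the BDA has been ioremapped (the accessor pattern, timeout, and function name are assumptions, not the actual driver sequence):
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
/* Hypothetical host-side wait: block until the EP bootloader reports it is
 * ready for firmware, then announce that the host will push image blocks. */
static int example_wait_fw_load_rdy(qdpc_pcie_bda_t __iomem *bda)
{
	unsigned long timeout = jiffies + 10 * HZ;

	while (readl(&bda->bda_bootstate) != QDPC_BDA_FW_LOAD_RDY) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		msleep(jiffies_to_msecs(QDPC_SCHED_TIMEOUT));
	}

	writel(QDPC_BDA_FW_HOST_LOAD, &bda->bda_bootstate);
	return 0;
}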

View File

@ -0,0 +1,51 @@
/**
* Copyright (c) 2012-2012 Quantenna Communications, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
**/
#ifndef __DRIVES_NET_TOPAZ_NETCOM_H
#define __DRIVES_NET_TOPAZ_NETCOM_H
#define IPC_BIT_EP_RX_PKT (0)
#define IPC_BIT_RESET_EP (1)
#define IPC_BIT_RC_STOP_TX (2)
#define IPC_BIT_RC_RX_DONE (3)
#define IPC_BIT_EP_PM_CTRL (4)
#define IPC_BIT_OFFLINE_DBG (5)
#define IPC_EP_RX_PKT (BIT(IPC_BIT_EP_RX_PKT))
#define IPC_RESET_EP (BIT(IPC_BIT_RESET_EP))
#define IPC_RC_STOP_TX (BIT(IPC_BIT_RC_STOP_TX))
#define IPC_RC_RX_DONE (BIT(IPC_BIT_RC_RX_DONE))
#define IPC_EP_PM_CTRL (BIT(IPC_BIT_EP_PM_CTRL))
#define IPC_OFFLINE_DBG (BIT(IPC_BIT_OFFLINE_DBG))
#define TQE_NAPI_SCHED (0x3)
#define TQE_ENABLE_INTR (0x1)
struct vmac_bd {
uint32_t buff_addr;
uint32_t buff_info;
};
struct vmac_rx_buf {
uint32_t baddr;
uint16_t offset;
uint16_t len;
};
#endif /* __DRIVES_NET_TOPAZ_NETCOM_H */

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,211 @@
/*-
* Copyright (c) 2001 Atsushi Onoe
* Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $Id: ieee80211_crypto.h 1441 2006-02-06 16:03:21Z mrenzmann $
*/
#ifndef _NET80211_IEEE80211_CRYPTO_H_
#define _NET80211_IEEE80211_CRYPTO_H_
/*
* 802.11 protocol crypto-related definitions.
*/
#define IEEE80211_KEYBUF_SIZE 16
#define IEEE80211_MICBUF_SIZE (8 + 8) /* space for both tx+rx keys */
#define IEEE80211_QOS_TID_MAX 16
#define IEEE80211_RSC_NON_QOS (IEEE80211_QOS_TID_MAX)
#define IEEE80211_RSC_ROBUST_MGMT (IEEE80211_QOS_TID_MAX + 1)
#define IEEE80211_RSC_MAX (IEEE80211_QOS_TID_MAX + 2)
/*
* Old WEP-style key. Deprecated.
*/
struct ieee80211_wepkey {
u_int wk_len; /* key length in bytes */
u_int8_t wk_key[IEEE80211_KEYBUF_SIZE];
};
struct ieee80211_cipher;
/*
* Crypto key state. There is sufficient room for all supported
* ciphers (see below). The underlying ciphers are handled
* separately through loadable cipher modules that register with
* the generic crypto support. A key has a reference to an instance
* of the cipher; any per-key state is hung off wk_private by the
* cipher when it is attached. Ciphers are automatically called
* to detach and cleanup any such state when the key is deleted.
*
* The generic crypto support handles encap/decap of cipher-related
* frame contents for both hardware- and software-based implementations.
* A key requiring software crypto support is automatically flagged and
* the cipher is expected to honor this and do the necessary work.
* Ciphers such as TKIP may also support mixed hardware/software
* encrypt/decrypt and MIC processing.
*
* Note: This definition must be the same as qtn_key.
*/
struct ieee80211_key {
u_int8_t wk_keylen; /* key length in bytes */
u_int8_t wk_flags;
#define IEEE80211_KEY_XMIT 0x01 /* key used for xmit */
#define IEEE80211_KEY_RECV 0x02 /* key used for recv */
#define IEEE80211_KEY_GROUP 0x04 /* key used for WPA group operation */
#define IEEE80211_KEY_SWCRYPT 0x10 /* host-based encrypt/decrypt */
#define IEEE80211_KEY_SWMIC 0x20 /* host-based enmic/demic */
#define IEEE80211_KEY_VLANGROUP 0x40 /* VLAN group key */
u_int16_t wk_keyix; /* key index */
u_int8_t wk_key[IEEE80211_KEYBUF_SIZE+IEEE80211_MICBUF_SIZE];
#define wk_txmic wk_key + IEEE80211_KEYBUF_SIZE + 0
#define wk_rxmic wk_key + IEEE80211_KEYBUF_SIZE + 8
u_int64_t wk_keyrsc[IEEE80211_RSC_MAX]; /* key receive sequence counter */
u_int64_t wk_keytsc; /* key transmit sequence counter */
u_int32_t wk_ciphertype;
const struct ieee80211_cipher *wk_cipher;
void *wk_private; /* private cipher state */
};
#define IEEE80211_KEY_COMMON /* common flags passed in by apps */\
(IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV | IEEE80211_KEY_GROUP)
/*
* NB: these values are ordered carefully; there are lots of
 * implications in any reordering. In particular beware
* that 4 is not used to avoid conflicting with IEEE80211_F_PRIVACY.
*/
#define IEEE80211_CIPHER_WEP 0
#define IEEE80211_CIPHER_TKIP 1
#define IEEE80211_CIPHER_AES_OCB 2
#define IEEE80211_CIPHER_AES_CCM 3
#define IEEE80211_CIPHER_AES_CMAC 4
#define IEEE80211_CIPHER_CKIP 5
#define IEEE80211_CIPHER_NONE 6 /* pseudo value */
#define IEEE80211_CIPHER_MAX (IEEE80211_CIPHER_NONE+1)
#define IEEE80211_KEYIX_NONE ((u_int8_t) - 1)
#if defined(__KERNEL__) || defined(_KERNEL)
struct ieee80211com;
struct ieee80211vap;
struct ieee80211_node;
struct sk_buff;
void ieee80211_crypto_attach(struct ieee80211com *);
void ieee80211_crypto_detach(struct ieee80211com *);
void ieee80211_crypto_vattach(struct ieee80211vap *);
void ieee80211_crypto_vdetach(struct ieee80211vap *);
int ieee80211_crypto_newkey(struct ieee80211vap *, int, int,
struct ieee80211_key *);
int ieee80211_crypto_delkey(struct ieee80211vap *, struct ieee80211_key *,
struct ieee80211_node *);
int ieee80211_crypto_setkey(struct ieee80211vap *, struct ieee80211_key *,
const u_int8_t macaddr[IEEE80211_ADDR_LEN], struct ieee80211_node *);
void ieee80211_crypto_delglobalkeys(struct ieee80211vap *);
/*
* Template for a supported cipher. Ciphers register with the
* crypto code and are typically loaded as separate modules
* (the null cipher is always present).
* XXX may need refcnts
*/
struct ieee80211_cipher {
const char *ic_name; /* printable name */
u_int ic_cipher; /* IEEE80211_CIPHER_* */
u_int ic_header; /* size of privacy header (bytes) */
u_int ic_trailer; /* size of privacy trailer (bytes) */
u_int ic_miclen; /* size of mic trailer (bytes) */
void *(*ic_attach)(struct ieee80211vap *, struct ieee80211_key *);
void (*ic_detach)(struct ieee80211_key *);
int (*ic_setkey)(struct ieee80211_key *);
int (*ic_encap)(struct ieee80211_key *, struct sk_buff *, u_int8_t);
int (*ic_decap)(struct ieee80211_key *, struct sk_buff *, int);
int (*ic_enmic)(struct ieee80211_key *, struct sk_buff *, int);
int (*ic_demic)(struct ieee80211_key *, struct sk_buff *, int);
};
extern const struct ieee80211_cipher ieee80211_cipher_none;
void ieee80211_crypto_register(const struct ieee80211_cipher *);
void ieee80211_crypto_unregister(const struct ieee80211_cipher *);
int ieee80211_crypto_available(u_int);
struct ieee80211_key *ieee80211_crypto_encap(struct ieee80211_node *,
struct sk_buff *);
struct ieee80211_key *ieee80211_crypto_decap(struct ieee80211_node *,
struct sk_buff *, int);
/*
* Check and remove any MIC.
*/
static __inline int
ieee80211_crypto_demic(struct ieee80211vap *vap, struct ieee80211_key *k,
struct sk_buff *skb, int hdrlen)
{
const struct ieee80211_cipher *cip = k->wk_cipher;
return (cip->ic_miclen > 0 ? cip->ic_demic(k, skb, hdrlen) : 1);
}
/*
* Add any MIC.
*/
static __inline int
ieee80211_crypto_enmic(struct ieee80211vap *vap, struct ieee80211_key *k,
struct sk_buff *skb, int force)
{
const struct ieee80211_cipher *cip = k->wk_cipher;
return (cip->ic_miclen > 0 ? cip->ic_enmic(k, skb, force) : 1);
}
/*
* Reset key state to an unused state. The crypto
* key allocation mechanism ensures other state (e.g.
 * key data) is properly set up before a key is used.
*/
static __inline void
ieee80211_crypto_resetkey(struct ieee80211vap *vap, struct ieee80211_key *k,
u_int16_t ix)
{
	k->wk_cipher = &ieee80211_cipher_none;
k->wk_private = k->wk_cipher->ic_attach(vap, k);
k->wk_keyix = ix;
k->wk_flags = IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV;
}
/*
* Crypto-related notification methods.
*/
void ieee80211_notify_replay_failure(struct ieee80211vap *,
const struct ieee80211_frame *, const struct ieee80211_key *,
u_int64_t rsc);
void ieee80211_notify_michael_failure(struct ieee80211vap *,
const struct ieee80211_frame *, u_int keyix);
#endif /* defined(__KERNEL__) || defined(_KERNEL) */
#endif /* _NET80211_IEEE80211_CRYPTO_H_ */
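The declarations above imply a transmit-side flow along these lines; a minimal sketch (kernel context, illustrative function name, error handling reduced to drop-on-failure):
static int example_tx_protect(struct ieee80211vap *vap,
			      struct ieee80211_node *ni, struct sk_buff *skb)
{
	struct ieee80211_key *k;

	/* Select the key and prepend the cipher header (IV/extended IV) */
	k = ieee80211_crypto_encap(ni, skb);
	if (k == NULL)
		return -1;	/* no usable key: caller drops the frame */

	/* Software-MIC ciphers such as TKIP also need the MIC appended;
	 * ieee80211_crypto_enmic() returns 1 (success) when no MIC is needed. */
	if (!ieee80211_crypto_enmic(vap, k, skb, 0))
		return -1;

	return 0;
}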

View File

@ -0,0 +1,33 @@
/*
* Copyright (c) 2012 Quantenna Communications, Inc.
* All rights reserved.
*
* Common DFS re-entry definitions.
*/
#ifndef _IEEE80211_DFS_REENTRY_H
#define _IEEE80211_DFS_REENTRY_H
/*
* DFS-reentry
*/
#define IEEE80211_PICK_DOMIAN_MASK 0x0007
#define IEEE80211_PICK_ALL 0x0001 /* pick channel from all available channels */
#define IEEE80211_PICK_DFS 0x0002 /* pick channel from available DFS channel */
#define IEEE80211_PICK_NONDFS 0x0004 /* pick channel from available non-DFS channel */
#define IEEE80211_PICK_CONTROL_MASK 0x00F8
#define IEEE80211_PICK_SCAN_FLUSH 0x0008
#define IEEE80211_PICK_BG_ACTIVE 0x0010
#define IEEE80211_PICK_BG_PASSIVE_FAST 0x0020
#define IEEE80211_PICK_BG_PASSIVE_NORMAL 0x0040
#define IEEE80211_PICK_BG_PASSIVE_SLOW 0x0080
#define IEEE80211_PICK_BG_MODE_MASK 0x00F0
#define IEEE80211_PICK_ALGORITHM_MASK 0xFF00
#define IEEE80211_PICK_CLEAREST 0x0100 /* pick clearest channel */
#define IEEE80211_PICK_REENTRY 0x0200 /* pick channel again after DFS process */
#define IEEE80211_PICK_NOPICK 0x0400 /* do not pick channel */
#define IEEE80211_PICK_NOPICK_BG 0x0800 /* scan background and do not pick channel */
#define IEEE80211_PICK_DEFAULT (IEEE80211_PICK_ALL | IEEE80211_PICK_CLEAREST)
#endif
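A tiny illustration of composing a channel-pick request from the flag groups above (the particular combination is only an example):
/* Pick the clearest channel among non-DFS channels, flushing any cached
 * scan results first; compare with IEEE80211_PICK_DEFAULT above. */
static unsigned int example_pick_flags(void)
{
	return IEEE80211_PICK_NONDFS | IEEE80211_PICK_CLEAREST |
	       IEEE80211_PICK_SCAN_FLUSH;
}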

File diff suppressed because it is too large

View File

@ -0,0 +1,25 @@
/*
* Copyright (c) 2012 Quantenna Communications, Inc.
* All rights reserved.
*
* Common QOS definitions.
*/
#ifndef _IEEE80211_QOS_H
#define _IEEE80211_QOS_H
/* WME stream classes */
#define WME_AC_BE 0 /* best effort */
#define WME_AC_BK 1 /* background */
#define WME_AC_VI 2 /* video */
#define WME_AC_VO 3 /* voice */
enum {
IEEE80211_WMMPARAMS_CWMIN = 1,
IEEE80211_WMMPARAMS_CWMAX = 2,
IEEE80211_WMMPARAMS_AIFS = 3,
IEEE80211_WMMPARAMS_TXOPLIMIT = 4,
IEEE80211_WMMPARAMS_ACM = 5,
IEEE80211_WMMPARAMS_NOACKPOLICY = 6,
};
#endif

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,740 @@
/*SH1
*******************************************************************************
** **
** Copyright (c) 2009 - 2012 Quantenna Communications, Inc. **
** **
** File : qcsapi_driver.h **
** Description : **
** **
*******************************************************************************
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. The name of the author may not be used to endorse or promote products **
** derived from this software without specific prior written permission. **
** **
** Alternatively, this software may be distributed under the terms of the **
** GNU General Public License ("GPL") version 2, or (at your option) any **
** later version as published by the Free Software Foundation. **
** **
** In the case this software is distributed under the GPL license, **
** you should have received a copy of the GNU General Public License **
** along with this software; if not, write to the Free Software **
** Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA **
** **
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR **
** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. **
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, **
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY **
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT **
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. **
** **
*******************************************************************************
EH1*/
#ifndef _QCSAPI_DRIVER_H
#define _QCSAPI_DRIVER_H
#include "qcsapi_output.h"
/*
* get_api and set_api expect an interface (wifi0, eth1_0, etc.)
* get_system_value and set_system_value do NOT expect an interface. They apply to the entire device.
*/
typedef enum {
e_qcsapi_get_api = 1,
e_qcsapi_set_api,
e_qcsapi_get_system_value,
e_qcsapi_set_system_value,
/*GET API without interface name and other parameters as input*/
e_qcsapi_get_api_without_ifname_parameter,
/*SET API with interface name as parameter, but without other parameters as input*/
e_qcsapi_set_api_without_parameter,
/*GET API without interface name as parameter*/
e_qcsapi_get_api_without_ifname,
/*SET API without interface name as parameter*/
e_qcsapi_set_api_without_ifname,
e_qcsapi_nosuch_typeof_api = 0,
} qcsapi_typeof_api;
typedef enum {
e_qcsapi_option = 1,
e_qcsapi_counter,
e_qcsapi_rates,
e_qcsapi_modulation,
e_qcsapi_index,
e_qcsapi_select_SSID,
e_qcsapi_SSID_index,
e_qcsapi_LED,
e_qcsapi_file_path_config,
e_qcsapi_tdls_params,
e_qcsapi_tdls_oper,
e_qcsapi_board_parameter,
e_qcsapi_extender_params,
e_qcsapi_none,
e_qcsapi_nosuch_generic_parameter = 0
} qcsapi_generic_parameter_type;
/* enum to describe type of configuration and monitoring parameters */
typedef enum {
e_qcsapi_integer = 1,
e_qcsapi_unsigned_int,
e_qcsapi_wifi_mode,
e_qcsapi_SSID_param,
e_qcsapi_mac_addr,
e_qcsapi_string,
e_qcsapi_nosuch_specific_parameter = 0
} qcsapi_specific_parameter_type;
/*
* Abstract handle to reference a QCSAPI.
* Each QCSAPI entry point has a corresponding enum,
 * but a few additional enums are defined.
*/
typedef enum {
e_qcsapi_errno_get_message = 1,
e_qcsapi_first_entry_point = e_qcsapi_errno_get_message,
e_qcsapi_store_ipaddr,
e_qcsapi_interface_enable,
e_qcsapi_interface_get_BSSID,
e_qcsapi_interface_get_mac_addr,
e_qcsapi_interface_set_mac_addr,
e_qcsapi_interface_get_counter,
e_qcsapi_interface_get_counter64,
e_qcsapi_interface_get_status,
e_qcsapi_interface_get_ip4,
e_qcsapi_pm_get_counter,
e_qcsapi_pm_get_elapsed_time,
e_qcsapi_flash_image_update,
e_qcsapi_firmware_get_version,
e_qcsapi_system_get_time_since_start,
e_qcsapi_get_system_status,
e_qcsapi_get_random_seed,
e_qcsapi_set_random_seed,
e_qcsapi_led_get,
e_qcsapi_led_set,
e_qcsapi_led_pwm_enable,
e_qcsapi_led_brightness,
e_qcsapi_gpio_get_config,
e_qcsapi_gpio_set_config,
e_qcsapi_gpio_monitor_reset_device,
e_qcsapi_gpio_enable_wps_push_button,
e_qcsapi_file_path_get_config,
e_qcsapi_file_path_set_config,
e_qcsapi_wifi_set_wifi_macaddr,
e_qcsapi_wifi_create_restricted_bss,
e_qcsapi_wifi_create_bss,
e_qcsapi_wifi_remove_bss,
e_qcsapi_wifi_get_primary_interface,
e_qcsapi_wifi_get_interface_by_index,
e_qcsapi_wifi_get_mode,
e_qcsapi_wifi_set_mode,
e_qcsapi_wifi_reload_in_mode,
e_qcsapi_wifi_rfenable,
e_qcsapi_service_control,
e_qcsapi_wfa_cert,
e_qcsapi_wifi_rfstatus,
e_qcsapi_wifi_startprod,
e_qcsapi_wifi_get_bw,
e_qcsapi_wifi_set_bw,
e_qcsapi_wifi_get_BSSID,
e_qcsapi_wifi_get_config_BSSID,
e_qcsapi_wifi_ssid_set_bssid,
e_qcsapi_wifi_ssid_get_bssid,
e_qcsapi_wifi_get_SSID,
e_qcsapi_wifi_set_SSID,
e_qcsapi_wifi_get_channel,
e_qcsapi_wifi_set_channel,
e_qcsapi_wifi_get_auto_channel,
e_qcsapi_wifi_set_auto_channel,
e_qcsapi_wifi_get_standard,
e_qcsapi_wifi_get_dtim,
e_qcsapi_wifi_set_dtim,
e_qcsapi_wifi_get_assoc_limit,
e_qcsapi_wifi_set_assoc_limit,
e_qcsapi_wifi_get_bss_assoc_limit,
e_qcsapi_wifi_set_bss_assoc_limit,
e_qcsapi_interface_set_ip4,
e_qcsapi_wifi_get_list_channels,
e_qcsapi_wifi_get_mode_switch,
e_qcsapi_wifi_get_noise,
e_qcsapi_wifi_get_rssi_by_chain,
e_qcsapi_wifi_get_avg_snr,
e_qcsapi_wifi_get_phy_mode,
e_qcsapi_wifi_set_phy_mode,
e_qcsapi_wifi_get_option,
e_qcsapi_wifi_set_option,
e_qcsapi_wifi_get_rates,
e_qcsapi_wifi_set_rates,
e_qcsapi_wifi_get_max_bitrate,
e_qcsapi_wifi_set_max_bitrate,
e_qcsapi_wifi_get_beacon_type,
e_qcsapi_wifi_set_beacon_type,
e_qcsapi_wifi_get_beacon_interval,
e_qcsapi_wifi_set_beacon_interval,
e_qcsapi_get_board_parameter,
e_qcsapi_wifi_get_list_regulatory_regions,
e_qcsapi_wifi_get_regulatory_tx_power,
e_qcsapi_wifi_get_configured_tx_power,
e_qcsapi_wifi_set_regulatory_channel,
e_qcsapi_wifi_set_regulatory_region,
e_qcsapi_wifi_get_regulatory_region,
e_qcsapi_wifi_overwrite_country_code,
e_qcsapi_wifi_get_list_regulatory_channels,
e_qcsapi_wifi_get_list_regulatory_bands,
e_qcsapi_wifi_get_regulatory_db_version,
e_qcsapi_wifi_set_regulatory_tx_power_cap,
e_qcsapi_wifi_restore_regulatory_tx_power,
e_qcsapi_wifi_set_chan_pri_inactive,
e_qcsapi_wifi_set_chan_disabled,
e_qcsapi_wifi_get_chan_disabled,
e_qcsapi_wifi_get_tx_power,
e_qcsapi_wifi_set_tx_power,
e_qcsapi_wifi_get_tx_power_ext,
e_qcsapi_wifi_set_tx_power_ext,
e_qcsapi_wifi_get_chan_power_table,
e_qcsapi_wifi_set_chan_power_table,
e_qcsapi_wifi_get_bw_power,
e_qcsapi_wifi_set_bw_power,
e_qcsapi_wifi_get_bf_power,
e_qcsapi_wifi_set_bf_power,
e_qcsapi_wifi_get_power_selection,
e_qcsapi_wifi_set_power_selection,
e_qcsapi_wifi_get_carrier_interference,
e_qcsapi_wifi_get_congestion_idx,
e_qcsapi_wifi_get_supported_tx_power_levels,
e_qcsapi_wifi_get_current_tx_power_level,
e_qcsapi_wifi_set_power_constraint,
e_qcsapi_wifi_get_power_constraint,
e_qcsapi_wifi_set_tpc_interval,
e_qcsapi_wifi_get_tpc_interval,
e_qcsapi_wifi_get_assoc_records,
e_qcsapi_wifi_get_list_DFS_channels,
e_qcsapi_wifi_is_channel_DFS,
e_qcsapi_wifi_get_DFS_alt_channel,
e_qcsapi_wifi_set_DFS_alt_channel,
e_qcsapi_wifi_set_DFS_reentry,
e_qcsapi_wifi_get_scs_cce_channels,
e_qcsapi_wifi_get_dfs_cce_channels,
e_qcsapi_wifi_get_csw_records,
e_qcsapi_wifi_get_radar_status,
e_qcsapi_wifi_get_WEP_key_index,
e_qcsapi_wifi_set_WEP_key_index,
e_qcsapi_wifi_get_WEP_key_passphrase,
e_qcsapi_wifi_set_WEP_key_passphrase,
e_qcsapi_wifi_get_WEP_encryption_level,
e_qcsapi_wifi_get_basic_encryption_modes,
e_qcsapi_wifi_set_basic_encryption_modes,
e_qcsapi_wifi_get_basic_authentication_mode,
e_qcsapi_wifi_set_basic_authentication_mode,
e_qcsapi_wifi_get_WEP_key,
e_qcsapi_wifi_set_WEP_key,
e_qcsapi_wifi_get_WPA_encryption_modes,
e_qcsapi_wifi_set_WPA_encryption_modes,
e_qcsapi_wifi_get_WPA_authentication_mode,
e_qcsapi_wifi_set_WPA_authentication_mode,
e_qcsapi_wifi_get_interworking,
e_qcsapi_wifi_set_interworking,
e_qcsapi_wifi_get_80211u_params,
e_qcsapi_wifi_set_80211u_params,
e_qcsapi_security_get_nai_realms,
e_qcsapi_security_add_nai_realm,
e_qcsapi_security_del_nai_realm,
e_qcsapi_security_add_roaming_consortium,
e_qcsapi_security_del_roaming_consortium,
e_qcsapi_security_get_roaming_consortium,
e_qcsapi_security_get_venue_name,
e_qcsapi_security_add_venue_name,
e_qcsapi_security_del_venue_name,
e_qcsapi_security_get_oper_friendly_name,
e_qcsapi_security_add_oper_friendly_name,
e_qcsapi_security_del_oper_friendly_name,
e_qcsapi_security_add_hs20_conn_capab,
e_qcsapi_security_get_hs20_conn_capab,
e_qcsapi_security_del_hs20_conn_capab,
e_qcsapi_wifi_get_hs20_status,
e_qcsapi_wifi_set_hs20_status,
e_qcsapi_wifi_get_hs20_params,
e_qcsapi_wifi_set_hs20_params,
e_qcsapi_wifi_get_proxy_arp,
e_qcsapi_wifi_set_proxy_arp,
e_qcsapi_wifi_get_l2_ext_filter,
e_qcsapi_wifi_set_l2_ext_filter,
e_qcsapi_remove_11u_param,
e_qcsapi_remove_hs20_param,
e_qcsapi_wifi_get_IEEE11i_encryption_modes,
e_qcsapi_wifi_set_IEEE11i_encryption_modes,
e_qcsapi_wifi_get_IEEE11i_authentication_mode,
e_qcsapi_wifi_set_IEEE11i_authentication_mode,
e_qcsapi_wifi_get_michael_errcnt,
e_qcsapi_wifi_get_pre_shared_key,
e_qcsapi_wifi_set_pre_shared_key,
e_qcsapi_wifi_add_radius_auth_server_cfg,
e_qcsapi_wifi_del_radius_auth_server_cfg,
e_qcsapi_wifi_get_radius_auth_server_cfg,
e_qcsapi_wifi_set_own_ip_addr,
e_qcsapi_wifi_set_own_ip_address,
e_qcsapi_wifi_get_psk_auth_failures,
e_qcsapi_wifi_get_key_passphrase,
e_qcsapi_wifi_set_key_passphrase,
e_qcsapi_wifi_get_group_key_interval,
e_qcsapi_wifi_set_group_key_interval,
e_qcsapi_wifi_get_pmf,
e_qcsapi_wifi_set_pmf,
e_qcsapi_wifi_get_count_associations,
e_qcsapi_wifi_get_associated_device_mac_addr,
e_qcsapi_wifi_get_associated_device_ip_addr,
e_qcsapi_wifi_get_link_quality,
e_qcsapi_wifi_get_rssi_per_association,
e_qcsapi_wifi_get_rssi_in_dbm_per_association,
e_qcsapi_wifi_get_snr_per_association,
e_qcsapi_wifi_get_hw_noise_per_association,
e_qcsapi_wifi_get_rx_bytes_per_association,
e_qcsapi_wifi_get_tx_bytes_per_association,
e_qcsapi_wifi_get_rx_packets_per_association,
e_qcsapi_wifi_get_tx_packets_per_association,
e_qcsapi_wifi_get_tx_err_packets_per_association,
e_qcsapi_wifi_get_bw_per_association,
e_qcsapi_wifi_get_tx_phy_rate_per_association,
e_qcsapi_wifi_get_rx_phy_rate_per_association,
e_qcsapi_wifi_get_tx_mcs_per_association,
e_qcsapi_wifi_get_rx_mcs_per_association,
e_qcsapi_wifi_get_achievable_tx_phy_rate_per_association,
e_qcsapi_wifi_get_achievable_rx_phy_rate_per_association,
e_qcsapi_wifi_get_auth_enc_per_association,
e_qcsapi_wifi_get_tput_caps,
e_qcsapi_wifi_get_connection_mode,
e_qcsapi_wifi_get_vendor_per_association,
e_qcsapi_wifi_get_max_mimo,
e_qcsapi_wifi_get_node_counter,
e_qcsapi_wifi_get_node_param,
e_qcsapi_wifi_get_node_stats,
e_qcsapi_wifi_get_max_queued,
e_qcsapi_wifi_disassociate,
e_qcsapi_wifi_associate,
e_qcsapi_wifi_get_wpa_status,
e_qcsapi_wifi_get_auth_state,
e_qcsapi_wifi_get_disconn_info,
e_qcsapi_wifi_reset_disconn_info,
e_qcsapi_wps_registrar_report_button_press,
e_qcsapi_wps_registrar_report_pin,
e_qcsapi_wps_registrar_get_pp_devname,
e_qcsapi_wps_registrar_set_pp_devname,
e_qcsapi_wps_enrollee_report_button_press,
e_qcsapi_wps_enrollee_report_pin,
e_qcsapi_wps_enrollee_generate_pin,
e_qcsapi_wps_get_ap_pin,
e_qcsapi_wps_set_ap_pin,
e_qcsapi_wps_save_ap_pin,
e_qcsapi_wps_enable_ap_pin,
e_qcsapi_wps_get_sta_pin,
e_qcsapi_wps_get_state,
e_qcsapi_wps_get_configured_state,
e_qcsapi_wps_set_configured_state,
e_qcsapi_wps_get_runtime_state,
e_qcsapi_wps_get_allow_pbc_overlap_status,
e_qcsapi_wps_allow_pbc_overlap,
e_qcsapi_wps_get_param,
e_qcsapi_wps_set_param,
e_qcsapi_wps_set_access_control,
e_qcsapi_wps_get_access_control,
e_qcsapi_non_wps_set_pp_enable,
e_qcsapi_non_wps_get_pp_enable,
e_qcsapi_wps_cancel,
e_qcsapi_wps_set_pbc_in_srcm,
e_qcsapi_wps_get_pbc_in_srcm,
e_qcsapi_wps_timeout,
e_qcsapi_wps_on_hidden_ssid,
e_qcsapi_wps_on_hidden_ssid_status,
e_qcsapi_wps_upnp_enable,
e_qcsapi_wps_upnp_status,
e_qcsapi_wps_registrar_set_dfl_pbc_bss,
e_qcsapi_wps_registrar_get_dfl_pbc_bss,
e_qcsapi_wifi_set_dwell_times,
e_qcsapi_wifi_get_dwell_times,
e_qcsapi_wifi_set_bgscan_dwell_times,
e_qcsapi_wifi_get_bgscan_dwell_times,
e_qcsapi_wifi_start_scan,
e_qcsapi_wifi_cancel_scan,
e_qcsapi_wifi_get_scan_status,
e_qcsapi_wifi_get_cac_status,
e_qcsapi_wifi_wait_scan_completes,
e_qcsapi_wifi_set_scan_chk_inv,
e_qcsapi_wifi_get_scan_chk_inv,
e_qcsapi_SSID_create_SSID,
e_qcsapi_SSID_remove_SSID,
e_qcsapi_SSID_verify_SSID,
e_qcsapi_SSID_rename_SSID,
e_qcsapi_SSID_get_SSID_list,
e_qcsapi_SSID_get_protocol,
e_qcsapi_SSID_set_protocol,
e_qcsapi_SSID_get_encryption_modes,
e_qcsapi_SSID_set_encryption_modes,
e_qcsapi_SSID_get_group_encryption,
e_qcsapi_SSID_set_group_encryption,
e_qcsapi_SSID_get_authentication_mode,
e_qcsapi_SSID_set_authentication_mode,
e_qcsapi_SSID_get_pre_shared_key,
e_qcsapi_SSID_set_pre_shared_key,
e_qcsapi_SSID_get_key_passphrase,
e_qcsapi_SSID_set_key_passphrase,
e_qcsapi_SSID_get_pmf,
e_qcsapi_SSID_set_pmf,
e_qcsapi_SSID_get_wps_SSID,
e_qcsapi_wifi_vlan_config,
e_qcsapi_wifi_show_vlan_config,
e_qcsapi_enable_vlan_pass_through,
e_qcsapi_br_vlan_promisc,
e_qcsapi_add_ipff,
e_qcsapi_del_ipff,
e_qcsapi_get_ipff,
e_qcsapi_wifi_disable_wps,
e_qcsapi_wifi_get_results_AP_scan,
e_qcsapi_wifi_get_count_APs_scanned,
e_qcsapi_wifi_get_properties_AP,
e_qcsapi_wifi_get_mac_address_filtering,
e_qcsapi_wifi_set_mac_address_filtering,
e_qcsapi_wifi_is_mac_address_authorized,
e_qcsapi_wifi_get_authorized_mac_addresses,
e_qcsapi_wifi_get_denied_mac_addresses,
e_qcsapi_wifi_authorize_mac_address,
e_qcsapi_wifi_deny_mac_address,
e_qcsapi_wifi_remove_mac_address,
e_qcsapi_wifi_clear_mac_address_filters,
e_qcsapi_wifi_set_mac_address_reserve,
e_qcsapi_wifi_get_mac_address_reserve,
e_qcsapi_wifi_clear_mac_address_reserve,
e_qcsapi_wifi_backoff_fail_max,
e_qcsapi_wifi_backoff_timeout,
e_qcsapi_wifi_get_time_associated_per_association,
e_qcsapi_wifi_wds_add_peer,
e_qcsapi_wifi_wds_remove_peer,
e_qcsapi_wifi_wds_get_peer_address,
e_qcsapi_wifi_wds_set_psk,
e_qcsapi_wifi_wds_set_mode,
e_qcsapi_wifi_wds_get_mode,
e_qcsapi_wifi_qos_get_param,
e_qcsapi_wifi_qos_set_param,
e_qcsapi_wifi_get_wmm_ac_map,
e_qcsapi_wifi_set_wmm_ac_map,
e_qcsapi_wifi_get_dscp_8021p_map,
e_qcsapi_wifi_set_dscp_8021p_map,
e_qcsapi_wifi_get_dscp_ac_map,
e_qcsapi_wifi_set_dscp_ac_map,
e_qcsapi_wifi_get_priority,
e_qcsapi_wifi_set_priority,
e_qcsapi_wifi_get_airfair,
e_qcsapi_wifi_set_airfair,
e_qcsapi_config_get_parameter,
e_qcsapi_config_update_parameter,
e_qcsapi_config_get_ssid_parameter,
e_qcsapi_config_update_ssid_parameter,
e_qcsapi_bootcfg_get_parameter,
e_qcsapi_bootcfg_update_parameter,
e_qcsapi_bootcfg_commit,
e_qcsapi_wifi_start_cca,
e_qcsapi_wifi_get_mcs_rate,
e_qcsapi_wifi_set_mcs_rate,
e_qcsapi_wifi_enable_scs,
e_qcsapi_wifi_scs_switch_channel,
e_qcsapi_wifi_set_scs_verbose,
e_qcsapi_wifi_get_scs_status,
e_qcsapi_wifi_set_scs_smpl_enable,
e_qcsapi_wifi_set_scs_smpl_dwell_time,
e_qcsapi_wifi_set_scs_smpl_intv,
e_qcsapi_wifi_set_scs_intf_detect_intv,
e_qcsapi_wifi_set_scs_thrshld,
e_qcsapi_wifi_set_scs_report_only,
e_qcsapi_wifi_get_scs_report_stat,
e_qcsapi_wifi_set_scs_cca_intf_smth_fctr,
e_qcsapi_wifi_set_scs_chan_mtrc_mrgn,
e_qcsapi_wifi_get_scs_dfs_reentry_request,
e_qcsapi_wifi_get_scs_cca_intf,
e_qcsapi_wifi_get_scs_param,
e_qcsapi_wifi_set_scs_stats,
e_qcsapi_wifi_start_ocac,
e_qcsapi_wifi_stop_ocac,
e_qcsapi_wifi_get_ocac_status,
e_qcsapi_wifi_set_ocac_threshold,
e_qcsapi_wifi_set_ocac_dwell_time,
e_qcsapi_wifi_set_ocac_duration,
e_qcsapi_wifi_set_ocac_cac_time,
e_qcsapi_wifi_set_ocac_report_only,
e_qcsapi_wifi_start_dfs_s_radio,
e_qcsapi_wifi_stop_dfs_s_radio,
e_qcsapi_wifi_get_dfs_s_radio_status,
e_qcsapi_wifi_get_dfs_s_radio_availability,
e_qcsapi_wifi_set_dfs_s_radio_threshold,
e_qcsapi_wifi_set_dfs_s_radio_dwell_time,
e_qcsapi_wifi_set_dfs_s_radio_duration,
e_qcsapi_wifi_set_dfs_s_radio_cac_time,
e_qcsapi_wifi_set_dfs_s_radio_report_only,
e_qcsapi_wifi_set_dfs_s_radio_wea_duration,
e_qcsapi_wifi_set_dfs_s_radio_wea_cac_time,
e_qcsapi_wifi_set_ap_isolate,
e_qcsapi_wifi_get_ap_isolate,
e_qcsapi_wifi_get_pairing_id,
e_qcsapi_wifi_set_pairing_id,
e_qcsapi_wifi_get_pairing_enable,
e_qcsapi_wifi_set_pairing_enable,
e_qcsapi_wifi_get_rts_threshold,
e_qcsapi_wifi_set_rts_threshold,
e_qcsapi_wifi_set_txqos_sched_tbl,
e_qcsapi_wifi_get_txqos_sched_tbl,
e_qcsapi_wifi_set_vendor_fix,
e_qcsapi_power_save,
e_qcsapi_qpm_level,
e_qcsapi_get_interface_stats,
e_qcsapi_get_phy_stats,
e_qcsapi_reset_all_stats,
e_qcsapi_eth_phy_power_off,
e_qcsapi_test_traffic,
e_qcsapi_aspm_l1,
e_qcsapi_l1,
e_qcsapi_get_temperature,
e_qcsapi_telnet_enable,
e_qcsapi_restore_default_config,
e_qcsapi_set_soc_macaddr,
e_qcsapi_run_script,
e_qcsapi_qtm,
e_qcsapi_set_accept_oui_filter,
e_qcsapi_get_accept_oui_filter,
e_qcsapi_get_swfeat_list,
e_qcsapi_wifi_set_vht,
e_qcsapi_wifi_get_vht,
e_qcsapi_last_entry_point = e_qcsapi_wifi_get_vht,
e_qcsapi_help, /* dummy APIs; used to send messages within the driver programs */
e_qcsapi_exit,
e_qcsapi_aging,
/* qcsapi cal mode */
e_qcsapi_calcmd_set_test_mode,
e_qcsapi_calcmd_show_test_packet,
e_qcsapi_calcmd_send_test_packet,
e_qcsapi_calcmd_stop_test_packet,
e_qcsapi_calcmd_send_dc_cw_signal,
e_qcsapi_calcmd_stop_dc_cw_signal,
e_qcsapi_calcmd_get_test_mode_antenna_sel,
e_qcsapi_calcmd_get_test_mode_mcs,
e_qcsapi_calcmd_get_test_mode_bw,
e_qcsapi_calcmd_get_tx_power,
e_qcsapi_calcmd_set_tx_power,
e_qcsapi_calcmd_get_test_mode_rssi,
e_qcsapi_calcmd_set_mac_filter,
e_qcsapi_calcmd_get_antenna_count,
e_qcsapi_calcmd_clear_counter,
e_qcsapi_calcmd_get_info,
e_qcsapi_wifi_disable_dfs_channels,
e_qcsapi_get_carrier_id,
e_qcsapi_set_carrier_id,
e_qcsapi_wifi_enable_tdls,
e_qcsapi_wifi_enable_tdls_over_qhop,
e_qcsapi_wifi_get_tdls_status,
e_qcsapi_wifi_set_tdls_params,
e_qcsapi_wifi_get_tdls_params,
e_qcsapi_wifi_tdls_operate,
e_qcsapi_get_spinor_jedecid,
e_qcsapi_get_custom_value,
e_qcsapi_wifi_get_mlme_stats_per_mac,
e_qcsapi_wifi_get_mlme_stats_per_association,
e_qcsapi_wifi_get_mlme_stats_macs_list,
e_qcsapi_get_nss_cap,
e_qcsapi_set_nss_cap,
e_qcsapi_get_security_defer_mode,
e_qcsapi_set_security_defer_mode,
e_qcsapi_apply_security_config,
e_qcsapi_wifi_set_intra_bss_isolate,
e_qcsapi_wifi_get_intra_bss_isolate,
e_qcsapi_wifi_set_bss_isolate,
e_qcsapi_wifi_get_bss_isolate,
e_qcsapi_wowlan_host_state,
e_qcsapi_wowlan_match_type,
e_qcsapi_wowlan_L2_type,
e_qcsapi_wowlan_udp_port,
e_qcsapi_wowlan_pattern,
e_qcsapi_wowlan_get_host_state,
e_qcsapi_wowlan_get_match_type,
e_qcsapi_wowlan_get_L2_type,
e_qcsapi_wowlan_get_udp_port,
e_qcsapi_wowlan_get_pattern,
e_qcsapi_wifi_set_extender_params,
e_qcsapi_wifi_get_extender_status,
e_qcsapi_wifi_enable_bgscan,
e_qcsapi_wifi_get_bgscan_status,
e_qcsapi_get_uboot_info,
e_qcsapi_wifi_get_disassoc_reason,
e_qcsapi_wifi_get_tx_amsdu,
e_qcsapi_wifi_set_tx_amsdu,
e_qcsapi_is_startprod_done,
e_qcsapi_wifi_disassociate_sta,
e_qcsapi_wifi_reassociate,
e_qcsapi_get_bb_param,
e_qcsapi_set_bb_param,
e_qcsapi_wifi_set_scan_buf_max_size,
e_qcsapi_wifi_get_scan_buf_max_size,
e_qcsapi_wifi_set_scan_table_max_len,
e_qcsapi_wifi_get_scan_table_max_len,
e_qcsapi_wifi_set_enable_mu,
e_qcsapi_wifi_get_enable_mu,
e_qcsapi_wifi_set_mu_use_precode,
e_qcsapi_wifi_get_mu_use_precode,
e_qcsapi_wifi_set_mu_use_eq,
e_qcsapi_wifi_get_mu_use_eq,
e_qcsapi_wifi_get_mu_groups,
e_qcsapi_get_emac_switch,
e_qcsapi_set_emac_switch,
e_qcsapi_eth_dscp_map,
e_qcsapi_send_file,
e_qcsapi_wifi_verify_repeater_mode,
e_qcsapi_wifi_set_ap_interface_name,
e_qcsapi_wifi_get_ap_interface_name,
e_qcsapi_set_optim_stats,
e_qcsapi_set_sys_time,
e_qcsapi_get_sys_time,
e_qcsapi_get_eth_info,
e_qcsapi_wifi_block_bss,
e_qcsapi_nosuch_api = 0
} qcsapi_entry_point;
typedef struct qcsapi_generic_parameter {
qcsapi_generic_parameter_type generic_parameter_type;
/*
 * Selected QCSAPI entry points take BOTH a Service Set ID (SSID) AND an index
*/
qcsapi_unsigned_int index;
union
{
qcsapi_counter_type counter;
qcsapi_option_type option;
qcsapi_rate_type typeof_rates;
qcsapi_mimo_type modulation;
qcsapi_board_parameter_type board_param;
char the_SSID[ IW_ESSID_MAX_SIZE + 10 ];
qcsapi_tdls_type type_of_tdls;
qcsapi_tdls_oper tdls_oper;
qcsapi_extender_type type_of_extender;
} parameter_type;
} qcsapi_generic_parameter;
typedef struct call_qcsapi_bundle {
qcsapi_entry_point caller_qcsapi;
const char *caller_interface;
qcsapi_generic_parameter caller_generic_parameter;
qcsapi_output *caller_output;
} call_qcsapi_bundle;
typedef struct qcsapi_entry
{
qcsapi_entry_point e_entry_point;
qcsapi_typeof_api e_typeof_api;
qcsapi_generic_parameter_type e_generic_param_type;
qcsapi_specific_parameter_type e_specific_param_type;
} qcsapi_entry;
#ifdef __cplusplus
extern "C" {
#endif
extern const struct qcsapi_entry *entry_point_enum_to_table_entry( qcsapi_entry_point this_entry_point );
extern int lookup_generic_parameter_type(
qcsapi_entry_point qcsapi_selection,
qcsapi_generic_parameter_type *p_generic_parameter_type
);
#ifdef __cplusplus
}
#endif
#endif /* _QCSAPI_DRIVER_H */
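A hedged sketch of the lookup helpers declared above (the printing and the handling of the lookup's return value are assumptions; only the function signatures and enums come from this header):
#include <stdio.h>

static void example_describe_entry_point(qcsapi_entry_point ep)
{
	const struct qcsapi_entry *entry = entry_point_enum_to_table_entry(ep);
	qcsapi_generic_parameter_type generic_type = e_qcsapi_nosuch_generic_parameter;

	if (entry == NULL) {
		printf("unknown entry point %d\n", (int) ep);
		return;
	}

	/* get_api/set_api variants expect an interface name (wifi0, ...);
	 * the *_system_value variants apply to the whole device. */
	if (entry->e_typeof_api == e_qcsapi_get_api ||
	    entry->e_typeof_api == e_qcsapi_set_api)
		printf("entry point %d takes an interface argument\n", (int) ep);

	/* The return-value convention of the lookup is not shown in this header */
	lookup_generic_parameter_type(ep, &generic_type);
}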

View File

@ -0,0 +1,144 @@
/*SH0
*******************************************************************************
** **
** Copyright (c) 2009 - 2011 Quantenna Communications Inc **
** **
** File : call_qcsapi_local.c **
** Description : tiny wrapper to invoke call_qcsapi locally, from main() **
** **
*******************************************************************************
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. The name of the author may not be used to endorse or promote products **
** derived from this software without specific prior written permission. **
** **
** Alternatively, this software may be distributed under the terms of the **
** GNU General Public License ("GPL") version 2, or (at your option) any **
** later version as published by the Free Software Foundation. **
** **
** In the case this software is distributed under the GPL license, **
** you should have received a copy of the GNU General Public License **
** along with this software; if not, write to the Free Software **
** Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA **
** **
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR **
** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. **
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, **
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY **
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT **
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. **
** **
*******************************************************************************
EH0*/
#include "qcsapi_output.h"
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
int qcsapi_output_stdio_fn(struct qcsapi_output* qo,
enum qcsapi_print_type out_type, const char * format, va_list args)
{
FILE *file = stderr;
int rv;
if (out_type == OUT) {
file = stdout;
} else {
file = stderr;
}
rv = vfprintf(file, format, args);
return rv;
}
int qcsapi_output_buf_fn(struct qcsapi_output* qo,
enum qcsapi_print_type out_type, const char * format, va_list args)
{
const ssize_t realloc_threshold = 512;
ssize_t limit;
int ret;
struct qcsapi_output_bufinfo *bi = NULL;
if (out_type == OUT) {
bi = &qo->out;
} else {
bi = &qo->err;
}
limit = bi->bufsize - bi->bytes_written - 1;
if ((qo->flags & QCSAPI_OUTPUT_REALLOC) &&
(*bi->buf == NULL ||
limit < realloc_threshold)) {
char *newbuf;
ssize_t newbufsize;
newbufsize = bi->bufsize;
if (newbufsize < realloc_threshold)
newbufsize = realloc_threshold;
newbufsize <<= 1;
newbuf = realloc(*bi->buf, newbufsize);
if (newbuf == NULL) {
return -ENOMEM;
}
*bi->buf = newbuf;
bi->bufsize = newbufsize;
limit = bi->bufsize - bi->bytes_written - 1;
}
if (limit <= 0) {
ret = 0;
} else {
ret = vsnprintf(&(*bi->buf)[bi->bytes_written], limit, format, args);
bi->bytes_written += ret;
(*bi->buf)[bi->bytes_written] = '\0';
}
return ret;
}
struct qcsapi_output qcsapi_output_stdio_adapter(void)
{
struct qcsapi_output qo = {0};
qo.func = qcsapi_output_stdio_fn;
return qo;
}
struct qcsapi_output qcsapi_output_buf_adapter(
char **outbuf, size_t outbufsize,
char **errbuf, size_t errbufsize,
int realloc_allowed)
{
struct qcsapi_output qo;
qo.func = qcsapi_output_buf_fn;
qo.flags = realloc_allowed ? QCSAPI_OUTPUT_REALLOC : 0;
qo.out.buf = outbuf;
qo.out.bufsize = outbufsize;
qo.out.bytes_written = 0;
qo.err.buf = errbuf;
qo.err.bufsize = errbufsize;
qo.err.bytes_written = 0;
return qo;
}
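A usage sketch of the buffer adapter defined above, capturing print_out()/print_err() output into growable heap buffers (assumes qcsapi_output.h is included as at the top of this file; the function name is illustrative):
static void example_buffered_output(void)
{
	char *out = NULL;
	char *err = NULL;
	struct qcsapi_output qo =
		qcsapi_output_buf_adapter(&out, 0, &err, 0, 1 /* allow realloc */);

	print_out(&qo, "hello %s\n", "world");	/* accumulated in 'out' */
	print_err(&qo, "oops: %d\n", 42);	/* accumulated in 'err' */

	printf("captured: %s", out ? out : "(none)");
	free(out);
	free(err);
}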

View File

@ -0,0 +1,98 @@
/*SH1
*******************************************************************************
** **
** Copyright (c) 2009 - 2011 Quantenna Communications Inc **
** **
** File : qcsapi_output.h **
** Description : **
** **
*******************************************************************************
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. The name of the author may not be used to endorse or promote products **
** derived from this software without specific prior written permission. **
** **
** Alternatively, this software may be distributed under the terms of the **
** GNU General Public License ("GPL") version 2, or (at your option) any **
** later version as published by the Free Software Foundation. **
** **
** In the case this software is distributed under the GPL license, **
** you should have received a copy of the GNU General Public License **
** along with this software; if not, write to the Free Software **
** Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA **
** **
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR **
** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. **
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, **
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY **
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT **
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. **
** **
*******************************************************************************
EH1*/
#ifndef _QCSAPI_OUTPUT_H
#define _QCSAPI_OUTPUT_H
#include <sys/types.h>
#include <stdarg.h>
enum qcsapi_print_type {
OUT,
ERR,
};
struct qcsapi_output_bufinfo {
char **buf;
ssize_t bufsize;
ssize_t bytes_written;
};
typedef struct qcsapi_output {
int (*func)(struct qcsapi_output*, enum qcsapi_print_type, const char *, va_list args);
struct qcsapi_output_bufinfo out;
struct qcsapi_output_bufinfo err;
#define QCSAPI_OUTPUT_REALLOC 0x1
int flags;
} qcsapi_output;
extern struct qcsapi_output qcsapi_output_stdio_adapter(void);
extern struct qcsapi_output qcsapi_output_buf_adapter(char **stdout_buf, size_t stdout_bufsize,
char **stderr_buf, size_t stderr_bufsize, int realloc_allowed);
static inline int print_out(struct qcsapi_output *output, const char *format, ...)
{
int ret;
va_list args;
va_start(args, format);
ret = output->func(output, OUT, format, args);
va_end(args);
return ret;
}
static inline int print_err(struct qcsapi_output *output, const char *format, ...)
{
int ret;
va_list args;
va_start(args, format);
ret = output->func(output, ERR, format, args);
va_end(args);
return ret;
}
#endif /* _QCSAPI_OUTPUT_H */
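For completeness, the stdio adapter is used the same way; a minimal sketch:
static void example_stdio_output(void)
{
	struct qcsapi_output qo = qcsapi_output_stdio_adapter();

	print_out(&qo, "status: %s\n", "ok");	/* routed to stdout */
	print_err(&qo, "note: %s\n", "none");	/* routed to stderr */
}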

View File

@ -0,0 +1,77 @@
/*SH0
*******************************************************************************
** **
** Copyright (c) 2009 - 2011 Quantenna Communications Inc **
** **
** File : call_qcsapi_local.c **
** Description : tiny wrapper to invoke call_qcsapi locally, from main() **
** **
*******************************************************************************
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. The name of the author may not be used to endorse or promote products **
** derived from this software without specific prior written permission. **
** **
** Alternatively, this software may be distributed under the terms of the **
** GNU General Public License ("GPL") version 2, or (at your option) any **
** later version as published by the Free Software Foundation. **
** **
** In the case this software is distributed under the GPL license, **
** you should have received a copy of the GNU General Public License **
** along with this software; if not, write to the Free Software **
** Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA **
** **
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR **
** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. **
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, **
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY **
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT **
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. **
** **
*******************************************************************************
EH0*/
#include <stdio.h>
#include <call_qcsapi.h>
#include <qcsapi_output.h>
#include <qcsapi_rpc/client/qcsapi_rpc_client.h>
#include <qcsapi_rpc/generated/qcsapi_rpc.h>
#include <qcsapi_rpc_common/common/rpc_pci.h>
int main(int argc, char **argv)
{
int ret;
char *host;
CLIENT *clnt;
struct qcsapi_output output;
output = qcsapi_output_stdio_adapter();
host = "localhost";
clnt = clnt_pci_create(host, QCSAPI_PROG, QCSAPI_VERS, NULL);
if (clnt == NULL) {
clnt_pcreateerror(host);
exit (1);
}
client_qcsapi_set_rpcclient(clnt);
ret = qcsapi_main(&output, argc, argv);
clnt_destroy(clnt);
return ret;
}

View File

@ -0,0 +1,61 @@
/*SH0
*******************************************************************************
** **
** Copyright (c) 2011 Quantenna Communications Inc **
** **
** File : qcsapi_rpc_client.h **
** Description : Client-side declarations for the QCSAPI RPC transport **
** (RPC client handle setter and progress callback hooks). **
** **
*******************************************************************************
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. The name of the author may not be used to endorse or promote products **
** derived from this software without specific prior written permission. **
** **
** Alternatively, this software may be distributed under the terms of the **
** GNU General Public License ("GPL") version 2, or (at your option) any **
** later version as published by the Free Software Foundation. **
** **
** In the case this software is distributed under the GPL license, **
** you should have received a copy of the GNU General Public License **
** along with this software; if not, write to the Free Software **
** Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA **
** **
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR **
** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. **
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, **
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY **
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT **
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. **
** **
*******************************************************************************
EH0*/
#ifndef __QCSAPI_RPC_CLIENT_H__
#define __QCSAPI_RPC_CLIENT_H__
#include <rpc/rpc.h>
typedef void (*client_qcsapi_callback_pre_t)(const char *);
typedef void (*client_qcsapi_callback_post_t)(const char *, int was_error);
typedef void (*client_qcsapi_callback_reconnect_t)(const char *);
extern void client_qcsapi_set_rpcclient(CLIENT * clnt);
extern void client_qcsapi_set_callbacks(client_qcsapi_callback_pre_t,
client_qcsapi_callback_post_t,
client_qcsapi_callback_reconnect_t);
#endif /* __QCSAPI_RPC_CLIENT_H__ */
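A hedged sketch of installing client-side callbacks with the hooks declared above (when each callback fires is inferred from the typedef names only; the bodies are placeholders):
static void example_pre(const char *api_name)
{
	/* presumably invoked before each remote QCSAPI call */
}

static void example_post(const char *api_name, int was_error)
{
	/* presumably invoked after each remote QCSAPI call completes */
}

static void example_reconnect(const char *api_name)
{
	/* presumably invoked when the RPC transport reconnects */
}

static void example_install(CLIENT *clnt)
{
	client_qcsapi_set_rpcclient(clnt);
	client_qcsapi_set_callbacks(example_pre, example_post, example_reconnect);
}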

View File

@ -0,0 +1,113 @@
/*SH0
*******************************************************************************
** **
** Copyright (c) 2009 - 2011 Quantenna Communications Inc **
** **
** File : call_qcsapi_local.c **
** Description : tiny wrapper to invoke call_qcsapi locally, from main() **
** **
*******************************************************************************
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. The name of the author may not be used to endorse or promote products **
** derived from this software without specific prior written permission. **
** **
** Alternatively, this software may be distributed under the terms of the **
** GNU General Public License ("GPL") version 2, or (at your option) any **
** later version as published by the Free Software Foundation. **
** **
** In the case this software is distributed under the GPL license, **
** you should have received a copy of the GNU General Public License **
** along with this software; if not, write to the Free Software **
** Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA **
** **
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR **
** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. **
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, **
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY **
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT **
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. **
** **
*******************************************************************************
EH0*/
#include <stdio.h>
#include <call_qcsapi.h>
#include <qcsapi_output.h>
#include <qcsapi_rpc_common/client/find_host_addr.h>
#include <qcsapi_rpc/client/qcsapi_rpc_client.h>
#include <qcsapi_rpc/generated/qcsapi_rpc.h>
static int client_qcsapi_get_udp_retry_timeout(int *argc, char ***argv)
{
int timeout = -1;
	if (argc && argv && *argc >= 3 && strcmp((*argv)[1], "--udp-retry-timeout") == 0) {
		timeout = atoi((const char *)(*argv)[2]);
		/* move the program name (argv[0]) forward */
		(*argv)[2] = (*argv)[0];
		/* skip over the --udp-retry-timeout <value> arguments */
*argc = *argc - 2;
*argv = &(*argv)[2];
}
return timeout;
}
int main(int argc, char **argv)
{
int ret;
const char *host;
int udp_retry_timeout;
CLIENT *clnt;
struct qcsapi_output output;
output = qcsapi_output_stdio_adapter();
host = client_qcsapi_find_host_addr(&argc, &argv);
if (!host) {
client_qcsapi_find_host_errmsg(argv[0]);
exit(1);
}
udp_retry_timeout = client_qcsapi_get_udp_retry_timeout(&argc, &argv);
clnt = clnt_create(host, QCSAPI_PROG, QCSAPI_VERS, "udp");
if (clnt == NULL) {
clnt = clnt_create(host, QCSAPI_PROG, QCSAPI_VERS, "tcp");
} else {
if (udp_retry_timeout>0) {
struct timeval value;
value.tv_sec = (time_t)udp_retry_timeout;
value.tv_usec = (suseconds_t)0;
clnt_control(clnt, CLSET_RETRY_TIMEOUT, (char *)&value);
}
}
if (clnt == NULL) {
clnt_pcreateerror(host);
exit(1);
}
client_qcsapi_set_rpcclient(clnt);
ret = qcsapi_main(&output, argc, argv);
clnt_destroy(clnt);
return ret;
}

View File

@ -0,0 +1,88 @@
/*SH0
*******************************************************************************
** **
** Copyright (c) 2014 Quantenna Communications Inc **
** **
** **
*******************************************************************************
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. The name of the author may not be used to endorse or promote products **
** derived from this software without specific prior written permission. **
** **
** Alternatively, this software may be distributed under the terms of the **
** GNU General Public License ("GPL") version 2, or (at your option) any **
** later version as published by the Free Software Foundation. **
** **
** In the case this software is distributed under the GPL license, **
** you should have received a copy of the GNU General Public License **
** along with this software; if not, write to the Free Software **
** Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA **
** **
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR **
** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. **
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, **
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY **
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT **
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. **
** **
*******************************************************************************
EH0*/
#include <call_qcsapi.h>
#include <qcsapi_output.h>
#include <qcsapi_rpc/client/qcsapi_rpc_client.h>
#include <qcsapi_rpc/generated/qcsapi_rpc.h>
#include <qcsapi_rpc_common/common/rpc_raw.h>
#include <unistd.h>
int main(int argc, char **argv)
{
CLIENT *clnt;
struct qcsapi_output output;
uint8_t dst_mac[ETH_ALEN];
int ret;
if (geteuid()) {
printf("QRPC: only root can do that\n");
exit(1);
}
if (argc < 3) {
printf("QRPC: <src_ifname> <dst_mac_addr>\n");
exit(1);
}
if (str_to_mac(argv[2], dst_mac) < 0) {
printf("QRPC: Wrong destination MAC address format. "
"Use the following format: XX:XX:XX:XX:XX:XX\n");
exit(1);
}
output = qcsapi_output_stdio_adapter();
clnt = qrpc_clnt_raw_create(QCSAPI_PROG, QCSAPI_VERS, argv[1], dst_mac, QRPC_QCSAPI_RPCD_SID);
if (clnt == NULL) {
clnt_pcreateerror("QRPC: ");
exit (1);
}
client_qcsapi_set_rpcclient(clnt);
argv[2] = argv[0];
ret = qcsapi_main(&output, argc - 2, &argv[2]);
clnt_destroy(clnt);
return ret;
}
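The helper str_to_mac() used above comes from the Quantenna rpc_raw headers and its implementation is not shown in this diff. Below is a minimal sketch of the parsing it is assumed to perform (colon-separated hex bytes, 0 on success, -1 on a malformed address); the real helper may differ.
#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_ETH_ALEN 6

static int example_str_to_mac(const char *str, uint8_t *mac)
{
	unsigned int b[EXAMPLE_ETH_ALEN];
	int i;

	/* expect exactly six hex fields: XX:XX:XX:XX:XX:XX */
	if (sscanf(str, "%x:%x:%x:%x:%x:%x",
		   &b[0], &b[1], &b[2], &b[3], &b[4], &b[5]) != EXAMPLE_ETH_ALEN)
		return -1;

	for (i = 0; i < EXAMPLE_ETH_ALEN; i++) {
		if (b[i] > 0xff)
			return -1;
		mac[i] = (uint8_t)b[i];
	}
	return 0;
}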

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,157 @@
/*SH0
*******************************************************************************
** **
** Copyright (c) 2009 - 2011 Quantenna Communications Inc **
** **
** File : find_host_addr.c **
** Description : locate the remote host address for QCSAPI RPC clients **
** **
*******************************************************************************
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. The name of the author may not be used to endorse or promote products **
** derived from this software without specific prior written permission. **
** **
** Alternatively, this software may be distributed under the terms of the **
** GNU General Public License ("GPL") version 2, or (at your option) any **
** later version as published by the Free Software Foundation. **
** **
** In the case this software is distributed under the GPL license, **
** you should have received a copy of the GNU General Public License **
** along with this software; if not, write to the Free Software **
** Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA **
** **
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR **
** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. **
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, **
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY **
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT **
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. **
** **
*******************************************************************************
EH0*/
#include <qcsapi_rpc_common/client/find_host_addr.h>
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#define QCSAPI_HOST_ENV_VAR "QCSAPI_RPC_TARGET"
static const char * const cfg_file_paths[] = {
"/mnt/jffs2/rmt_ip.conf",
"/etc/qcsapi_target_ip.conf",
NULL /* last entry must be null */
};
#define MAX_HOSTNAME_SIZE 128
void client_qcsapi_find_host_errmsg(const char *progname)
{
int i;
fprintf(stderr, "No remote host configured! Remote host config is\n");
fprintf(stderr, "evaluated in the following order:\n");
fprintf(stderr, " 1) Command line parameter:\n");
fprintf(stderr, " %s --host <host> <args>\n", progname);
fprintf(stderr, " 2) Environment variable:\n");
fprintf(stderr, " export %s=<host>\n", QCSAPI_HOST_ENV_VAR);
fprintf(stderr, " %s <args>\n", progname);
fprintf(stderr, " 3) Configuration files, in order:\n");
for (i = 0; cfg_file_paths[i]; i++) {
fprintf(stderr, " %s\n", cfg_file_paths[i]);
}
}
static void trim_trailing_space(char *buf)
{
int i;
for (i = (int)strlen(buf) - 1; i >= 0 && isspace((unsigned char)buf[i]); i--) {
buf[i] = '\0';
}
}
static const char *first_nonspace(const char *buf)
{
while (*buf && isspace(*buf)) {
buf++;
}
return buf;
}
static const char * client_qcsapi_find_host_read_file(const char * const filename)
{
static char hostbuf[MAX_HOSTNAME_SIZE];
const char* host = NULL;
char* fret;
FILE *file = fopen(filename, "r");
if (file == NULL) {
/* files may legitimately not exist */
return NULL;
}
/* assume the file contains the target host on the first line */
fret = fgets(hostbuf, MAX_HOSTNAME_SIZE, file);
if (fret || feof(file)) {
trim_trailing_space(hostbuf);
host = first_nonspace(hostbuf);
} else {
fprintf(stderr, "%s: error reading file '%s': %s\n",
__FUNCTION__, filename, strerror(errno));
}
fclose(file);
return host;
}
const char* client_qcsapi_find_host_addr(int *argc, char ***argv)
{
int i;
const char *host;
/* check for command line arguments */
if (argc && argv && *argc >= 2 && strcmp((*argv)[1], "--host") == 0) {
host = (*argv)[2];
/* move program argv[0] */
(*argv)[2] = (*argv)[0];
/* skip over --host <arg> args */
*argc = *argc - 2;
*argv = &(*argv)[2];
return host;
}
/* check for environment variables */
host = getenv(QCSAPI_HOST_ENV_VAR);
if (host) {
return host;
}
/* check for config files */
for (i = 0; cfg_file_paths[i]; i++) {
host = client_qcsapi_find_host_read_file(cfg_file_paths[i]);
if (host) {
return host;
}
}
return NULL;
}
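A usage sketch of how a front end is expected to consume this helper: the --host arguments, if present, are stripped so the remaining argv starts at the real command. The "rpc_tool" invocation and host value in the comment are illustrative only; the header path matches the include used by the remote call_qcsapi client above.
#include <stdio.h>
#include <stdlib.h>
#include <qcsapi_rpc_common/client/find_host_addr.h>

int main(int argc, char **argv)
{
	/* e.g. invoked as: rpc_tool --host 1.1.1.2 get_mode wifi0
	 * After the call below, argc/argv have been shifted so that the
	 * remaining arguments start at the real command ("get_mode"). */
	const char *host = client_qcsapi_find_host_addr(&argc, &argv);

	if (!host) {
		client_qcsapi_find_host_errmsg(argv[0]);
		return EXIT_FAILURE;
	}
	printf("RPC target: %s, %d argument(s) left\n", host, argc - 1);
	return EXIT_SUCCESS;
}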

View File

@@ -0,0 +1,52 @@
/*SH0
*******************************************************************************
** **
** Copyright (c) 2009 - 2011 Quantenna Communications Inc **
** **
** File : find_host_addr.h **
** Description : declarations for locating the QCSAPI RPC host address **
** **
*******************************************************************************
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. The name of the author may not be used to endorse or promote products **
** derived from this software without specific prior written permission. **
** **
** Alternatively, this software may be distributed under the terms of the **
** GNU General Public License ("GPL") version 2, or (at your option) any **
** later version as published by the Free Software Foundation. **
** **
** In the case this software is distributed under the GPL license, **
** you should have received a copy of the GNU General Public License **
** along with this software; if not, write to the Free Software **
** Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA **
** **
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR **
** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES**
** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. **
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, **
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT **
** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,**
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY **
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT **
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF **
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. **
** **
*******************************************************************************
EH0*/
#ifndef __QCSAPI_FIND_HOST_ADDR_H__
#define __QCSAPI_FIND_HOST_ADDR_H__
extern const char* client_qcsapi_find_host_addr(int *argc, char ***argv);
extern void client_qcsapi_find_host_errmsg(const char *progname);
#endif /* __QCSAPI_FIND_HOST_ADDR_H__ */

View File

@@ -0,0 +1,227 @@
/*
* Copyright (c) 2015 Quantenna Communications, Inc.
* All rights reserved.
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#ifndef _GNU_SOURCE
#include <libgen.h>
#endif
#include <string.h>
#include "qcsapi_rpc_common/common/rpc_raw.h"
#define QFTC_READ_TIMEOUT_MS (250)
#define QFTC_CONNECT_RET_LIMIT 5
#define QFTC_RECV_RETRY_LIMIT 4
static struct qftc_cfg_t {
struct qftp_raw_ethpkt *send_buf;
struct qftp_raw_ethpkt *recv_buf;
struct qftp_ack_nack_pkt *recv_payload;
struct qftp_data_pkt *send_payload;
struct sockaddr_ll dst_addr;
int if_index;
int sock_fd;
int fd;
} qftc_cfg;
static void qftc_clean(void)
{
free(qftc_cfg.send_buf);
free(qftc_cfg.recv_buf);
if (qftc_cfg.fd >= 0)
close(qftc_cfg.fd);
if (qftc_cfg.sock_fd >= 0)
close(qftc_cfg.sock_fd);
}
static int qftc_init(const char *file_path_name, const char *sif_name, const uint8_t *dmac_addr)
{
qftc_cfg.sock_fd = -1;
qftc_cfg.send_buf = NULL;
qftc_cfg.recv_buf = NULL;
qftc_cfg.fd = open(file_path_name, O_RDONLY);
if (qftc_cfg.fd < 0) {
printf("Failed to open %s file\n", file_path_name);
return -1;
}
qftc_cfg.sock_fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
if (qftc_cfg.sock_fd < 0)
return -1;
if (qrpc_set_prot_filter(qftc_cfg.sock_fd, QFTP_RAW_SOCK_PROT) < 0) {
return -1;
}
qftc_cfg.send_buf = malloc(sizeof(*qftc_cfg.send_buf));
qftc_cfg.recv_buf = malloc(sizeof(*qftc_cfg.recv_buf));
if (!qftc_cfg.send_buf || !qftc_cfg.recv_buf) {
return -1;
}
qftc_cfg.send_payload = (struct qftp_data_pkt *)&qftc_cfg.send_buf->payload;
qftc_cfg.recv_payload = (struct qftp_ack_nack_pkt *)&qftc_cfg.recv_buf->payload;
qftc_cfg.if_index = qrpc_clnt_raw_config_dst(qftc_cfg.sock_fd, sif_name,
&qftc_cfg.dst_addr, dmac_addr,
(struct q_raw_ethoui_hdr *)
qftc_cfg.send_buf,
QFTP_RAW_SOCK_PROT);
if (qftc_cfg.if_index < 0) {
return -1;
}
return 0;
}
static uint32_t qftc_compose_connect_cmd(struct qftp_connect_pkt * const connect_payload,
const char *file_path_name)
{
struct stat file_stat;
memset(&file_stat, 0, sizeof(file_stat));
if (!stat(file_path_name, &file_stat) && (file_stat.st_mode & S_IFREG)) {
connect_payload->sub_type = QFTP_FRAME_TYPE_CONNECT;
connect_payload->seq = 0;
connect_payload->image_size = file_stat.st_size;
strcpy(connect_payload->image_name, basename((char *)file_path_name));
return (sizeof(struct qftp_connect_pkt) +
strlen(connect_payload->image_name));
}
return 0;
}
static uint32_t qftc_compose_data_cmd(void)
{
ssize_t read_bytes;
const size_t max_data_len = ETH_FRAME_LEN - QFTP_DATA_PKT_HDR_SIZE;
read_bytes = read(qftc_cfg.fd, qftc_cfg.send_payload->data, max_data_len);
qftc_cfg.send_payload->sub_type = QFTP_FRAME_TYPE_DATA;
++qftc_cfg.send_payload->seq;
return read_bytes;
}
static int qftc_send_cmd(const uint32_t cmd_size)
{
ssize_t sent_bytes;
do {
sent_bytes = sendto(qftc_cfg.sock_fd, qftc_cfg.send_buf, cmd_size, 0,
(struct sockaddr *)&qftc_cfg.dst_addr,
sizeof(qftc_cfg.dst_addr));
} while (sent_bytes < 0 && errno == EINTR);
return sent_bytes;
}
static int qftc_recv_cmd(void)
{
struct sockaddr_ll lladdr;
socklen_t addrlen = sizeof(lladdr);
ssize_t bytes_recv = -1;
int retry_count = 0;
memset(&lladdr, 0, sizeof(lladdr));
do {
if (!qrpc_raw_read_timeout(qftc_cfg.sock_fd, QFTC_READ_TIMEOUT_MS)) {
do {
bytes_recv = recvfrom(qftc_cfg.sock_fd, qftc_cfg.recv_buf,
sizeof(*qftc_cfg.recv_buf),
MSG_DONTWAIT, (struct sockaddr *)&lladdr,
&addrlen);
} while (bytes_recv < 0 && errno == EINTR);
} else if (++retry_count > QFTC_RECV_RETRY_LIMIT) {
break;
}
} while ((lladdr.sll_ifindex != qftc_cfg.if_index) || (lladdr.sll_pkttype != PACKET_HOST));
return retry_count > QFTC_RECV_RETRY_LIMIT ? -1 : bytes_recv;
}
static int qftc_connect(const char *file_path_name)
{
uint32_t connect_cmd_hdr_size;
ssize_t bytes_recv;
int retry_count = 0;
int op_failed = 0;
connect_cmd_hdr_size = qftc_compose_connect_cmd((struct qftp_connect_pkt *)
&qftc_cfg.send_buf->payload,
file_path_name);
if (!connect_cmd_hdr_size) {
return -1;
}
connect_cmd_hdr_size += sizeof(struct q_raw_ethoui_hdr);
do {
/* Sending CONNECT command */
if (qftc_send_cmd(connect_cmd_hdr_size) < 0) {
op_failed = 1;
break;
}
/* Waiting for ACK */
bytes_recv = qftc_recv_cmd();
/* success requires a complete ACK/NACK frame that is actually an ACK */
if ((bytes_recv >= (ssize_t)QFTP_ACK_NACK_FRAME_LEN) &&
(qftc_cfg.recv_payload->sub_type == QFTP_FRAME_TYPE_ACK)) {
break;
}
} while (++retry_count < QFTC_CONNECT_RET_LIMIT);
if (op_failed || retry_count >= QFTC_CONNECT_RET_LIMIT)
return -1;
return 0;
}
int qftc_start(const char *file_path_name, const char *sif_name, const uint8_t *dmac_addr)
{
ssize_t read_bytes;
int op_failed = 0;
if (qftc_init(file_path_name, sif_name, dmac_addr) < 0 ||
qftc_connect(file_path_name) < 0) {
qftc_clean();
return -1;
}
read_bytes = qftc_compose_data_cmd();
/* Start transmitting image file */
while (read_bytes > 0) {
/* Sending DATA command */
if (qftc_send_cmd(QFTP_DATA_PKT_HDR_SIZE + read_bytes) < 0) {
op_failed = 1;
break;
}
/* Receiving ACK */
if ((qftc_recv_cmd() < 0) ||
(qftc_cfg.send_payload->seq != qftc_cfg.recv_payload->seq) ||
(qftc_cfg.recv_payload->sub_type != QFTP_FRAME_TYPE_ACK)) {
op_failed = 1;
break;
}
read_bytes = qftc_compose_data_cmd();
}
qftc_clean();
if (op_failed || (read_bytes < 0))
return -1;
return 0;
}
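A usage sketch only: pushing an image with the raw-Ethernet file transfer client defined above. The interface name, image path and destination MAC are illustrative, and the include path for the companion qftc.h header (shown next in this diff) is assumed; qftc_start() must run as root because it opens an AF_PACKET socket.
#include <stdio.h>
#include <stdint.h>
#include "qcsapi_rpc_common/client/qftc.h"	/* assumed location of qftc.h */

int main(void)
{
	/* Illustrative MAC of the Quantenna module on the host interface. */
	const uint8_t dst_mac[6] = { 0x00, 0x26, 0x86, 0x00, 0x00, 0x01 };

	if (qftc_start("/tmp/topaz-linux.lzma.img", "eth1", dst_mac) < 0) {
		fprintf(stderr, "image transfer failed\n");
		return 1;
	}
	return 0;
}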

View File

@@ -0,0 +1,8 @@
/*
* Copyright (c) 2015 Quantenna Communications, Inc.
* All rights reserved.
*/
#ifndef __QCSAPI_QFTC_H__
#define __QCSAPI_QFTC_H__
extern int qftc_start(const char *file_path_name, const char *sif_name, const uint8_t *dmac_addr);
#endif

View File

@@ -0,0 +1,504 @@
/*
* Copyright (C) 1987, Sun Microsystems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of Sun Microsystems, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <alloca.h>
#include <errno.h>
#include <string.h>
#include <rpc/rpc.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>
#include <netdb.h>
#include <linux/netlink.h>
#include <rpc/clnt.h>
#include <sys/poll.h>
#include <assert.h>
#include <qcsapi_rpc_common/common/rpc_pci.h>
#ifndef PCIE_RPC_TYPE
#error "Not configure PCIE_RPC_TYPE"
#else
#if (PCIE_RPC_TYPE != RPC_TYPE_CALL_QCSAPI_PCIE) && (PCIE_RPC_TYPE != RPC_TYPE_QCSAPI_PCIE)
#error "Configuration invalid value for PCIE_RPC_TYPE"
#endif
#endif
/*
* Private data kept per client handle
*/
struct cu_data {
int cu_sock;
struct sockaddr_nl cu_saddr;
struct sockaddr_nl cu_daddr;
//struct sockaddr_in cu_raddr;
//int cu_rlen;
int cu_slen;
int cu_dlen;
struct timeval cu_wait;
struct timeval cu_total;
struct rpc_err cu_error;
XDR cu_outxdrs;
u_int cu_xdrpos;
u_int cu_sendsz;
u_int cu_recvsz;
char *cu_outbuf;
char *cu_inbuf;
struct nlmsghdr *cu_reqnlh;
struct nlmsghdr *cu_respnlh;
};
static CLIENT *_clnt_pci_create(int sock_fd,
struct sockaddr_nl *src,
struct sockaddr_nl *dst,
u_long prog, u_long vers);
/*
* Generic client creation: takes (hostname, program-number, protocol) and
* returns client handle. Default options are set, which the user can
* change using the rpc equivalent of ioctl()'s.
*/
CLIENT *clnt_pci_create(const char *hostname,
u_long prog, u_long vers, const char *proto)
{
CLIENT *client;
struct sockaddr_nl src_addr, dest_addr;
int sock_fd;
sock_fd = socket(PF_NETLINK, SOCK_RAW, NETLINK_RPC_PCI_CLNT);
if (sock_fd < 0)
goto err;
memset(&src_addr, 0, sizeof(src_addr));
src_addr.nl_family = AF_NETLINK;
src_addr.nl_pid = getpid(); /* self pid */
bind(sock_fd, (struct sockaddr *)&src_addr, sizeof(src_addr));
memset(&dest_addr, 0, sizeof(dest_addr));
dest_addr.nl_family = AF_NETLINK;
dest_addr.nl_pid = 0; /* For Linux Kernel */
dest_addr.nl_groups = 0; /* unicast */
client = _clnt_pci_create(sock_fd, &src_addr, &dest_addr, prog, vers);
if (client == NULL)
close(sock_fd);
return client;
err:
#if 0
if (errno) {
struct rpc_createerr *ce = &get_rpc_createerr();
ce->cf_stat = RPC_SYSTEMERROR;
ce->cf_error.re_errno = error;
return NULL;
}
#endif
return NULL;
}
extern u_long _create_xid(void);
/*
* PCI bases client side rpc operations
*/
static enum clnt_stat clnt_pci_call(CLIENT *, u_long, xdrproc_t, caddr_t,
xdrproc_t, caddr_t, struct timeval);
static void clnt_pci_abort(void);
static void clnt_pci_geterr(CLIENT *, struct rpc_err *);
static bool_t clnt_pci_freeres(CLIENT *, xdrproc_t, caddr_t);
static bool_t clnt_pci_control(CLIENT *, int, char *);
static void clnt_pci_destroy(CLIENT *);
static const struct clnt_ops pci_ops = {
clnt_pci_call,
clnt_pci_abort,
clnt_pci_geterr,
clnt_pci_freeres,
clnt_pci_destroy,
clnt_pci_control
};
/*
* Create a netlink (PCIe) based client handle.
* Adapted from the classic Sun RPC UDP client code: the transport is a
* NETLINK_RPC_PCI_CLNT socket which the caller has already created and
* bound, so no port mapper lookup is performed and the send/receive
* buffer sizes are fixed at PCIMSGSIZE.
* NB: It is the caller's responsibility to close the socket on failure.
* NB: The rpch->cl_auth is initialized to null authentication.
* Caller may wish to set this to something more useful.
*
* wait is the poll interval used while waiting for a reply; the call
* gives up once the supplied rpc timeout has elapsed.
*/
static CLIENT *_clnt_pci_create(int sock_fd,
struct sockaddr_nl *src,
struct sockaddr_nl *dst,
u_long prog, u_long vers)
{
struct timeval wait;
CLIENT *cl;
struct cu_data *cu = NULL;
struct rpc_msg call_msg;
struct nlmsghdr *preqnlh, *prespnlh;
struct iovec iov;
struct msghdr msg;
//u_int sendsz, recvsz;
wait.tv_sec = 5;
wait.tv_usec = 0;
cl = (CLIENT *) malloc(sizeof(CLIENT));
//sendsz = ((PCIMSGSIZE + 3) / 4) * 4;
//recvsz = ((PCIMSGSIZE + 3) / 4) * 4;
cu = (struct cu_data *)calloc(1, sizeof(*cu));
/* Allocate memory for nlm headers */
preqnlh = (struct nlmsghdr *)calloc(1, NLMSG_SPACE(PCIMSGSIZE));
prespnlh = (struct nlmsghdr *)calloc(1, NLMSG_SPACE(PCIMSGSIZE));
if (cl == NULL || cu == NULL || preqnlh == NULL || prespnlh == NULL) {
fprintf(stderr, "pci_clnt_create out of memory\n");
goto fooy;
}
cl->cl_ops = (struct clnt_ops *)&pci_ops;
cl->cl_private = (caddr_t) cu;
cu->cu_saddr = *src;
cu->cu_daddr = *dst;
cu->cu_slen = sizeof(cu->cu_saddr);
cu->cu_dlen = sizeof(cu->cu_daddr);
cu->cu_wait = wait;
cu->cu_total.tv_sec = -1;
cu->cu_total.tv_usec = -1;
cu->cu_sendsz = PCIMSGSIZE;
cu->cu_recvsz = PCIMSGSIZE;
// setup req/resp netlink headers
cu->cu_reqnlh = preqnlh;
cu->cu_respnlh = prespnlh;
memset(preqnlh, 0, NLMSG_SPACE(PCIMSGSIZE));
preqnlh->nlmsg_len = NLMSG_SPACE(PCIMSGSIZE);
preqnlh->nlmsg_pid = getpid();
preqnlh->nlmsg_flags = NLM_F_REQUEST;
cu->cu_outbuf = NLMSG_DATA(preqnlh);
memset(prespnlh, 0, NLMSG_SPACE(PCIMSGSIZE));
prespnlh->nlmsg_len = NLMSG_SPACE(PCIMSGSIZE);
prespnlh->nlmsg_pid = getpid();
prespnlh->nlmsg_flags = NLM_F_REQUEST;
cu->cu_inbuf = NLMSG_DATA(prespnlh);
call_msg.rm_xid = getpid(); //_create_xid ();
call_msg.rm_direction = CALL;
call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION;
call_msg.rm_call.cb_prog = prog;
call_msg.rm_call.cb_vers = vers;
xdrmem_create(&(cu->cu_outxdrs), cu->cu_outbuf, PCIMSGSIZE, XDR_ENCODE);
if (!xdr_callhdr(&(cu->cu_outxdrs), &call_msg)) {
goto fooy;
}
cu->cu_xdrpos = XDR_GETPOS(&(cu->cu_outxdrs));
cu->cu_sock = sock_fd;
cl->cl_auth = authnone_create();
// Register the client. May not be necessary. FIXME
preqnlh->nlmsg_len = 0;
preqnlh->nlmsg_type = NETLINK_TYPE_CLNT_REGISTER;
iov.iov_base = (void *)cu->cu_reqnlh;
iov.iov_len = NLMSG_SPACE(0);
memset((caddr_t) & msg, 0, sizeof(msg));
msg.msg_name = (void *)&cu->cu_daddr;
msg.msg_namelen = cu->cu_dlen;
msg.msg_iov = &iov;
msg.msg_iovlen = 1;
sendmsg(cu->cu_sock, &msg, 0);
return cl;
fooy:
if (cu)
free((caddr_t) cu);
if (cl)
free((caddr_t) cl);
if (preqnlh)
free((caddr_t) preqnlh);
if (prespnlh)
free((caddr_t) prespnlh);
return (CLIENT *) NULL;
}
enum clnt_stat clnt_pci_call(cl, proc, xargs, argsp, xresults, resultsp,
utimeout)
CLIENT *cl; /* client handle */
u_long proc; /* procedure number */
xdrproc_t xargs; /* xdr routine for args */
caddr_t argsp; /* pointer to args */
xdrproc_t xresults; /* xdr routine for results */
caddr_t resultsp; /* pointer to results */
struct timeval utimeout; /* seconds to wait before giving up */
{
struct cu_data *cu = (struct cu_data *)cl->cl_private;
XDR *xdrs;
int outlen = 0;
int inlen;
//socklen_t fromlen;
struct pollfd fd;
int milliseconds = (cu->cu_wait.tv_sec * 1000) +
(cu->cu_wait.tv_usec / 1000);
//struct sockaddr_in from;
struct rpc_msg reply_msg;
XDR reply_xdrs;
struct timeval time_waited;
bool_t ok;
int nrefreshes = 2; /* number of times to refresh cred */
struct timeval timeout;
//int anyup; /* any network interface up */
struct iovec iov;
struct msghdr msg;
//int ret;
//printf("In clnt_pci_call\n");
if (cu->cu_total.tv_usec == -1) {
timeout = utimeout; /* use supplied timeout */
} else {
timeout = cu->cu_total; /* use default timeout */
}
time_waited.tv_sec = 0;
time_waited.tv_usec = 0;
call_again:
xdrs = &(cu->cu_outxdrs);
if (xargs == NULL)
goto get_reply;
xdrs->x_op = XDR_ENCODE;
XDR_SETPOS(xdrs, cu->cu_xdrpos);
/*
* the transaction is the first thing in the out buffer
*/
(*(uint32_t *) (cu->cu_outbuf))++;
if ((!XDR_PUTLONG(xdrs, (long *)&proc)) ||
(!AUTH_MARSHALL(cl->cl_auth, xdrs)) || (!(*xargs) (xdrs, argsp)))
return (cu->cu_error.re_status = RPC_CANTENCODEARGS);
outlen = (int)XDR_GETPOS(xdrs);
// Set up the netlink msg headers
cu->cu_reqnlh->nlmsg_len = outlen;
iov.iov_base = (void *)cu->cu_reqnlh;
iov.iov_len = NLMSG_SPACE(outlen);
memset((caddr_t) & msg, 0, sizeof(msg));
msg.msg_name = (void *)&cu->cu_daddr;
msg.msg_namelen = cu->cu_dlen;
msg.msg_iov = &iov;
msg.msg_iovlen = 1;
cu->cu_reqnlh->nlmsg_type = NETLINK_TYPE_CLNT_REQUEST;
assert(outlen <= PCIMSGSIZE);
//send_again:
//ret = sendmsg(cu->cu_sock, &msg, 0);
sendmsg(cu->cu_sock, &msg, 0);
//perror("sendmsg");
//fprintf(stderr, "sendmsg data len %d, sent %d\n", outlen, ret );
/*
* report error if it could not send.
{
cu->cu_error.re_errno = errno;
return (cu->cu_error.re_status = RPC_CANTSEND);
}
*/
/*
* Hack to provide rpc-based message passing
*/
if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
return (cu->cu_error.re_status = RPC_TIMEDOUT);
}
// Set up the netlink msg headers
iov.iov_base = (void *)cu->cu_respnlh;
iov.iov_len = cu->cu_respnlh->nlmsg_len;
msg.msg_name = (void *)&cu->cu_daddr;
msg.msg_namelen = cu->cu_dlen;
msg.msg_iov = &iov;
msg.msg_iovlen = 1;
get_reply:
/*
* sub-optimal code appears here because we have
* some clock time to spare while the packets are in flight.
* (We assume that this is actually only executed once.)
*/
reply_msg.acpted_rply.ar_verf = _null_auth;
reply_msg.acpted_rply.ar_results.where = resultsp;
reply_msg.acpted_rply.ar_results.proc = xresults;
fd.fd = cu->cu_sock;
fd.events = POLLIN;
for (;;) {
switch (poll(&fd, 1, milliseconds)) {
case 0:
time_waited.tv_sec += cu->cu_wait.tv_sec;
time_waited.tv_usec += cu->cu_wait.tv_usec;
while (time_waited.tv_usec >= 1000000) {
time_waited.tv_sec++;
time_waited.tv_usec -= 1000000;
}
if ((time_waited.tv_sec < timeout.tv_sec) ||
((time_waited.tv_sec == timeout.tv_sec) &&
(time_waited.tv_usec < timeout.tv_usec))) {
//goto send_again;
}
return (cu->cu_error.re_status = RPC_TIMEDOUT);
/*
* buggy in other cases because time_waited is not being
* updated.
*/
case -1:
if (errno == EINTR)
continue;
cu->cu_error.re_errno = errno;
return (cu->cu_error.re_status = RPC_CANTRECV);
}
do {
iov.iov_len = NLMSG_SPACE(PCIMSGSIZE);
inlen = recvmsg(cu->cu_sock, &msg, 0);
} while (inlen < 0 && errno == EINTR);
if (inlen < 0) {
if (errno == EWOULDBLOCK)
continue;
cu->cu_error.re_errno = errno;
return (cu->cu_error.re_status = RPC_CANTRECV);
}
if (inlen < NLMSG_HDRLEN)
continue;
/* see if reply transaction id matches sent id.
Don't do this if we only wait for a replay */
if (xargs != NULL && (*((u_int32_t *) (cu->cu_inbuf))
!= *((u_int32_t *) (cu->cu_outbuf))))
continue;
/* we now assume we have the proper reply */
break;
}
/*
* now decode and validate the response
*/
xdrmem_create(&reply_xdrs, cu->cu_inbuf, (u_int) inlen, XDR_DECODE);
ok = xdr_replymsg(&reply_xdrs, &reply_msg);
/* XDR_DESTROY(&reply_xdrs); save a few cycles on noop destroy */
if (ok) {
_seterr_reply(&reply_msg, &(cu->cu_error));
if (cu->cu_error.re_status == RPC_SUCCESS) {
if (!AUTH_VALIDATE(cl->cl_auth,
&reply_msg.acpted_rply.ar_verf)) {
cu->cu_error.re_status = RPC_AUTHERROR;
cu->cu_error.re_why = AUTH_INVALIDRESP;
}
if (reply_msg.acpted_rply.ar_verf.oa_base != NULL) {
xdrs->x_op = XDR_FREE;
(void)xdr_opaque_auth(xdrs,
&(reply_msg.acpted_rply.
ar_verf));
}
} /* end successful completion */
else {
/* maybe our credentials need to be refreshed ... */
if (nrefreshes > 0 && AUTH_REFRESH(cl->cl_auth)) {
nrefreshes--;
goto call_again;
}
} /* end of unsuccessful completion */
} /* end of valid reply message */
else {
cu->cu_error.re_status = RPC_CANTDECODERES;
}
return cu->cu_error.re_status;
}
void clnt_pci_geterr(CLIENT * cl, struct rpc_err *errp)
{
}
bool_t clnt_pci_freeres(CLIENT * cl, xdrproc_t xdr_res, caddr_t res_ptr)
{
return 0;
}
void clnt_pci_abort(void)
{
}
bool_t clnt_pci_control(CLIENT * cl, int request, char *info)
{
return 0;
}
void clnt_pci_destroy(CLIENT * cl)
{
struct cu_data *cu = (struct cu_data *)cl->cl_private;
if (cu->cu_sock >= 0) {
close(cu->cu_sock);
}
XDR_DESTROY(&(cu->cu_outxdrs));
free((caddr_t) cu->cu_reqnlh);
free((caddr_t) cu->cu_respnlh);
free((caddr_t) cu);
free((caddr_t) cl);
}
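For clarity, a minimal standalone sketch of the netlink transport setup that clnt_pci_create() performs above: create a netlink socket for a given protocol number, bind it to this process's pid, and address messages to the kernel side (nl_pid 0, unicast). NETLINK_RPC_PCI_CLNT is a Quantenna-specific protocol number from rpc_pci.h; the value 31 below is only a stand-in so the sketch compiles on its own.
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

#define EXAMPLE_NETLINK_PROTO 31	/* stand-in for NETLINK_RPC_PCI_CLNT */

int example_netlink_open(struct sockaddr_nl *dst)
{
	struct sockaddr_nl src;
	int fd = socket(PF_NETLINK, SOCK_RAW, EXAMPLE_NETLINK_PROTO);

	if (fd < 0)
		return -1;

	memset(&src, 0, sizeof(src));
	src.nl_family = AF_NETLINK;
	src.nl_pid = getpid();	/* our own netlink address */
	if (bind(fd, (struct sockaddr *)&src, sizeof(src)) < 0) {
		close(fd);
		return -1;
	}

	memset(dst, 0, sizeof(*dst));
	dst->nl_family = AF_NETLINK;
	dst->nl_pid = 0;	/* messages go to the kernel side */
	dst->nl_groups = 0;	/* unicast */
	return fd;
}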

View File

@@ -0,0 +1,358 @@
/*
* Copyright (C) 1987, Sun Microsystems, Inc.
* Copyright (C) 2014 Quantenna Communications Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of Sun Microsystems, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <linux/if_packet.h>
#include <linux/if.h>
#include <unistd.h>
#include <poll.h>
#include <errno.h>
#include <qcsapi_rpc_common/common/rpc_raw.h>
#define QRPC_CLNT_RAW_POLL_TIMEOUT 5000
enum clnt_stat qrpc_clnt_raw_call(CLIENT *cl, u_long proc, xdrproc_t xargs,
caddr_t argsp, xdrproc_t xresults, caddr_t resultsp, struct timeval utimeout);
void qrpc_clnt_raw_abort(void);
void qrpc_clnt_raw_geterr(CLIENT *cl, struct rpc_err *errp);
bool_t qrpc_clnt_raw_freeres(CLIENT *cl, xdrproc_t xdr_res, caddr_t res_ptr);
void qrpc_clnt_raw_destroy(CLIENT *cl);
bool_t qrpc_clnt_raw_control(CLIENT *cl, int request, char *info);
struct qrpc_clnt_raw_priv {
struct sockaddr_ll dst_addr;
struct rpc_err rpc_error;
XDR xdrs_out;
XDR xdrs_in;
uint8_t *outbuf;
uint8_t *out_pktbuf;
uint8_t *inbuf;
uint8_t *in_pktbuf;
struct qrpc_frame_hdr out_hdr;
uint32_t xdrs_outpos;
int raw_sock;
uint8_t sess_id;
};
static const struct clnt_ops qrpc_clnt_raw_ops = {
qrpc_clnt_raw_call,
qrpc_clnt_raw_abort,
qrpc_clnt_raw_geterr,
qrpc_clnt_raw_freeres,
qrpc_clnt_raw_destroy,
qrpc_clnt_raw_control
};
static void qrpc_clnt_raw_free_priv(struct qrpc_clnt_raw_priv *const priv)
{
free(priv->outbuf);
free(priv->out_pktbuf);
free(priv->inbuf);
free(priv->in_pktbuf);
if (priv->raw_sock >= 0)
close(priv->raw_sock);
free(priv);
}
CLIENT *qrpc_clnt_raw_create(u_long prog, u_long vers,
const char *const srcif_name, const uint8_t * dmac_addr, uint8_t sess_id)
{
CLIENT *client;
int rawsock_fd;
struct qrpc_clnt_raw_priv *priv;
struct rpc_msg call_msg;
rawsock_fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
if (rawsock_fd < 0)
return NULL;
if (qrpc_set_prot_filter(rawsock_fd, QRPC_RAW_SOCK_PROT) < 0) {
close(rawsock_fd);
return NULL;
}
priv = calloc(1, sizeof(*priv));
if (!priv) {
close(rawsock_fd);
return NULL;
}
priv->raw_sock = rawsock_fd;
priv->outbuf = calloc(1, QRPC_BUFFER_LEN);
priv->inbuf = calloc(1, QRPC_BUFFER_LEN);
priv->out_pktbuf = calloc(1, ETH_FRAME_LEN);
priv->in_pktbuf = calloc(1, ETH_FRAME_LEN);
if (!priv->outbuf || !priv->inbuf || !priv->out_pktbuf || !priv->in_pktbuf) {
qrpc_clnt_raw_free_priv(priv);
return NULL;
}
if (qrpc_clnt_raw_config_dst(rawsock_fd, srcif_name, &priv->dst_addr,
dmac_addr, &priv->out_hdr.qhdr,
QRPC_RAW_SOCK_PROT) < 0) {
qrpc_clnt_raw_free_priv(priv);
return NULL;
}
client = calloc(1, sizeof(*client));
if (!client) {
qrpc_clnt_raw_free_priv(priv);
return NULL;
}
client->cl_ops = (struct clnt_ops *)&qrpc_clnt_raw_ops;
client->cl_private = (caddr_t) priv;
client->cl_auth = authnone_create();
xdrmem_create(&priv->xdrs_in, (char *)priv->inbuf + sizeof(struct qrpc_frame_hdr),
QRPC_BUFFER_LEN - sizeof(struct qrpc_frame_hdr), XDR_DECODE);
xdrmem_create(&priv->xdrs_out, (char *)priv->outbuf,
QRPC_BUFFER_LEN, XDR_ENCODE);
call_msg.rm_xid = getpid();
call_msg.rm_direction = CALL;
call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION;
call_msg.rm_call.cb_prog = prog;
call_msg.rm_call.cb_vers = vers;
if (!xdr_callhdr(&priv->xdrs_out, &call_msg)) {
qrpc_clnt_raw_free_priv(priv);
free(client);
return NULL;
}
priv->xdrs_outpos = XDR_GETPOS(&(priv->xdrs_out));
priv->sess_id = sess_id;
return client;
}
static int qrpc_clnt_raw_call_send(struct qrpc_clnt_raw_priv *const priv, const int len)
{
int ret;
static const uint16_t payload_max = ETH_FRAME_LEN - sizeof(struct qrpc_frame_hdr);
uint16_t pkt_nr;
uint16_t i;
uint16_t payload_done = 0;
struct qrpc_frame_hdr *hdr;
pkt_nr = (len + payload_max - 1) / payload_max;
for (i = 0; i < pkt_nr; i++) {
uint16_t payload_len = MIN((uint16_t)len - payload_done, payload_max);
/* build an EthII frame */
priv->out_hdr.sub_type = ((i != pkt_nr - 1) ? QRPC_FRAME_TYPE_FRAG
: QRPC_FRAME_TYPE_COMPLETE);
priv->out_hdr.sid = priv->sess_id;
hdr = (struct qrpc_frame_hdr *)priv->out_pktbuf;
memcpy(hdr, &priv->out_hdr, sizeof(priv->out_hdr));
memcpy(hdr + 1, priv->outbuf + payload_done, payload_len);
payload_done += payload_len;
do {
ret = sendto(priv->raw_sock, priv->out_pktbuf, sizeof(struct qrpc_frame_hdr) + payload_len, 0,
(struct sockaddr *)&priv->dst_addr, sizeof(priv->dst_addr));
} while (ret < 0 && errno == EINTR);
if ((uint16_t)ret != sizeof(struct qrpc_frame_hdr) + payload_len) {
priv->rpc_error.re_status = RPC_CANTSEND;
return -1;
}
}
return 0;
}
static int qrpc_clnt_raw_call_recv(struct qrpc_clnt_raw_priv *const priv)
{
struct pollfd fds;
struct sockaddr_ll lladdr;
socklen_t addrlen = sizeof(lladdr);
int ret;
uint16_t payload_done = sizeof(struct qrpc_frame_hdr);
struct qrpc_frame_hdr hdr;
do {
fds.fd = priv->raw_sock;
fds.events = POLLIN;
do {
ret = poll(&fds, 1, QRPC_CLNT_RAW_POLL_TIMEOUT);
} while (ret < 0 && errno == EINTR);
if (!ret) {
priv->rpc_error.re_status = RPC_TIMEDOUT;
return -1;
}
if (ret < 0) {
priv->rpc_error.re_status = RPC_SYSTEMERROR;
return -1;
}
do {
ret = recvfrom(priv->raw_sock, priv->in_pktbuf, ETH_FRAME_LEN,
0, (struct sockaddr *)&lladdr, &addrlen);
} while (ret < 0 && errno == EINTR);
if (lladdr.sll_pkttype != PACKET_HOST) {
priv->rpc_error.re_status = RPC_TIMEDOUT;
return -1;
}
if ((ret < (int)sizeof(struct qrpc_frame_hdr))
|| (ret - sizeof(struct qrpc_frame_hdr) + payload_done > QRPC_BUFFER_LEN)) {
priv->rpc_error.re_status = RPC_CANTRECV;
return -1;
}
/* assemble the buffer */
memcpy(&hdr, priv->in_pktbuf, sizeof(struct qrpc_frame_hdr));
memcpy(priv->inbuf + payload_done, priv->in_pktbuf + sizeof(struct qrpc_frame_hdr),
ret - sizeof(struct qrpc_frame_hdr));
payload_done += (ret - sizeof(struct qrpc_frame_hdr));
} while (hdr.sub_type == QRPC_FRAME_TYPE_FRAG);
memcpy(priv->inbuf, &hdr, sizeof(struct qrpc_frame_hdr));
return 0;
}
enum clnt_stat qrpc_clnt_raw_call(CLIENT *cl, u_long proc, xdrproc_t xargs, caddr_t argsp,
xdrproc_t xresults, caddr_t resultsp,
struct timeval utimeout)
{
struct qrpc_clnt_raw_priv *priv = (struct qrpc_clnt_raw_priv *)cl->cl_private;
XDR *xdrs_out = &priv->xdrs_out;
XDR *xdrs_in = &priv->xdrs_in;
struct rpc_msg reply_msg;
struct timeval curr_time;
struct qrpc_frame_hdr *hdr;
uint16_t tmp;
if (xargs) {
xdrs_out->x_op = XDR_ENCODE;
XDR_SETPOS(xdrs_out, priv->xdrs_outpos);
if ((!XDR_PUTLONG(xdrs_out, (long *)&proc)) ||
(!AUTH_MARSHALL(cl->cl_auth, xdrs_out)) ||
(!(*xargs) (xdrs_out, argsp))) {
priv->rpc_error.re_status = RPC_CANTENCODEARGS;
return priv->rpc_error.re_status;
}
tmp = ntohs(priv->out_hdr.seq);
priv->out_hdr.seq = htons(tmp + 1);
if (qrpc_clnt_raw_call_send(priv, XDR_GETPOS(xdrs_out)) < 0) {
return priv->rpc_error.re_status;
}
}
if (gettimeofday(&curr_time, NULL) < 0) {
priv->rpc_error.re_status = RPC_SYSTEMERROR;
return priv->rpc_error.re_status;
}
utimeout.tv_sec += curr_time.tv_sec;
/* Waiting for reply */
do {
if (qrpc_clnt_raw_call_recv(priv) < 0) {
if (priv->rpc_error.re_status == RPC_TIMEDOUT)
continue;
else
break;
}
hdr = (struct qrpc_frame_hdr *)priv->inbuf;
if (xargs && priv->out_hdr.seq != hdr->seq) {
continue;
}
xdrs_in->x_op = XDR_DECODE;
XDR_SETPOS(xdrs_in, 0);
reply_msg.acpted_rply.ar_verf = _null_auth;
reply_msg.acpted_rply.ar_results.where = resultsp;
reply_msg.acpted_rply.ar_results.proc = xresults;
if (xdr_replymsg(xdrs_in, &reply_msg)) {
if (reply_msg.rm_xid != (unsigned long)getpid()) {
continue;
}
_seterr_reply(&reply_msg, &priv->rpc_error);
if (priv->rpc_error.re_status == RPC_SUCCESS) {
if (!AUTH_VALIDATE(cl->cl_auth, &reply_msg.acpted_rply.ar_verf)) {
priv->rpc_error.re_status = RPC_AUTHERROR;
priv->rpc_error.re_why = AUTH_INVALIDRESP;
}
break;
}
} else {
priv->rpc_error.re_status = RPC_CANTDECODERES;
}
} while ((gettimeofday(&curr_time, NULL) == 0) && (curr_time.tv_sec < utimeout.tv_sec));
return priv->rpc_error.re_status;
}
void qrpc_clnt_raw_abort(void)
{
}
void qrpc_clnt_raw_geterr(CLIENT *cl, struct rpc_err *errp)
{
struct qrpc_clnt_raw_priv *priv = (struct qrpc_clnt_raw_priv *)cl->cl_private;
*errp = priv->rpc_error;
}
bool_t qrpc_clnt_raw_freeres(CLIENT *cl, xdrproc_t xdr_res, caddr_t res_ptr)
{
return FALSE;
}
void qrpc_clnt_raw_destroy(CLIENT *cl)
{
struct qrpc_clnt_raw_priv *priv = (struct qrpc_clnt_raw_priv *)cl->cl_private;
if (priv) {
XDR_DESTROY(&priv->xdrs_out);
XDR_DESTROY(&priv->xdrs_in);
qrpc_clnt_raw_free_priv(priv);
}
free(cl);
}
bool_t qrpc_clnt_raw_control(CLIENT *cl, int request, char *info)
{
return FALSE;
}
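A usage sketch for the raw-Ethernet client above: create a handle bound to a local interface and the module's MAC address, then hand it to the generated QCSAPI stubs via clnt_call or client_qcsapi_set_rpcclient(). The interface name and MAC are illustrative, and the program, version and session id numbers are placeholders standing in for QCSAPI_PROG, QCSAPI_VERS and QRPC_QCSAPI_RPCD_SID from the real headers; like all AF_PACKET users it must run as root.
#include <stdint.h>
#include <rpc/rpc.h>
#include <qcsapi_rpc_common/common/rpc_raw.h>

#define EXAMPLE_PROG 0x20000001	/* stand-in for QCSAPI_PROG */
#define EXAMPLE_VERS 1		/* stand-in for QCSAPI_VERS */
#define EXAMPLE_SID 1		/* stand-in for QRPC_QCSAPI_RPCD_SID */

int main(void)
{
	/* Illustrative MAC of the Quantenna module on the "host0" interface. */
	const uint8_t dst_mac[6] = { 0x00, 0x26, 0x86, 0x00, 0x00, 0x01 };
	CLIENT *clnt = qrpc_clnt_raw_create(EXAMPLE_PROG, EXAMPLE_VERS,
					    "host0", dst_mac, EXAMPLE_SID);

	if (!clnt) {
		clnt_pcreateerror("qrpc_clnt_raw_create");
		return 1;
	}
	/* ... issue calls through the generated qcsapi_rpc client stubs ... */
	clnt_destroy(clnt);
	return 0;
}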

Some files were not shown because too many files have changed in this diff