lede/package/qat/firmware/quickassist-c2xxx/patches/0002-contig-mem-driver-backport.patch

5909 lines
192 KiB
Diff

--- a/quickassist/Makefile
+++ b/quickassist/Makefile
@@ -80,8 +80,11 @@ else
endif
#Paths to Top-Level Makefiles for each team####
+KBUILD_EXTRA_SYMBOLS += $(ICP_ROOT)/quickassist/utilities/libusdm_drv/Module.symvers
+export KBUILD_EXTRA_SYMBOLS
OSAL_PATH=$(ICP_ROOT)/quickassist/utilities/osal/
+CMN_MEM_PATH=$(ICP_ROOT)/quickassist/utilities/libusdm_drv/
HAL_PATH=$(ICP_ROOT)/quickassist/utilities/downloader/
HAL_LIB_PATH=$(ICP_ROOT)/quickassist/utilities/downloader/
QAT_FW_PATH=$(ICP_ROOT)/quickassist/lookaside/firmware/
@@ -121,6 +124,22 @@ install_scripts:
@cp $(CONFIG_PATH)/dh89xxcc_qa_dev0_single_accel.conf $(ICP_BUILD_OUTPUT)/;
@cp $(CONFIG_PATH)/c2xxx_qa_dev0.conf $(ICP_BUILD_OUTPUT)/;
+#
+# Common memory driver
+#
+
+#userspace common memory library
+cmn_user: clean output_dir lac_lib_dir
+ @echo ; echo 'Building common mem driver for user space';
+ @cd $(CMN_MEM_PATH) && $(MAKE) ARCH=$(ICP_ARCH_USER) ICP_ENV_DIR=$(ICP_TOP_ENV) OS=linux ICP_OS?=linux_2.6 ICP_OS_LEVEL=user_space CPM_UPSTREAM=1 cm_user;\
+ echo ; echo 'Copying Common mem library';
+ cp $(CMN_MEM_PATH)/libusdm_drv_s.so $(CMN_MEM_PATH)/libusdm_drv.a $(ICP_BUILD_OUTPUT)/;
+
+#common mem driver ko
+cmn_ko: clean output_dir
+ @echo ; echo 'Building usdm_drv.ko';
+ @cd $(CMN_MEM_PATH) && $(MAKE) ICP_ENV_DIR=$(ICP_TOP_ENV) OS=linux ICP_OS?=linux_2.6 ICP_OS_LEVEL=kernel_space ICP_QDM_IOMMU=1 CPM_UPSTREAM=1 cm_kernel
+ @cp $(CMN_MEM_PATH)/usdm_drv.ko $(ICP_BUILD_OUTPUT)
#libosal needed by hal and adf
libosal: output_dir lac_lib_dir
@@ -151,7 +170,7 @@ hal_ci: output_dir libosal_ci
@echo ; echo 'Copying HAL Binary to $(LAC_LIB_DIR)';
@cp $(HAL_LIB_PATH)/lib_linux_le/$(ICP_TOOLS_TARGET)/icp_ae_loader_kernel.a $(LAC_LIB_DIR)/
-adf: output_dir lac_lib_dir libosal hal
+adf: output_dir lac_lib_dir libosal hal cmn_ko
@echo ; echo 'Building ADF';
@cd $(ADF_PATH) && export ADF_PLATFORM=ACCELDEV && export ICP_ENV_DIR=$(ICP_TOP_ENV) export ONE_KO_RELEASE_PACKAGE=1 && ICP_OS_LEVEL=kernel_space && $(MAKE);
cp $(ADF_PATH)/build/linux_2.6/libadf.a $(LAC_LIB_DIR)/;
@@ -179,7 +198,7 @@ adfvf: output_dir lac_lib_dir libosalvf
@cd $(ADF_PATH) && export ADF_PLATFORM=ACCELDEVVF && export ICP_ENV_DIR=$(ICP_TOP_ENV) export ONE_KO_RELEASE_PACKAGE=1 && ICP_OS_LEVEL=kernel_space && $(MAKE);
cp $(ADF_PATH)/build/linux_2.6/libadf.a $(LAC_LIB_DIR)/;
-adf_user: output_dir lac_lib_dir libosal_user
+adf_user: output_dir lac_lib_dir libosal_user cmn_user
@echo ; echo 'Building user ADF';
@cd $(ADF_PATH) && export ADF_PLATFORM=ACCELDEV && export ICP_ENV_DIR=$(ICP_TOP_ENV) && export ONE_KO_RELEASE_PACKAGE=1 && ICP_OS_LEVEL=user_space && $(MAKE) ARCH=$(ICP_ARCH_USER) adf_user;
cp $(ADF_PATH)/build/linux_2.6/libadf_proxy.a $(ICP_BUILD_OUTPUT)/;
--- /dev/null
+++ b/quickassist/utilities/libusdm_drv/Makefile
@@ -0,0 +1,142 @@
+#########################################################################
+#
+# @par
+# This file is provided under a dual BSD/GPLv2 license. When using or
+# redistributing this file, you may do so under either license.
+#
+# GPL LICENSE SUMMARY
+#
+# Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of version 2 of the GNU General Public License as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+# The full GNU General Public License is included in this distribution
+# in the file called LICENSE.GPL.
+#
+# Contact Information:
+# Intel Corporation
+#
+# BSD LICENSE
+#
+# Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+#
+# version: QAT1.7.L.4.7.0-00006
+############################################################################
+
+
+####################Common variables and definitions########################
+export BASE_NAME = usdm_drv
+export OSTYPE=$(shell uname -s)
+
+ifeq ($(OSTYPE),FreeBSD)
+ICP_OS := freebsd
+OS := freebsd
+export MAKE := make
+DMESG := dmesg -c
+FBSD_VERSION := $(shell uname -r | cut -d'-' -f1,2)
+ifeq ($(FBSD_VERSION),8.4-RELEASE)
+DMESG := dmesg && sudo sysctl kern.msgbuf_clear=1
+endif
+else
+ICP_OS?=linux_2.6
+OS?=linux
+ICP_OS_LEVEL?=user_space
+DMESG := dmesg -C
+endif
+
+
+all: cm_user cm_kernel
+ @echo ; echo "Build Completed";
+cm_user:
+ifneq ($(OSTYPE),FreeBSD)
+ @cd $(OS) && \
+ $(MAKE) clean ICP_OS_LEVEL=user_space;
+ @cd $(OS) && \
+ $(MAKE) ICP_OS_LEVEL=user_space lib_shared UT=$(UT) BE=$(BE);
+ @cd $(OS) && \
+ $(MAKE) ICP_OS_LEVEL=user_space lib_static UT=$(UT) BE=$(BE);
+ @cp $(OS)/build/$(ICP_OS)/user_space/lib$(BASE_NAME)_s.so lib$(BASE_NAME)_s.so ;
+ @cp $(OS)/build/$(ICP_OS)/user_space/lib$(BASE_NAME).a lib$(BASE_NAME).a ;
+else
+ @cd $(ICP_OS) && \
+ make clean ICP_OS_LEVEL=user_space;
+ @cd $(ICP_OS) && \
+ make ICP_OS_LEVEL=user_space;
+ @cp $(ICP_OS)/user_space/libusdm_drv.so.0 libusdm_drv_s.so ;
+ @cp $(ICP_OS)/user_space/libusdm_drv.a libusdm_drv.a ;
+endif
+
+cm_kernel:
+ifneq ($(OSTYPE),FreeBSD)
+ @echo $(ICP_BUILDSYSTEM_PATH)
+ @cd $(OS) && \
+ $(MAKE) clean ICP_OS_LEVEL=kernel_space && \
+ $(MAKE) ICP_OS_LEVEL=kernel_space UT=$(UT) BE=$(BE);
+ @mv $(OS)/build/$(ICP_OS)/kernel_space/$(BASE_NAME).a $(OS)/build/$(ICP_OS)/kernel_space/lib$(BASE_NAME).a ;
+ @cp linux/Module.symvers Module.symvers;
+ @cp $(OS)/build/$(ICP_OS)/kernel_space/$(BASE_NAME).ko $(BASE_NAME).ko;
+else
+ @cd $(ICP_OS) && \
+ make clean ICP_OS_LEVEL=kernel_space && \
+ make ICP_OS_LEVEL=kernel_space UT=$(UT);
+ @cp $(ICP_OS)/kernel_space/usdm_drv.ko usdm_drv.ko;
+endif
+
+bsd_kernel:
+ @cd $(ICP_OS) && \
+ make clean ICP_OS_LEVEL=kernel_space && \
+ make ICP_OS_LEVEL=kernel_space UT=$(UT);
+ @cp $(ICP_OS)/kernel_space/$(BASE_NAME).ko $(BASE_NAME).ko;
+
+
+clean:
+ rm -f *.a *.so *.ko
+ @cd $(OS) && \
+ rm -f kernel_space/.*.cmd && \
+ rm -f kernel_space/.depend* && \
+ rm -f user_space/.depend* && \
+ $(MAKE) ICP_OS_LEVEL=user_space clean && \
+ $(MAKE) ICP_OS_LEVEL=kernel_space clean
+
+doxygen:
+ @doxygen qae_mem.h > doxygen_output.txt 2>&1;
+ @echo "The doxygen file is available at $(PWD)/html/index.html"
+ @echo "The doxygen command output is available at $(PWD)/doxygen_output.txt"
--- /dev/null
+++ b/quickassist/utilities/libusdm_drv/linux/Makefile
@@ -0,0 +1,197 @@
+#########################################################################
+#
+# @par
+# This file is provided under a dual BSD/GPLv2 license. When using or
+# redistributing this file, you may do so under either license.
+#
+# GPL LICENSE SUMMARY
+#
+# Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of version 2 of the GNU General Public License as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+# The full GNU General Public License is included in this distribution
+# in the file called LICENSE.GPL.
+#
+# Contact Information:
+# Intel Corporation
+#
+# BSD LICENSE
+#
+# Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+#
+# version: QAT1.7.L.4.7.0-00006
+############################################################################
+
+
+####################Common variables and definitions########################
+ICP_OS?=linux_2.6
+OS?=linux
+ICP_OS_LEVEL?=user_space
+CMN_ROOT?=$(ICP_ROOT)/quickassist/utilities/libusdm_drv
+QAT_DRV?=$(ICP_ROOT)/quickassist/qat
+
+#include the makefile with all the default and common Make variable definitions
+include $(ICP_BUILDSYSTEM_PATH)/build_files/common.mk
+
+# List of Source Files to be compiled (to be in a single line or on different lines separated by a "\" and tab.
+
+ifeq ($(ICP_OS_LEVEL),user_space)
+SOURCES:= $(ICP_OS_LEVEL)/qae_mem_utils.c $(ICP_OS_LEVEL)/qae_mem_hugepage_utils.c
+else
+MODULE_SOURCES:= $(ICP_OS_LEVEL)/qae_mem_drv.c
+SOURCES:=$(ICP_OS_LEVEL)/qae_mem_utils.c
+endif
+
+ifeq ($(UT),1)
+MODULE_SOURCES+= ../test/$(OS)/$(ICP_OS_LEVEL)/qae_mem_drv_utils.c $(ICP_OS_LEVEL)/qdm.c
+else
+MODULE_SOURCES+= $(ICP_OS_LEVEL)/qae_mem_drv_utils.c $(ICP_OS_LEVEL)/qdm.c
+endif
+
+INCLUDES += -I$(CMN_ROOT)
+
+INCLUDES += -I$(CMN_ROOT)/$(OS)/include
+
+ifeq ($(ICP_ADF_IOMMU), 1)
+EXTRA_CFLAGS += -DICP_ADF_IOMMU
+KBUILD_EXTRA_SYMBOLS+=$(QAT_DRV)/src/Module.symvers
+export KBUILD_EXTRA_SYMBOLS
+endif
+
+ifeq ($(ICP_OSAL_IOMMU), 1)
+EXTRA_CFLAGS += -DICP_OSAL_IOMMU
+KBUILD_EXTRA_SYMBOLS+=$(QAT_DRV)/src/Module.symvers
+export KBUILD_EXTRA_SYMBOLS
+endif
+
+ifeq ($(ICP_QDM_IOMMU), 1)
+EXTRA_CFLAGS += -DICP_QDM_IOMMU
+KBUILD_EXTRA_SYMBOLS+=$(QAT_DRV)/Module.symvers
+export KBUILD_EXTRA_SYMBOLS
+endif
+
+ifdef QAE_USE_128K_SLABS
+EXTRA_CFLAGS+=-DQAE_NUM_PAGES_PER_ALLOC=32
+endif
+
+ifeq ($(ICP_OS_LEVEL),user_space)
+EXTRA_CFLAGS += -DUSER_SPACE
+EXTRA_CFLAGS += -Wextra -Werror -Wno-missing-field-initializers
+ifdef ICP_X86
+EXTRA_CFLAGS += -m32 -D_FILE_OFFSET_BITS=64
+LIB_SHARED_FLAGS += -m elf_i386
+endif
+ifdef ICP_DISABLE_SECURE_MEM_FREE
+EXTRA_CFLAGS += -DICP_DISABLE_SECURE_MEM_FREE
+endif
+ifdef ICP_WITHOUT_THREAD
+EXTRA_CFLAGS += -DICP_WITHOUT_THREAD
+endif
+else
+
+EXTRA_CFLAGS += -DKERNEL_SPACE
+ifeq ($(BE) ,1)
+KBUILD_EXTRA_SYMBOLS+=$(BE_DIR)/run/linuxKernel/Module.symvers
+export KBUILD_EXTRA_SYMBOLS
+endif
+ifdef ICP_NO_PROC_SUPPORT
+EXTRA_CFLAGS += -DICP_NO_PROC_SUPPORT
+endif
+
+# Check for defense with stack protection in kernel
+ifeq ($(KERNEL_DEFENSES_STACK_PROTECTION), n)
+STACK_PROTECTION=-fstack-protector -fstack-protector-strong
+EXTRA_CFLAGS := $(filter-out $(STACK_PROTECTION), $(EXTRA_CFLAGS))
+endif
+endif
+
+ifeq ($(ICP_OS_LEVEL),user_space)
+#include os dependent rules
+include $(ICP_ENV_DIR)/$(ICP_OS)_$(ICP_OS_LEVEL).mk
+lib: lib_shared
+all: lib_shared
+OUTPUT_NAME=lib$(BASE_NAME)
+export OUTPUT_NAME
+else
+
+OUTPUT_NAME=$(BASE_NAME)
+export OUTPUT_NAME
+
+#kernel space rules here
+#produce two artefacts: module and static library and copy them
+ifeq ($(OS),linux)
+EXTRA_CFLAGS+=$(INCLUDES) -Werror -ftree-ter
+obj-m+=$(OUTPUT_NAME).o
+$(OUTPUT_NAME)-objs :=$(patsubst %.c,%.o, $(MODULE_SOURCES)) $(ADDITIONAL_KERNEL_LIBS)
+lib-m := $(patsubst %.c,%.o, $(SOURCES)) $(patsubst %.S,%.o, $(ASM_SOURCES))
+$(LIB_STATIC): dirs
+ @echo 'Creating static library ${LIB_STATIC}'; \
+ $(MAKE) -C $(KERNEL_SOURCE_ROOT)/ M=$(PWD) obj-m=""; \
+ echo 'Copying outputs';\
+ mv -f $(OBJ) $($(PROG_ACY)_FINAL_OUTPUT_DIR);\
+ test -f built-in.o && mv -f built-in.o $($(PROG_ACY)_FINAL_OUTPUT_DIR);\
+ test -f lib.a && mv lib.a $($(PROG_ACY)_FINAL_OUTPUT_DIR)/$(LIB_STATIC);\
+ test -f $(OUTPUT_NAME).ko && mv -f $(OUTPUT_NAME).ko $($(PROG_ACY)_FINAL_OUTPUT_DIR);\
+ test -f $(OUTPUT_NAME).o && mv -f $(OUTPUT_NAME).o $($(PROG_ACY)_FINAL_OUTPUT_DIR);\
+ $(RM) -rf *.mod.* .*.cmd;
+
+$(MODULENAME): dirs
+ @echo 'Creating kernel module'; \
+ $(MAKE) -C $(KERNEL_SOURCE_ROOT)/ M=$(PWD); \
+ echo 'Copying outputs';\
+ mv -f $(OBJ) $($(PROG_ACY)_FINAL_OUTPUT_DIR);\
+ test -f built-in.o && mv -f built-in.o $($(PROG_ACY)_FINAL_OUTPUT_DIR);\
+ test -f lib.a && mv lib.a $($(PROG_ACY)_FINAL_OUTPUT_DIR)/$(LIB_STATIC);\
+ test -f $(OUTPUT_NAME).ko && mv -f $(OUTPUT_NAME).ko $($(PROG_ACY)_FINAL_OUTPUT_DIR);\
+ test -f $(OUTPUT_NAME).o && mv -f $(OUTPUT_NAME).o $($(PROG_ACY)_FINAL_OUTPUT_DIR);\
+ $(RM) -rf *.mod.* .*.cmd;
+else
+include $(ICP_ENV_DIR)/$(ICP_OS)_$(ICP_OS_LEVEL).mk
+endif
+
+all: module
+install: module
+endif
+###################Include rules makefiles########################
+include $(ICP_BUILDSYSTEM_PATH)/build_files/rules.mk
+
+###################End of Rules inclusion#########################
--- /dev/null
+++ b/quickassist/utilities/libusdm_drv/linux/include/qae_mem_utils.h
@@ -0,0 +1,757 @@
+/***************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * version: QAT1.7.L.4.7.0-00006
+ *
+ ***************************************************************************/
+/**
+*****************************************************************************
+* @file qae_mem_utils.h
+*
+* This file provides linux kernel memory allocation for quick assist API
+*
+*****************************************************************************/
+#ifndef QAE_MEM_UTILS_H_
+#define QAE_MEM_UTILS_H_
+#if defined(__KERNEL__)
+#ifdef __FreeBSD__
+#include <sys/malloc.h>
+#include <sys/sysctl.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+MALLOC_DECLARE(M_QAE_MEM);
+#else
+#include <asm/page.h>
+#include <linux/io.h>
+#include <linux/version.h>
+
+#if (KERNEL_VERSION(2, 6, 38) >= LINUX_VERSION_CODE)
+#define kstrtoll strict_strtoll
+#endif /* KERNEL_VERSION */
+#endif /* OS selection */
+#endif /* __KERNEL__ */
+
+#define USDM_MOD "usdm_drv: "
+
+#define mm_err(...) pr_err(USDM_MOD __VA_ARGS__)
+
+#define mm_info(...) pr_info(USDM_MOD __VA_ARGS__)
+
+#define mm_warning(...) pr_warn(USDM_MOD __VA_ARGS__)
+
+/*define types which need to vary between 32 and 64 bit*/
+#define QAE_PAGE_SHIFT 12
+#define QAE_PAGE_SIZE (1UL << QAE_PAGE_SHIFT)
+
+/* QAE_NUM_PAGES_PER_ALLOC can be defined as 32 pages when library
+is built, default is 512 */
+#ifndef QAE_NUM_PAGES_PER_ALLOC
+#define QAE_NUM_PAGES_PER_ALLOC 512
+#endif
+
+#define STATIC static
+
+#define QAE_PHYS_ADDR uint64_t
+
+#define QAE_MEM_ZALLOC_GEN(size) kzalloc(size, GFP_KERNEL)
+#define QAE_MEM_FREE(ptr) \
+ do \
+ { \
+ if (ptr) \
+ { \
+ kfree(ptr); \
+ ptr = NULL; \
+ } \
+ } while (0)
+/**
+ *****************************************************************************
+ * @ingroup perfCodeFramework
+ * Framework aligned memory structure.
+ * @description
+ * This structure is used to assist the framework in allocating aligned
+ * memory
+ ****************************************************************************/
+typedef struct qae_mem_alloc_info_s
+{
+ void *mAllocMemPtr; /* memory addr returned by the kernel */
+ size_t mSize; /* allocated size */
+
+} qae_mem_alloc_info_t;
+
+enum slabType
+{
+ SMALL = 0,
+ LARGE = 1,
+ HUGE_PAGE = 2,
+};
+
+/* User space memory information structure. */
+typedef struct dev_mem_info_s
+{
+ int64_t nodeId; /* shared b/w user/kernel */
+ /* Node id for NUMA */
+ uint64_t size; /* shared b/w user/kernel */
+ /* Size of this block (bytes) */
+ enum slabType type;
+ /* Slab for normal memory or large memory */
+ uint32_t allocations; /* user space only */
+ /* Number of allocations from this slab */
+ int64_t hpg_fd; /* user space only */
+ /* The huge page file descriptor of each slab */
+ uint64_t phy_addr; /* shared b/w user/kernel */
+ /* Physical address of the kmalloced area */
+ union {
+ void *virt_addr; /* user space only */
+ uint64_t padding_virt;
+ };
+ /* Base address in user space - i.e. virtual address */
+ union {
+ struct dev_mem_info_s *pPrev_user; /* user space only */
+ uint64_t padding_prevu;
+ };
+ union {
+ struct dev_mem_info_s *pNext_user; /* user space only */
+ uint64_t padding_nextu;
+ };
+ union {
+ struct dev_mem_info_s *pPrev_user_hash; /* user space only */
+ uint64_t padding_prevuh;
+ };
+ union {
+ struct dev_mem_info_s *pNext_user_hash; /* user space only */
+ uint64_t padding_nextuh;
+ };
+} dev_mem_info_t;
+
+/* Kernel space memory information structure. */
+typedef struct kdev_mem_info_s
+{
+ void *kmalloc_ptr; /* kernel space only (small slab) */
+ /* Pointer to mem originally returned by kmalloc */
+ void *huge_mem_ctrl;
+ uint64_t size;
+ /* Slab size */
+ uint64_t phy_addr; /* shared b/w user/kernel */
+ /* Physical address of the kmalloc'ed area */
+ struct kdev_mem_info_s *pPrev_kernel;
+ struct kdev_mem_info_s *pNext_kernel;
+ struct kdev_mem_info_s *pPrev_kernel_hash;
+ struct kdev_mem_info_s *pNext_kernel_hash;
+} kdev_mem_info_t;
+
+typedef struct user_page_info_s
+{
+ /* Use 64-bit unsigned to support 32bit application on
+ * a 64-bit kernel */
+ uint64_t virt_addr;
+ /* physical address shared b/w user/kernel */
+ uint64_t phy_addr;
+} user_page_info_t;
+
+/* size of allocation unit */
+#define UNIT_SIZE 1024
+#define QAE_KBYTE 1024
+#define QWORD_WIDTH (8 * sizeof(uint64_t))
+#define QWORD_ALL_ONE 0xFFFFFFFFFFFFFFFFULL
+
+/*
+Bitmap is used to keep track the allocation of each block
+Each 1k block is represented by one bit allocated(1)/free(0)
+BITMAP_LEN is a macro that represents the number of 64-bit quad words
+that make up the bitmap
+with 512 pages of 4k page and 1k units this value is 32
+ */
+#define CHUNK_SIZE (UNIT_SIZE * QWORD_WIDTH)
+
+#define BITMAP_LEN (QAE_NUM_PAGES_PER_ALLOC * QAE_PAGE_SIZE / CHUNK_SIZE)
+
+#define BLOCK_SIZES (BITMAP_LEN * QWORD_WIDTH)
+
+/*block control structure */
+typedef struct block_ctrl_s
+{
+ dev_mem_info_t mem_info; /* memory device info type */
+ /* adding an extra element at the end to make a barrier */
+ uint64_t bitmap[BITMAP_LEN + 1]; /* bitmap each bit represents a 1k block */
+ uint16_t sizes[BLOCK_SIZES]; /* Holds the size of each allocated block */
+} block_ctrl_t;
+
+/**
+ *****************************************************************************
+ * @ingroup qaeMemUtils
+ * array structure
+ * @description
+ * This structure is used to copy chunks of data read from files
+ * from user to kernel space
+ ****************************************************************************/
+typedef struct dev_mem_file_s
+{
+ unsigned char data[2048];
+ unsigned int size;
+} dev_mem_file_t;
+
+/**
+ *****************************************************************************
+ * @ingroup qaeMemUtils
+ * user space memory list pointer structure.
+ * @description
+ * This structure is used to assist in allocating aligned
+ * memory
+ ****************************************************************************/
+typedef struct user_proc_mem_list_s
+{
+ int pid;
+ uint64_t allocs_nr;
+ uint64_t hugepages_nr;
+ kdev_mem_info_t *head;
+ kdev_mem_info_t *tail;
+ struct user_proc_mem_list_s *pPrev;
+ struct user_proc_mem_list_s *pNext;
+} user_proc_mem_list_t;
+/**
+ *****************************************************************************
+ * @ingroup qaeMemUtils
+ * user space memory list pointer structure.
+ * @description
+ * This structure is used to assist in allocating aligned
+ * memory
+ ****************************************************************************/
+typedef struct user_mem_dev_s
+{
+ user_proc_mem_list_t *head;
+ user_proc_mem_list_t *tail;
+} user_mem_dev_t;
+
+/*
+ ******************************************************************************
+ * @ingroup ADD_ELEMENT_TO_HEAD_LIST
+ * insert element at the head of a linked list
+ * @description
+ * inserts a new element at the head of a
+ * double linked list in user or kernel mode
+ * depending on mode parameter if mode is an
+ * empty string kernel mode is used
+ * elementToAdd - ptr to the new element
+ * headPtr - ptr to the first element in list
+ * tailPtr - ptr to the last element in the list
+ * mode - _kernel,_user or empty
+ ******************************************************************************/
+
+#define ADD_ELEMENT_TO_HEAD_LIST(elementToAdd, headPtr, tailPtr, mode) \
+ do \
+ { \
+ elementToAdd->pPrev##mode = NULL; \
+ if (NULL == headPtr) \
+ { \
+ tailPtr = elementToAdd; \
+ elementToAdd->pNext##mode = NULL; \
+ } \
+ else \
+ { \
+ elementToAdd->pNext##mode = headPtr; \
+ headPtr->pPrev##mode = elementToAdd; \
+ } \
+ headPtr = elementToAdd; \
+ } while (0)
+
+/*
+ ******************************************************************************
+ * @ingroup ADD_ELEMENT_TO_END_LIST
+ * insert element at the end of a linked list
+ * @description
+ * inserts a new element at the tail of a
+ * double linked list in user or kernel mode
+ * depending on mode parameter if mode is an
+ * empty string kernel mode is used
+ * elementToAdd - ptr to the new element
+ * headPtr - ptr to the first element in list
+ * tailPtr - ptr to the last element in the list
+ * mode - _kernel,_user or empty
+ ******************************************************************************/
+
+#define ADD_ELEMENT_TO_END_LIST(elementToAdd, headPtr, tailPtr, mode) \
+ do \
+ { \
+ elementToAdd->pNext##mode = NULL; \
+ if (NULL == tailPtr) \
+ { \
+ headPtr = elementToAdd; \
+ elementToAdd->pPrev##mode = NULL; \
+ } \
+ else \
+ { \
+ elementToAdd->pPrev##mode = tailPtr; \
+ tailPtr->pNext##mode = elementToAdd; \
+ } \
+ tailPtr = elementToAdd; \
+ } while (0)
+
+/*
+ ******************************************************************************
+ * @ingroup REMOVE_ELEMENT_FROM_LIST
+ * remove an element from a linked list
+ * @description
+ * removes an element from a
+ * double linked list in user or kernel mode
+ * depending on mode parameter if mode is an
+ * empty string kernel mode is used
+ * elementToRemove - ptr to the element to remove
+ * headPtr - ptr to the first element in list
+ * tailPtr - ptr to the last element in the list
+ * mode - _kernel,_user or empty
+ ******************************************************************************/
+
+#define REMOVE_ELEMENT_FROM_LIST(elementToRemove, headPtr, tailPtr, mode) \
+ do \
+ { \
+ if (NULL != elementToRemove->pPrev##mode) \
+ { \
+ elementToRemove->pPrev##mode->pNext##mode = \
+ elementToRemove->pNext##mode; \
+ if (NULL != elementToRemove->pNext##mode) \
+ { \
+ elementToRemove->pNext##mode->pPrev##mode = \
+ elementToRemove->pPrev##mode; \
+ } \
+ else \
+ { \
+ tailPtr = elementToRemove->pPrev##mode; \
+ } \
+ } \
+ else \
+ { \
+ if (NULL != elementToRemove->pNext##mode) \
+ { \
+ elementToRemove->pNext##mode->pPrev##mode = NULL; \
+ headPtr = elementToRemove->pNext##mode; \
+ } \
+ else \
+ { \
+ headPtr = NULL; \
+ tailPtr = NULL; \
+ } \
+ } \
+ } while (0)
+
+/* IOCTL number for use between the kernel and the user space application */
+#define DEV_MEM_MAGIC 'q'
+#define DEV_MEM_CMD_MEMALLOC (0)
+#define DEV_MEM_CMD_MEMFREE (1)
+#define DEV_MEM_CMD_RELEASE (2)
+#define DEV_MEM_CMD_UNREGISTER (3)
+#define DEV_MEM_CMD_GET_NUM_HPT (4)
+#define DEV_MEM_CMD_GET_USER_PAGE (5)
+
+/* IOCTL commands for requesting kernel memory */
+#define DEV_MEM_IOC_MEMALLOC \
+ _IOWR(DEV_MEM_MAGIC, DEV_MEM_CMD_MEMALLOC, dev_mem_info_t)
+
+#define DEV_MEM_IOC_MEMFREE \
+ _IOWR(DEV_MEM_MAGIC, DEV_MEM_CMD_MEMFREE, dev_mem_info_t)
+
+#define DEV_MEM_IOC_RELEASE _IO(DEV_MEM_MAGIC, DEV_MEM_CMD_RELEASE)
+
+#define DEV_MEM_IOC_UNREGISTER \
+ _IOWR(DEV_MEM_MAGIC, DEV_MEM_CMD_UNREGISTER, dev_mem_info_t)
+
+#define DEV_MEM_IOC_GET_NUM_HPT \
+ _IOWR(DEV_MEM_MAGIC, DEV_MEM_CMD_GET_NUM_HPT, uint32_t)
+
+#define DEV_MEM_IOC_GET_USER_PAGE \
+ _IOWR(DEV_MEM_MAGIC, DEV_MEM_CMD_GET_USER_PAGE, user_page_info_t)
+
+/*****************************************************************************
+ * * @ingroup CommonMemoryDriver
+ * qaeMemInit
+ *
+ * @description
+ * Initialize the user-space allocator, opening the device driver
+ * used to communicate with the kernel-space.
+ *
+ * @param[in] none
+ *
+ * @retval 0 if the open of the device was successful and
+ * non-zero otherwise
+ * @pre
+ * none
+ * @post
+ * Allocator is initialized
+ *
+ ****************************************************************************/
+int32_t qaeMemInit(void);
+
+/*****************************************************************************
+ * * @ingroup CommonMemoryDriver
+ * qaeMemDestroy
+ *
+ * @description
+ * Release the user-space allocator. It closes the file descriptor
+ * associated with the device driver
+ *
+ * @param[in] none
+ *
+ * @retval none
+ *
+ * @pre
+ * The user space allocator is initialized using qaeMemInit
+ * @post
+ * The user-space allocator is released
+ *
+ ****************************************************************************/
+void qaeMemDestroy(void);
+
+/*****************************************************************************
+ * * @ingroup CommonMemoryDriver
+ * qaeIOMMUInit
+ *
+ * @description
+ * Function creates iommu domain. Applicable when IOMMU is enabled
+ *
+ * @param[in] none
+ *
+ * @retval 0 - if successful.
+ * non-zero - otherwise
+ *
+ * @pre
+ * IOMMU is enabled.
+ * @post
+ * iommu domain created
+ *
+ ****************************************************************************/
+int32_t qaeIOMMUInit(void);
+
+/*****************************************************************************
+ * * @ingroup CommonMemoryDriver
+ * qaeIOMMUExit
+ *
+ * @description
+ * Function removes iommu domain. Applicable when IOMMU is enabled
+ *
+ * @param[in] none
+ *
+ * @retval none
+ *
+ * @pre
+ * IOMMU is enabled and an iommu domain is created using qaeIOMMUInit
+ * @post
+ * iommu domain removed
+ *
+ ****************************************************************************/
+void qaeIOMMUExit(void);
+
+/*****************************************************************************
+ * * @ingroup CommonMemoryDriver
+ * qaeIOMMUgetRemappingSize
+ *
+ * @description
+ * Function calculates size for remapping when IOMMU is enabled.
+ * Before calling any of the qaeMemAlloc functions, this function can be
+ * used to calculate the actual size of memory to be allocated.
+ * The remapping size is at least PAGE_SIZE.
+ *
+ * @param[in] size - Actual size of the memory to be allocated
+ *
+ * @retval Remapping size
+ *
+ * @pre
+ * IOMMU is enabled and an iommu domain is created using qaeIOMMUInit.
+ * @post
+ * Remapping size provided.
+ *
+ ****************************************************************************/
+size_t qaeIOMMUgetRemappingSize(size_t size);
+
+/*****************************************************************************
+ * * @ingroup CommonMemoryDriver
+ * qaeIOMMUMap
+ *
+ * @description
+ * Function adds mapping from io virtual address to a physical address.
+ * Applicable when IOMMU is enabled
+ *
+ * @param[in] phaddr - Host physical address.
+ * @param[in] iova - IO virtual address.
+ * @param[in] size - Memory size to be remapped obtained from
+ * qaeIOMMUgetRemappingSize() function.
+ *
+ * @retval CPA_STATUS_SUCCESS - if successful.
+ * CPA_STATUS_UNSUPPORTED - if not supported
+ * CPA_STATUS_FAIL - otherwise
+ *
+ * @pre
+ * An iommu domain is created using qaeIOMMUInit. iova points to
+ * previously allocated memory. phaddr is already obtained using
+ * iova using virt_to_phys or similar functions. size is calculated
+ * using qaeIOMMUgetRemappingSize function.
+ * @post
+ * IO virtual address mapped
+ ****************************************************************************/
+int32_t qaeIOMMUMap(uint64_t phaddr, uint64_t iova, size_t size);
+
+/*****************************************************************************
+ * * @ingroup CommonMemoryDriver
+ * qaeIOMMUUnmap
+ *
+ * @description
+ * Function removes mapping from io virtual address to a physical
+ * address. Applicable when IOMMU is enabled
+ *
+ * @param[in] iova - IO virtual address.
+ * @param[in] size - Memory size to be unmapped
+ *
+ * @retval CPA_STATUS_SUCCESS - if successful.
+ * CPA_STATUS_UNSUPPORTED - if not supported
+ * CPA_STATUS_FAIL - otherwise
+ *
+ * @pre
+ * An iommu domain is created using qaeIOMMUInit. iova points to
+ * previously allocated memory.
+ * @post
+ * IO virtual address unmapped
+ ****************************************************************************/
+int32_t qaeIOMMUUnmap(uint64_t iova, size_t size);
+
+/*****************************************************************************
+ * * @ingroup CommonMemoryDriver
+ * qaeIOMMUVirtToPhys
+ *
+ * @description
+ * Function translates io virtual address to a physical address.
+ * Applicable when IOMMU is enabled.
+ *
+ * @param[in] iova, IO virtual address
+ *
+ * @retval host physical address - if successful
+ * NULL Otherwise
+ *
+ * @pre
+ * An iommu domain is created using qaeIOMMUInit. iova points to
+ * previously allocated memory.
+ * @post
+ * virtual address is translated to physical address
+ *
+ ****************************************************************************/
+uint64_t qaeIOMMUVirtToPhys(uint64_t iova);
+
+/*****************************************************************************
+ * * @ingroup CommonMemoryDriver
+ * qaeIOMMUAttachDev
+ *
+ * @description
+ * This function attaches a pci dev (VF) to an iommu domain.
+ * Applicable when IOMMU/SRIOV are enabled and after the driver bringup
+ * in Host is successful.
+ *
+ * @param[in] dev, Device to be attached
+ *
+ * @retval CPA_STATUS_SUCCESS - if successful
+ * CPA_STATUS_UNSUPPORTED - if not supported
+ * CPA_STATUS_FAIL - otherwise
+ *
+ * @pre
+ * An iommu domain is created using qaeIOMMUInit. Driver bringup
+ * in Host is successful.
+ * @post
+ * device is attached
+ *
+ ****************************************************************************/
+int32_t qaeIOMMUAttachDev(void *dev);
+
+/*****************************************************************************
+ * * @ingroup CommonMemoryDriver
+ * qaeIOMMUDetachDev
+ *
+ * @description
+ * Function detaches pci dev to iommu domain
+ *
+ * @param[in] dev, Device to be detached
+ *
+ * @retval none
+ *
+ * @pre
+ * An iommu domain is created using qaeIOMMUInit, Driver bringup
+ * in Host is successful and dev is already
+ * attached using qaeIOMMUAttachDev
+ * @post
+ * Device is detached
+ *
+ ****************************************************************************/
+void qaeIOMMUDetachDev(void *dev);
+
+/**
+ *****************************************************************************
+ * @ingroup CommonMemoryDriver
+ * printMemAllocations
+ *
+ * @description
+ * Prints only the overall count of NUMA and non-NUMA memory allocations
+ * performed. This doesn't provide other details like the allocation
+ * sizes, pointers etc.
+ *
+ * @retval none
+ *
+ * @pre
+ * The user space allocator is initialized using qaeMemInit
+ * @post
+ * memory allocation count printed
+ *
+ ****************************************************************************/
+void printMemAllocations(void);
+
+#ifndef __KERNEL__
+#ifdef ICP_WITHOUT_THREAD
+#define mem_mutex_lock(x) (0)
+#define mem_mutex_unlock(x) (0)
+#else
+#define mem_mutex_lock(x) pthread_mutex_lock(x)
+#define mem_mutex_unlock(x) pthread_mutex_unlock(x)
+#endif
+
+#define mem_ioctl(fd, cmd, pMemInfo) ioctl(fd, cmd, pMemInfo)
+#define qae_open(file, options) open(file, options)
+#define qae_lseek(fd, offset, whence) lseek(fd, offset, whence)
+#define qae_read(fd, buf, nbytes) read(fd, buf, nbytes)
+#define qae_mmap(addr, length, prot, flags, fd, offset) \
+ mmap(addr, length, prot, flags, fd, offset)
+#define qae_munmap(addr, length) munmap(addr, length)
+#define qae_madvise(addr, len, advice) madvise(addr, len, advice)
+#define qae_mkstemp(template) mkstemp(template)
+#endif
+
+#if defined(__KERNEL__)
+#if defined(ICP_ADF_IOMMU)
+int icp_adf_iommu_map(void *iova, void *phaddr, size_t size);
+int icp_adf_iommu_unmap(void *iova, size_t size);
+size_t icp_adf_iommu_get_remapping_size(size_t size);
+static inline int icp_iommu_map(void **iova, void *vaddr, size_t size)
+{
+ void *phaddr = (void *)virt_to_phys(vaddr);
+ *iova = phaddr;
+ return icp_adf_iommu_map(*iova, phaddr, size);
+}
+static inline int icp_iommu_unmap(void *iova, size_t size)
+{
+ return icp_adf_iommu_unmap(iova, size);
+}
+static inline size_t icp_iommu_get_remapping_size(size_t size)
+{
+ return icp_adf_iommu_get_remapping_size(size);
+}
+#elif defined(ICP_OSAL_IOMMU)
+int osalIOMMUMap(uint64_t iova, uint64_t phaddr, size_t size);
+static inline int icp_iommu_map(void **iova, void *vaddr, size_t size)
+{
+ void *phaddr = (void *)virt_to_phys(vaddr);
+ *iova = phaddr;
+ return osalIOMMUMap((uintptr_t)*iova, phaddr, size);
+}
+
+int osalIOMMUUnmap(uint64_t iova, size_t size);
+static inline int icp_iommu_unmap(void *iova, size_t size)
+{
+ return osalIOMMUUnmap((uintptr_t)iova, size);
+}
+uint64_t osalIOMMUVirtToPhys(uint64_t iova);
+static inline uint64_t icp_iommu_virt_to_phys(void *iova)
+{
+ return osalIOMMUVirtToPhys((uintptr_t)iova);
+}
+size_t osalIOMMUgetRemappingSize(size_t size);
+static inline size_t icp_iommu_get_remapping_size(size_t size)
+{
+ return osalIOMMUgetRemappingSize(size);
+}
+#elif defined(ICP_QDM_IOMMU)
+int qdm_iommu_map(void **iova, void *vaddr, size_t size);
+int qdm_iommu_unmap(void *iova, size_t size);
+static inline int icp_iommu_map(void **iova, void *vaddr, size_t size)
+{
+ return qdm_iommu_map(iova, vaddr, size);
+}
+static inline int icp_iommu_unmap(void *iova, size_t size)
+{
+ return qdm_iommu_unmap(iova, size);
+}
+static inline size_t icp_iommu_get_remapping_size(size_t size)
+{
+ return (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
+}
+#else
+#define ICP_IOMMU_DISABLED
+static inline int icp_iommu_map(void **iova, void *vaddr, size_t size)
+{
+#ifdef __FreeBSD__
+ *iova = (void *)(uintptr_t)vtophys(vaddr);
+#else
+ *iova = (void *)(uintptr_t)virt_to_phys(vaddr);
+#endif
+ return 0;
+}
+
+static inline int icp_iommu_unmap(void *iova, size_t size)
+{
+ return 0;
+}
+static inline size_t icp_iommu_get_remapping_size(size_t size)
+{
+ return (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
+}
+#endif
+#endif
+#endif
--- /dev/null
+++ b/quickassist/utilities/libusdm_drv/linux/include/qdm.h
@@ -0,0 +1,61 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ * Copyright(c) 2016 Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ *
+ * qat-linux@intel.com
+ *
+ * BSD LICENSE
+ * Copyright(c) 2016 Intel Corporation.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __QDM_H__
+#define __QDM_H__
+#include <linux/types.h>
+
+struct device;
+
+int qdm_init(void);
+void qdm_exit(void);
+int qdm_attach_device(struct device *dev);
+int qdm_detach_device(struct device *dev);
+int qdm_iommu_map(dma_addr_t *iova, void *vaddr, size_t size);
+int qdm_iommu_unmap(dma_addr_t iova, size_t size);
+
+#endif /* __QDM_H__ */
--- /dev/null
+++ b/quickassist/utilities/libusdm_drv/linux/kernel_space/qae_mem_drv.c
@@ -0,0 +1,1590 @@
+/***************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * version: QAT1.7.L.4.7.0-00006
+ *
+ ***************************************************************************/
+
+/**
+ *
+ * @file qae_mem_drv.c
+ *
+ * @brief Kernel-space support for user-space contiguous memory allocation
+ *
+ */
+
+#include <asm/io.h>
+#include <linux/cdev.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/nodemask.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/version.h>
+#include <linux/hugetlb.h>
+
+#include "qae_mem_utils.h"
+
+#define DEV_MEM_NAME "usdm_drv"
+#define MODULE_NAME "USDM"
+#define DEV_MEM_MAJOR 0
+#define DEV_MEM_MAX_MINOR 1
+#define DEV_MEM_BASE_MINOR 0
+#define QAE_LOCAL_ENSURE(c, str, ret) \
+ if (!(c)) { \
+ mm_err("%s in file %s, ret = %d\n", \
+ str, __FILE__, ret); \
+ return ret; }
+
+#define FREE(ptr) kfree(ptr)
+#define IS_PAGE_ALIGNED(x) (PAGE_ALIGN((uintptr_t) (x)) == (uintptr_t) (x))
+
+/**
+******************************************************************************
+* @ingroup max_mem_numa
+* maximum amount of memory allocated in kernel space
+* @description
+* This is a command line parameter that defines the maximum
+* amount of memory allocated by the driver in kernel space.
+* Measured in kilobytes.
+*****************************************************************************/
+static uint32_t max_mem_numa = 0;
+/**
+******************************************************************************
+* @ingroup mem_allocated
+* amount of memory currently allocated in kernel space
+* @description
+* This variable holds the overall
+* amount of memory allocated by the driver in kernel space.
+* Measured in bytes.
+*****************************************************************************/
+static size_t mem_allocated = 0;
+/**
+******************************************************************************
+* @ingroup max_huge_pages
+* total number of huge pages currently reserved
+* @description
+* This variable holds the total number of
+* huge pages reserved by the memory driver.
+* Measured in number of huge pages.
+*****************************************************************************/
+static uint max_huge_pages = 0;
+/**
+******************************************************************************
+* @ingroup max_huge_pages_per_process
+* number of huge pages that can be allocated
+* for each user space process
+* @description
+* This variable holds the number of
+* huge pages allocated by each process.
+* Measured in number of huge pages.
+*****************************************************************************/
+static uint max_huge_pages_per_process = 0;
+
+
+module_param(max_mem_numa, uint, S_IRUGO);
+MODULE_PARM_DESC(max_mem_numa,
+ "Maximum number of allocatable memory in 1k units");
+
+module_param(max_huge_pages, uint, S_IRUGO);
+MODULE_PARM_DESC(max_huge_pages,
+ "Maximum number of huge pages enabled for the module");
+
+module_param(max_huge_pages_per_process, uint, S_IRUGO);
+MODULE_PARM_DESC(max_huge_pages_per_process,
+ "Maximum number of huge pages enabled for each process");
+
+/* Version 0.7.1:
+ * - Slab caching in user space introduced;
+ * - Slab hash introduced for fast searching;
+ * - Performance optimizations;
+ * - Adding huge pages support.
+ */
+static const char VERSION_STRING[]="Version 0.7.1";
+
+static DEFINE_MUTEX(dev_mem_lock);
+static user_mem_dev_t *mem_dev_numa = NULL;
+
+/*directory entry structure for debug root directory and debug file*/
+static struct dentry *qae_dbg_root_dir = NULL;
+static struct dentry *qae_dbg_slabs_file = NULL;
+
+typedef struct chr_drv_info_s
+{
+ unsigned major;
+ unsigned min_minor;
+ unsigned max_minor;
+ char *name;
+ struct cdev drv_cdev;
+ struct class *drv_class;
+ struct device *drv_class_dev;
+ unsigned num_devices;
+ unsigned unregistered;
+} chr_drv_info_t;
+
+typedef struct {
+ kdev_mem_info_t *head;
+ kdev_mem_info_t *tail;
+} slab_list_t;
+/* Kernel space hash for fast slab searching */
+static slab_list_t g_slab_list[PAGE_SIZE] = {{0}};
+
+extern int handle_other_ioctls(uint32_t cmd);
+/******************************************************************************
+ * debug: /sys/kernel/debug/qae_mem_dbg directory
+ * qae_mem_slabs file
+ * cat qae_mem_slabs shows the allocated slabs for each process with the
+ * physical and virtual start address
+ * echo "d processid virt_addr" > qae_mem_slabs
+ * echo "d processid phys_addr" > qae_mem_slabs
+ * write dump command to debug file, the next cat qae_mem_slabs command
+ * shows the 256 byte from address in hex and ascii format
+ * echo "c processid slabid" > qae_mem_slabs
+ * write dump command to debug file, the next cat qae_mem_slabs command
+ * shows the 32 x 64 allocation bit map for small buffers allocations
+ ******************************************************************************/
+
+/*****************************************************************************
+ memory mgt code begin
+*****************************************************************************/
+
+static inline uint64_t get_key(const uint64_t phys)
+{
+ /* Use bits 20-31 of a physical address as a hash key.
+ * It provides a good distribution for 1Mb/2Mb slabs
+ * and a moderate distribution for 128Kb/256Kb/512Kb slabs.
+ */
+ return (phys >> 20) & ~PAGE_MASK;
+}
+
+static inline void add_slab_to_hash(kdev_mem_info_t *slab)
+{
+ const size_t key = get_key(slab->phy_addr);
+
+ ADD_ELEMENT_TO_HEAD_LIST(slab, g_slab_list[key].head,
+ g_slab_list[key].tail, _kernel_hash);
+}
+
+static inline void del_slab_from_hash(kdev_mem_info_t *slab)
+{
+ const size_t key = get_key(slab->phy_addr);
+
+ REMOVE_ELEMENT_FROM_LIST(slab, g_slab_list[key].head,
+ g_slab_list[key].tail, _kernel_hash);
+}
+
+static inline kdev_mem_info_t *find_slab(const uint64_t phy_addr)
+{
+ const size_t key = get_key(phy_addr);
+ kdev_mem_info_t *slab = g_slab_list[key].head;
+
+ while (slab)
+ {
+ if (phy_addr == slab->phy_addr)
+ return slab;
+ slab = slab->pNext_kernel_hash;
+ }
+
+ return NULL;
+}
+
+/*
+ * Find memory information
+ */
+static kdev_mem_info_t*
+userMemGetInfo(struct file* fp, uint64_t id)
+{
+ user_proc_mem_list_t* list = NULL;
+ if(!fp)
+ {
+ mm_err("%s:%d Invalid file pointer\n",__func__,__LINE__ );
+ return NULL;
+ }
+ list = (user_proc_mem_list_t*)fp->private_data;
+ if(!list)
+ {
+ mm_info("%s:%d empty list\n",__func__,__LINE__);
+ return NULL;
+ }
+ return find_slab(id);
+}
+/*
+ * Allocate numa memory
+ */
+static dev_mem_info_t *
+userMemAlloc(struct file* fp, size_t size,
+ int node, int large_memory)
+{
+ block_ctrl_t *block_ctrl = NULL;
+ dev_mem_info_t *mem_info = NULL;
+ kdev_mem_info_t *kmem = NULL;
+ user_proc_mem_list_t *list = NULL;
+ void *phy_addr = NULL;
+ size_t totalInKBytes = 0;
+
+ if(!size || !fp)
+ {
+ mm_err("%s:%d Invalid parameter value [%zu] %p\n",
+ __func__, __LINE__, size, fp);
+ return NULL;
+ }
+
+ if(node != NUMA_NO_NODE)
+ {
+ /* node 0.. (MAX_NUMNODES-1) */
+ if(node >= 0 && node < MAX_NUMNODES)
+ {
+ if(!node_online(node))
+ {
+ mm_err("%s:%d Requested node %d is not online. "
+ "Using node 0 as default\n",
+ __func__, __LINE__,node);
+ node = 0;
+ }
+ }
+ else
+ {
+ /*greater than MAX_NUMNODES */
+ mm_err("%s:%d Requested node %d not present. "
+ "Using node 0 as default\n",
+ __func__, __LINE__,node);
+ node = 0;
+ }
+ }
+
+ /*
+ * Find the process allocation list
+ */
+ list = (user_proc_mem_list_t*)fp->private_data;
+ if (!list)
+ {
+ mm_err("%s:%d User process memory list is NULL \n",__func__,__LINE__);
+ return NULL;
+ }
+
+ size = icp_iommu_get_remapping_size(size);
+ totalInKBytes = DIV_ROUND_UP(mem_allocated + size, QAE_KBYTE);
+
+ /* for request > 2M mem_info control block allocated separately */
+ if(large_memory)
+ {
+ /*one page is used for block control information*/
+ const uint32_t pageSizeInKb = PAGE_SIZE / QAE_KBYTE;
+ if ( max_mem_numa && max_mem_numa < (totalInKBytes + pageSizeInKb))
+ {
+ mm_err(KERN_ERR "%s:%d Maximum NUMA allocation of %u kB reached "
+ "currently allocated %zu bytes requested %zu bytes\n",
+ __func__,__LINE__,max_mem_numa,
+ mem_allocated , (size_t)(size + PAGE_SIZE) );
+ return NULL;
+ }
+ mem_info = (dev_mem_info_t*) get_zeroed_page(GFP_KERNEL);
+ if ( !mem_info )
+ {
+ mm_err("%s:%d Unable to allocate control block\n",
+ __func__,__LINE__);
+ return NULL;
+ }
+ kmem = kmalloc (sizeof(kdev_mem_info_t), GFP_KERNEL);
+ if ( !kmem )
+ {
+ mm_err("%s:%d Unable to allocate Kernel control block\n",
+ __func__,__LINE__);
+ free_page((unsigned long) mem_info);
+ return NULL;
+ }
+ /* kmalloc is faster than kzalloc */
+ kmem->kmalloc_ptr = kmalloc_node(size, GFP_KERNEL, node);
+ if (!kmem->kmalloc_ptr || !IS_PAGE_ALIGNED(kmem->kmalloc_ptr))
+ {
+ mm_err("%s:%d Unable to allocate memory slab size %zu"
+ " or wrong alignment: %p\n",
+ __func__, __LINE__, size, kmem->kmalloc_ptr);
+ FREE(kmem->kmalloc_ptr);
+ FREE(kmem);
+ free_page((unsigned long) mem_info);
+ return NULL;
+ }
+ /* Initialize the huge page control */
+ kmem->huge_mem_ctrl = mem_info;
+ /* Update slab size */
+ kmem->size = size;
+ /* Update allocated size */
+ mem_allocated += (size + PAGE_SIZE);
+ }
+ else
+ {
+ if ( max_mem_numa && max_mem_numa < totalInKBytes )
+ {
+ mm_err(KERN_ERR "%s:%d Maximum NUMA allocation of %u kB reached"
+ " currently allocated %zu bytes requested %zu bytes\n",
+ __func__, __LINE__, max_mem_numa,
+ mem_allocated, size);
+ return NULL;
+ }
+ block_ctrl = kmalloc_node(size, GFP_KERNEL, node);
+ if (!block_ctrl || !IS_PAGE_ALIGNED(block_ctrl))
+ {
+ mm_err("%s:%d Unable to allocate memory slab"
+ " or wrong alignment: %p\n",
+ __func__, __LINE__, block_ctrl);
+ FREE(block_ctrl);
+ return NULL;
+ }
+
+ kmem = kmalloc (sizeof(kdev_mem_info_t), GFP_KERNEL);
+ if ( !kmem )
+ {
+ mm_err("%s:%d Unable to allocate Kernel control block\n",
+ __func__,__LINE__);
+ FREE(block_ctrl);
+ return NULL;
+ }
+
+ /* It is faster to alloc a slab and memset it later vs. kZalloc_node. */
+ memset(block_ctrl, 0, sizeof(block_ctrl_t));
+ mem_info = &block_ctrl->mem_info;
+ kmem->kmalloc_ptr = block_ctrl;
+ /* Huge page control block not applicable here for small slabs. */
+ kmem->huge_mem_ctrl = NULL;
+ /* Update slab size */
+ kmem->size = size;
+ /* Update allocated size */
+ mem_allocated += size;
+ }
+ mem_info->nodeId = node;
+ mem_info->size = size;
+ mem_info->type = large_memory;
+#ifdef ICP_IOMMU_DISABLED
+ icp_iommu_map(&phy_addr, kmem->kmalloc_ptr, mem_info->size);
+#else
+ if (icp_iommu_map(&phy_addr, kmem->kmalloc_ptr, mem_info->size))
+ {
+ mm_err("%s:%d iommu map failed\n",__func__,__LINE__);
+ if( LARGE == mem_info->type )
+ {
+ free_page((unsigned long) mem_info);
+ mem_allocated -= PAGE_SIZE;
+ }
+ /* For small block size, kmalloc_ptr points to block_ctrl */
+ FREE(kmem->kmalloc_ptr);
+ FREE(kmem);
+ mem_allocated -= size;
+ return NULL;
+ }
+#endif
+ mem_info->phy_addr = (uintptr_t) phy_addr;
+ kmem->phy_addr = (uintptr_t) phy_addr;
+ list->allocs_nr++;
+ ADD_ELEMENT_TO_END_LIST(kmem, list->head, list->tail, _kernel);
+ add_slab_to_hash(kmem);
+
+ return mem_info;
+}
+/*
+ * Free slab
+ */
+static void
+free_slab(user_proc_mem_list_t *list, kdev_mem_info_t *slab,
+ const int cleanup)
+{
+ void *ptr = slab->kmalloc_ptr;
+ const size_t size = slab->size;
+
+ icp_iommu_unmap((void *) (uintptr_t) slab->phy_addr, size);
+ REMOVE_ELEMENT_FROM_LIST(slab, list->head, list->tail, _kernel);
+
+ del_slab_from_hash(slab);
+
+ /* If we are dealing with huge pages, then huge_mem_ctrl is not NULL
+ * and the slab can be freed.
+ */
+ if(slab->huge_mem_ctrl)
+ {
+ free_page((unsigned long) slab->huge_mem_ctrl);
+ mem_allocated -= PAGE_SIZE;
+ }
+
+ if (cleanup)
+ {
+ /* Cleanup released memory. */
+ memset(ptr, 0, size);
+ }
+ FREE(ptr);
+
+ /* Destroy the slab as it is no longer needed */
+ FREE(slab);
+
+ mem_allocated -= size;
+ list->allocs_nr -= 1;
+}
+/*
+ * Free memory
+ */
+static int
+userMemFree(struct file* fp, uint64_t id)
+{
+ user_proc_mem_list_t* list = NULL;
+ kdev_mem_info_t *kmem = NULL;
+
+ if (!fp)
+ {
+ mm_err("%s:%d Invalid file pointer\n",__func__,__LINE__);
+ return -EIO;
+ }
+ list = (user_proc_mem_list_t *)fp->private_data;
+ if(!list)
+ {
+ mm_warning("%s:%d No slab to free\n",__func__,__LINE__);
+ return -EIO;
+ }
+ kmem = find_slab(id);
+ if (kmem)
+ {
+ /* Free memory slab, no cleanup. */
+ free_slab(list, kmem, 0);
+ return 0;
+ }
+ mm_warning("%s:%d Could not find slab with id: %llu \n",
+ __func__,__LINE__,id);
+ return -EIO;
+}
+/*
+ * Clean all memory for a process
+ */
+static int
+userMemFreeSlabs(struct file* fp)
+{
+ kdev_mem_info_t* kmem = NULL, *next = NULL;
+ user_proc_mem_list_t* list = NULL;
+
+ if (!fp)
+ {
+ mm_err("%s:%d Invalid file pointer\n",__func__,__LINE__);
+ return -EIO;
+ }
+ list = (user_proc_mem_list_t*)fp->private_data;
+ if(!list)
+ {
+ mm_warning("%s:%d No slab to free\n",__func__,__LINE__);
+ return -EIO;
+ }
+ max_huge_pages += list->hugepages_nr;
+#ifdef ICP_DEBUG
+ mm_info("[FREE] pid: %u return number of pages: %llu total number: %u\n",
+ current->pid,
+ list->hugepages_nr,
+ max_huge_pages);
+#endif
+ list->hugepages_nr = 0;
+ kmem = list->head;
+ while(kmem)
+ {
+#ifdef ICP_DEBUG
+ mm_warning("%s:%d Potential memory leak, Process Id %d "
+ "Virtual address %p "
+ "Physical address %px has allocated block\n",
+ __func__,__LINE__,
+ list->pid,
+ kmem->kmalloc_ptr,
+ (void*)kmem->phy_addr);
+#endif
+ next = kmem->pNext_kernel;
+ /* Free and cleanup memory slab. */
+ free_slab(list, kmem, 1);
+ kmem = next;
+ }
+ return 0;
+}
+
+/*****************************************************************************
+ memory mgt code end
+*****************************************************************************/
+
+static int
+dev_mem_alloc(struct file* fp, uint32_t cmd, unsigned long arg)
+{
+ unsigned long ret = 0;
+ dev_mem_info_t* mem_info = NULL;
+ dev_mem_info_t user_mem_info = {0};
+
+ if( fp == NULL )
+ {
+ mm_err("%s:%d Invalid file descriptor\n",__func__,__LINE__);
+ return -EIO;
+ }
+ if( fp->private_data == NULL)
+ {
+ mm_err("%s:%d Invalid file private data \n",__func__,__LINE__);
+ return -EIO;
+ }
+ ret = copy_from_user(&user_mem_info,
+ (dev_mem_info_t *)arg,
+ sizeof(dev_mem_info_t));
+ if (unlikely(ret))
+ {
+ mm_err("%s:%d copy_from_user failed, ret=%lu\n",
+ __func__,__LINE__,ret);
+ return -EIO;
+ }
+ mem_info = userMemAlloc(fp, user_mem_info.size,
+ (int) user_mem_info.nodeId,
+ user_mem_info.type);
+ if (!mem_info)
+ {
+ mm_err("%s:%d userMemAlloc failed\n",__func__,__LINE__);
+ return -ENOMEM;
+ }
+ ret = copy_to_user((dev_mem_info_t *)arg,
+ mem_info,
+ sizeof(dev_mem_info_t));
+ if (unlikely(ret))
+ {
+ (void) userMemFree(fp, user_mem_info.phy_addr);
+ mm_err("%s:%d copy_to_user failed, ret=%lu\n",
+ __func__,__LINE__,ret);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int
+dev_mem_free(struct file *fp, uint32_t cmd, unsigned long arg)
+{
+ unsigned long ret = 0;
+ dev_mem_info_t user_mem_info = {0};
+
+ if( fp == NULL )
+ {
+ mm_err("%s:%d Invalid file descriptor\n",__func__,__LINE__);
+ return -EIO;
+ }
+ if( fp->private_data == NULL)
+ {
+ mm_err("%s:%d Invalid file private data\n",__func__,__LINE__);
+ return -EIO;
+ }
+ ret = copy_from_user(&user_mem_info,
+ (dev_mem_info_t *)arg,
+ sizeof(dev_mem_info_t));
+ if (ret)
+ {
+ mm_err("%s:%d dev_mem_free: copy_from_user failed, ret=%lu\n",
+ __func__,__LINE__,ret);
+ return -EIO;
+ }
+ return userMemFree(fp, user_mem_info.phy_addr);
+}
+
+static int
+dev_release_pid(struct file *fp, uint32_t cmd, unsigned long arg)
+{
+ return userMemFreeSlabs(fp);
+}
+static int
+dev_get_user_page(struct file *fp, uint32_t cmd, unsigned long arg)
+{
+ unsigned long ret;
+ struct page *page;
+ int errno = 0;
+ user_page_info_t user_mem_info = {0};
+
+ if( fp == NULL )
+ {
+ mm_err("%s:%d Invalid file descriptor\n",__func__,__LINE__);
+ return -EIO;
+ }
+ if( fp->private_data == NULL)
+ {
+ mm_err("%s:%d Invalid file private data\n",__func__,__LINE__);
+ return -EIO;
+ }
+ ret = copy_from_user(&user_mem_info, (user_page_info_t *)arg,
+ sizeof(user_page_info_t));
+ if (ret)
+ {
+ mm_err("%s:%d dev_get_user_page: copy_from_user failed, ret=%lu\n",
+ __func__,__LINE__,ret);
+ return -EIO;
+ }
+
+ errno = get_user_pages_fast(
+ (unsigned long)user_mem_info.virt_addr, 1, 1, &page);
+ if ( errno != 1 )
+ {
+ user_mem_info.phy_addr = 0x00;
+ mm_err("%s:%d dev_get_user_page: get_user_pages_fast failed, ret=%d\n",
+ __func__,__LINE__,errno);
+ return -EIO;
+ }
+ else
+ {
+ if (PageHuge(page))
+ {
+ user_mem_info.phy_addr = page_to_phys(page);
+ }
+ else
+ {
+ user_mem_info.phy_addr = 0x00;
+ }
+
+ }
+ put_page(page);
+
+ ret = copy_to_user( (user_page_info_t *)arg, &user_mem_info,
+ sizeof(user_page_info_t));
+ if (ret)
+ {
+ mm_err("%s:%d dev_get_user_page: copy_to_user failed, ret=%lu\n",
+ __func__,__LINE__,ret);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int
+dev_num_hp_get(struct file *fp, uint32_t cmd, unsigned long arg)
+{
+ unsigned long ret = 0;
+ uint actual_num_hugepages = 0;
+
+ if( fp == NULL )
+ {
+ mm_err("%s:%d Invalid file descriptor\n",__func__,__LINE__);
+ return -EIO;
+ }
+ if( fp->private_data == NULL)
+ {
+ mm_err("%s:%d Invalid file private data\n",__func__,__LINE__);
+ return -EIO;
+ }
+ actual_num_hugepages = min(max_huge_pages, max_huge_pages_per_process);
+ ret = copy_to_user((uint32_t *)arg,
+ &actual_num_hugepages,
+ sizeof(uint32_t));
+ if (ret)
+ {
+ mm_err("%s:%d dev_num_hp_get: copy_to_user failed, ret=%lu\n",
+ __func__,__LINE__,ret);
+ return -EIO;
+ }
+ max_huge_pages -= actual_num_hugepages;
+ ((user_proc_mem_list_t*)fp->private_data)->hugepages_nr =
+ actual_num_hugepages;
+#ifdef ICP_DEBUG
+ mm_info("[ALLOC] pid: %u max_huge_pages: %u actual_num_hugepages: %u\n",
+ current->pid,
+ max_huge_pages,
+ actual_num_hugepages);
+#endif
+ return 0;
+}
+
+static long
+mem_ioctl(struct file *fp, uint32_t cmd, unsigned long arg)
+{
+ int ret = 0;
+ switch(cmd) {
+ case DEV_MEM_IOC_MEMALLOC:
+ mutex_lock(&dev_mem_lock);
+ ret = dev_mem_alloc(fp, cmd, arg);
+ mutex_unlock(&dev_mem_lock);
+ if (ret)
+ {
+ return -ENOMEM;
+ }
+ break;
+
+ case DEV_MEM_IOC_MEMFREE:
+ mutex_lock(&dev_mem_lock);
+ ret = dev_mem_free(fp, cmd, arg);
+ mutex_unlock(&dev_mem_lock);
+ if (unlikely(ret))
+ {
+ return -EIO;
+ }
+ break;
+
+ case DEV_MEM_IOC_RELEASE:
+ mutex_lock(&dev_mem_lock);
+ ret = dev_release_pid(fp, cmd, arg);
+ mutex_unlock(&dev_mem_lock);
+ if (unlikely(ret))
+ {
+ return -EIO;
+ }
+ break;
+
+ case DEV_MEM_IOC_GET_NUM_HPT:
+ mutex_lock(&dev_mem_lock);
+ ret = dev_num_hp_get(fp, cmd, arg);
+ mutex_unlock(&dev_mem_lock);
+ if (unlikely(ret))
+ {
+ return -EIO;
+ }
+ break;
+
+ case DEV_MEM_IOC_GET_USER_PAGE:
+ mutex_lock(&dev_mem_lock);
+ ret = dev_get_user_page(fp, cmd, arg);
+ mutex_unlock(&dev_mem_lock);
+ if (unlikely(ret))
+ {
+ return -EIO;
+ }
+ break;
+
+ default:
+ ret = handle_other_ioctls(cmd);
+ return ret;
+ }
+ return 0;
+}
+
+static int cmd_mmap_access(struct vm_area_struct *vma,
+ unsigned long addr, void *buf, int len, int write)
+{
+ int size = vma->vm_end - addr;
+ unsigned long offs = addr - vma->vm_start;
+ unsigned long phy_addr = vma->vm_pgoff << PAGE_SHIFT;
+ void *virt_addr = phys_to_virt(phy_addr);
+
+ len = min(len, size);
+
+ if (write)
+ memcpy(virt_addr + offs, buf, len);
+ else
+ memcpy(buf, virt_addr + offs, len);
+
+ return len;
+}
+
+static struct vm_operations_struct cmd_mmap_operations = {
+ .access = cmd_mmap_access,
+};
+
+static int
+mem_mmap(struct file *fp, struct vm_area_struct *vma)
+{
+ int ret = 0;
+ uint64_t id = 0;
+ unsigned long phys_kmalloc_area = 0;
+ kdev_mem_info_t *kmem = NULL;
+ unsigned long size = vma->vm_end - vma->vm_start;
+ id = vma->vm_pgoff << PAGE_SHIFT;
+
+ mutex_lock(&dev_mem_lock);
+ kmem = userMemGetInfo(fp, id);
+ if (!kmem)
+ {
+ mutex_unlock(&dev_mem_lock);
+ mm_err("%s:%d cannot find meminfo\n",__func__,__LINE__);
+ return -ENOMEM;
+ }
+
+ /* Ensure memory mapping does not exceed the allocated memory region */
+ if (size > kmem->size)
+ {
+ mutex_unlock(&dev_mem_lock);
+ mm_err("%s:%d cannot map allocated memory region\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ /* There is an agreement that mmap(PAGE_SIZE) means control block. */
+ if (PAGE_SIZE == size)
+ {
+ phys_kmalloc_area = virt_to_phys(kmem->huge_mem_ctrl);
+ }
+ /* Any other size means memory block. */
+ else
+ {
+ phys_kmalloc_area = virt_to_phys(kmem->kmalloc_ptr);
+ }
+ mutex_unlock(&dev_mem_lock);
+
+ vma->vm_ops = &cmd_mmap_operations;
+ ret = remap_pfn_range(vma,
+ vma->vm_start,
+ phys_kmalloc_area >> PAGE_SHIFT,
+ size,
+ vma->vm_page_prot);
+ if (unlikely(ret))
+ {
+ mm_err("%s:%d remap_pfn_range failed, ret = %d\n",
+ __func__,__LINE__,ret);
+ }
+ return ret;
+}
+static int
+mem_open(struct inode *inp, struct file *fp)
+{
+ user_proc_mem_list_t *list = NULL;
+ mutex_lock(&dev_mem_lock);
+ if (!fp->private_data)
+ {
+ list = kzalloc(sizeof(user_proc_mem_list_t), GFP_KERNEL);
+ if(!list)
+ {
+ mm_err("%s:%d memory allocation failed\n",
+ __func__,__LINE__);
+ mutex_unlock(&dev_mem_lock);
+ return -ENODEV;
+ }
+ fp->private_data = list;
+ ADD_ELEMENT_TO_END_LIST(list, mem_dev_numa->head,
+ mem_dev_numa->tail, );
+ list->pid = current->tgid;
+ }
+ mutex_unlock(&dev_mem_lock);
+ return 0;
+}
+
+static inline void remove_element(user_proc_mem_list_t * p)
+{
+ if (NULL == p)
+ return;
+ if (NULL != p->pPrev) {
+ p->pPrev->pNext = p->pNext;
+ }
+ if (NULL != p->pNext) {
+ p->pNext->pPrev = p->pPrev;
+ }
+}
+
+static int
+mem_release(struct inode *inp, struct file *fp)
+{
+ user_proc_mem_list_t *list = NULL;
+ mutex_lock(&dev_mem_lock);
+ list=(user_proc_mem_list_t *)fp->private_data;
+ if( list )
+ {
+ (void)userMemFreeSlabs(fp);
+ if (NULL != mem_dev_numa)
+ {
+ REMOVE_ELEMENT_FROM_LIST(list,
+ mem_dev_numa->head, mem_dev_numa->tail, );
+ }
+ else
+ {
+ remove_element(list);
+ }
+ FREE(list);
+ fp->private_data=NULL;
+ }
+ mutex_unlock(&dev_mem_lock);
+ return 0;
+}
+
+static struct file_operations mem_ops = {
+ owner:THIS_MODULE,
+ mmap:mem_mmap,
+ unlocked_ioctl:mem_ioctl,
+ compat_ioctl:mem_ioctl,
+ open:mem_open,
+ release:mem_release,
+};
+
+static chr_drv_info_t mem_drv_info = {
+ major:0,
+ min_minor:DEV_MEM_BASE_MINOR,
+ max_minor:DEV_MEM_MAX_MINOR,
+ name:DEV_MEM_NAME,
+};
+
+static int32_t
+chr_drv_create_class(chr_drv_info_t* drv_info)
+{
+ QAE_LOCAL_ENSURE(drv_info,
+ "chr_drv_create_class(): Invalid parameter value ",
+ -EINVAL);
+
+ drv_info->drv_class = class_create(THIS_MODULE, drv_info->name);
+ if (IS_ERR(drv_info->drv_class))
+ {
+ mm_err("%s:%d class_create failed\n",__func__,__LINE__);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static void
+chr_drv_destroy_class(chr_drv_info_t* drv_info)
+{
+ if (NULL == drv_info)
+ {
+ mm_err("%s:%d Invalid parameter value\n",__func__,__LINE__);
+ return;
+ };
+ class_destroy( drv_info->drv_class );
+ drv_info->drv_class = NULL;
+ return;
+}
+
+static inline void
+chr_drv_destroy_device(chr_drv_info_t *drv_info)
+{
+ if (NULL == drv_info)
+ {
+ mm_err("%s:%d Invalid parameter value\n",__func__,__LINE__);
+ return;
+ }
+ if (NULL != drv_info->drv_class_dev)
+ {
+ device_destroy(drv_info->drv_class, MKDEV(drv_info->major,
+ DEV_MEM_BASE_MINOR));
+ }
+ cdev_del(&(drv_info->drv_cdev));
+ unregister_chrdev_region( MKDEV(drv_info->major, DEV_MEM_BASE_MINOR),
+ drv_info->max_minor);
+ return;
+}
+
+static int
+chr_drv_create_device(chr_drv_info_t *drv_info)
+{
+    int ret = 0;
+    dev_t devid = 0;
+
+    QAE_LOCAL_ENSURE(drv_info,
+            "chr_drv_create_device(): Invalid parameter value ",
+            -ENODEV);
+    ret = alloc_chrdev_region(&devid,
+                              drv_info->min_minor,
+                              drv_info->max_minor,
+                              drv_info->name);
+    if (unlikely(ret))
+    {
+        mm_err("%s:%d Unable to allocate chrdev region\n",
+               __func__,__LINE__);
+        return -ENOMEM;
+    }
+    drv_info->major = MAJOR(devid);
+    drv_info->drv_cdev.owner=THIS_MODULE;
+    cdev_init(&(drv_info->drv_cdev), &mem_ops);
+    ret = cdev_add(&(drv_info->drv_cdev), devid, drv_info->max_minor);
+    if (unlikely(ret))
+    {
+        mm_err("%s:%d cdev add failed\n",__func__,__LINE__);
+        chr_drv_destroy_device(drv_info);
+        return -ENOENT;
+    }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
+    drv_info->drv_class_dev = device_create(drv_info->drv_class,
+                  NULL, MKDEV(drv_info->major, DEV_MEM_BASE_MINOR),
+                  NULL, "%s", drv_info->name);
+#else
+    drv_info->drv_class_dev = device_create(drv_info->drv_class,
+                  NULL, MKDEV(drv_info->major, DEV_MEM_BASE_MINOR),
+                  drv_info->name);
+#endif
+    /* device_create() reports failure via ERR_PTR(), never NULL */
+    if( IS_ERR_OR_NULL(drv_info->drv_class_dev) )
+    {
+        mm_err("%s:%d device_create failed\n",__func__,__LINE__);
+        chr_drv_destroy_device(drv_info);
+        return -ENOMEM;
+    }
+    return 0;
+}
+
+static int32_t register_mem_device_driver(void)
+{
+ int ret = 0;
+ mutex_init(&dev_mem_lock);
+ mem_dev_numa = kzalloc(sizeof(user_mem_dev_t), GFP_KERNEL);
+ if(!mem_dev_numa)
+ {
+ mm_err("failed to allocate memory for numa mem device\n");
+ return -ENOMEM;
+ }
+ ret = chr_drv_create_class(&mem_drv_info);
+ if(unlikely(ret))
+ {
+ mm_err("failed to create device driver class\n");
+ FREE(mem_dev_numa);
+ return -ENODEV;
+ }
+ ret = chr_drv_create_device(&mem_drv_info);
+ if(unlikely(ret))
+ {
+ mm_err("failed to create mem numa device driver\n");
+ chr_drv_destroy_class(&mem_drv_info);
+ FREE(mem_dev_numa);
+ return -ENODEV;
+ }
+ mem_drv_info.unregistered = 0;
+ return 0;
+}
+/*
+ * unregister the device driver
+ */
+static void unregister_mem_device_driver(void)
+{
+ if(!mem_drv_info.unregistered)
+ {
+ chr_drv_destroy_device(&mem_drv_info);
+ chr_drv_destroy_class(&mem_drv_info);
+ FREE(mem_dev_numa);
+ mem_dev_numa = NULL;
+ mem_drv_info.unregistered = 1;
+ }
+}
+
+static inline char printable(char sym)
+{
+ if (sym >= 0x20 && sym <= 0x7E)
+ /*check if printable ascii*/
+ return sym;
+ else
+ /*else put out a dot*/
+ return '.';
+}
+
+static char qae_dbg_ascii[128];
+static char qae_dbg_command[128];
+static char qae_dbg_slab_data[4096];
+/*dumps memory data in 16 8 hex bytes and 8 ascii chars columns and 32 rows*/
+static int
+dumpData(void *start, void *end)
+{
+ int row = 0;
+ int col = 0;
+ char *src = start;
+ char *endaddr = end;
+ size_t offs = 0;
+ const int ROWS = 32;
+ const int COLUMNS = 8;
+
+ for (row = 0; row < ROWS; ++row)
+ {
+ size_t ascii = 0;
+
+ for (col = 0; col < COLUMNS; ++col)
+ {
+ if (src > endaddr)
+ {
+ offs += scnprintf(qae_dbg_slab_data + offs,
+ sizeof(qae_dbg_slab_data) - offs, " ");
+ ascii += scnprintf(qae_dbg_ascii + ascii,
+ sizeof(qae_dbg_ascii) - ascii, " ");
+ }
+ else
+ {
+ /*in the first 8 columns print bytes in hex with 2 nibbles*/
+ offs += scnprintf(qae_dbg_slab_data + offs,
+ sizeof(qae_dbg_slab_data) - offs, "%02hhx ", *src);
+ /*in the last 8 columns print ascii char or dot*/
+ ascii += scnprintf(qae_dbg_ascii + ascii,
+ sizeof(qae_dbg_ascii) - ascii, "%c ", printable(*src));
+ src++;
+ }
+ }
+ offs += scnprintf(qae_dbg_slab_data + offs,
+ sizeof(qae_dbg_slab_data) - offs, "%.128s\n", qae_dbg_ascii);
+ if (src > endaddr)
+ return offs;
+ }
+ return offs;
+}
+/*
+ * findSlabsForPid - find the link list of slabs for a given pid
+ */
+static kdev_mem_info_t*
+findSlabsForPid(const uint64_t pid)
+{
+ if (mem_dev_numa)
+ {
+ user_proc_mem_list_t *list = mem_dev_numa->head;
+ while (list)
+ {
+ if (list->pid == pid )
+ return list->head;
+ list=list->pNext;
+ }
+ }
+ return NULL;
+}
+/*
+ * execute dump command
+ * returns length of data in output buffer
+ */
+static int
+execDump(kdev_mem_info_t* slab, const uintptr_t param, const uint64_t pid)
+{
+ uintptr_t endaddr = 0;
+ uintptr_t startaddr = param;
+ uintptr_t offset = 0;
+ size_t len = 0;
+
+ mm_info("Process dump command \n");
+ /* traverse thru slabs */
+ while (slab)
+ {
+ uintptr_t phy_addr = (uintptr_t) slab->phy_addr;
+ uintptr_t virt_addr = (uintptr_t) slab->kmalloc_ptr;
+ /*calculate virtual address end of slab*/
+ endaddr = virt_addr + slab->size;
+ /*check if this slab was sought after by virtual address*/
+ if (startaddr >= virt_addr && startaddr < endaddr)
+ {
+ offset = startaddr - virt_addr;
+ mm_info("Block found: "
+ "start %p block end %p dump addr %p offset %p\n",
+ (void *) virt_addr, (void *) endaddr,
+ (void *) startaddr, (void *) offset);
+ break;
+ }
+ /*calculate physical address end of slab*/
+ endaddr = phy_addr + slab->size;
+ /*check if this slab was sought after by phy address*/
+ if (startaddr >= phy_addr && startaddr < endaddr)
+ {
+ offset = startaddr - phy_addr;
+ mm_info("Block found (using phy_addr): "
+ "start %p block end %p dump addr %p offset %p\n",
+ (void *) phy_addr, (void *) endaddr,
+ (void *) startaddr, (void *) offset);
+ break;
+ }
+ /* take next slab if no hit */
+ slab = slab->pNext_kernel;
+ }
+ /* log slab not found */
+ if( !slab )
+ {
+ len = scnprintf(qae_dbg_slab_data, sizeof(qae_dbg_slab_data),
+ "Slab not found PID %llu Address %p\n",
+ pid, (void *) startaddr);
+ }
+ else /*dump 256 byte of slab data */
+ {
+ startaddr = (uintptr_t) slab + offset;
+ endaddr = (uintptr_t) slab +
+ slab->size - 1;
+ len = dumpData((void *) startaddr, (void *) endaddr);
+ }
+ return len;
+}
+/*
+ * execute dump control area command
+ * returns length of data in output buffer
+ */
+static int32_t
+execDumpControl(kdev_mem_info_t* slab, const uintptr_t param, const uint64_t pid)
+{
+ uint64_t id = param;
+ uintptr_t endaddr = 0;
+ size_t len = 0;
+
+ /*traverse thru slabs search by slab id*/
+ while(slab)
+ {
+ endaddr = slab->phy_addr + slab->size;
+ if (id >= slab->phy_addr && id < endaddr)
+ {
+ break;
+ }
+ slab = slab->pNext_kernel;
+ }
+ if( !slab ) /* log slab not found*/
+ {
+ len = scnprintf(qae_dbg_slab_data, sizeof(qae_dbg_slab_data),
+ "Slab not found PID %llu slab ID %llu\n", pid, id);
+ }
+ else /*dump bitmap*/
+ {
+ int row;
+ uint64_t bitmap_row,mask;
+ /* banner message */
+ len = scnprintf(qae_dbg_slab_data, sizeof(qae_dbg_slab_data),
+ "Small buffer allocation bitmap \n Slab id %llu \n", id);
+ /* display 0/1 in bitmap positions throughout the bitmap */
+ for ( row = 0; row < BITMAP_LEN; ++row )
+ {
+ /* The slab does not contain any bitmap information anymore.
+ * We must now access with kmalloc_ptr */
+ bitmap_row = ((block_ctrl_t*)slab->kmalloc_ptr)->bitmap[row];
+ for ( mask = 1ULL<<(QWORD_WIDTH-1); mask; mask>>=1)
+ {
+ char bit = '0';
+ if ( mask & bitmap_row )
+ {
+ bit = '1';
+ }
+ len += scnprintf(qae_dbg_slab_data + len,
+ sizeof(qae_dbg_slab_data) - len, "%c", bit);
+ }
+ len += scnprintf(qae_dbg_slab_data + len,
+ sizeof(qae_dbg_slab_data) - len, "\n");
+ }
+ }
+ return len;
+}
+/* processCommand
+ * performs the command found in the command buffer
+ * returns the number of characters the debug
+ * buffer has after command was executed
+ */
+static int
+processCommand(void)
+{
+    char *arg = NULL;
+    char *cmd = NULL;
+    char command = '\0'; /*command char c/d*/
+    uint64_t param = 0; /*command parameter*/
+    uint64_t pid = 0; /*process id*/
+    kdev_mem_info_t* slab = NULL; /*slab the info is required for*/
+    size_t len = 0; /*length of string in output buffer*/
+
+    command = qae_dbg_command[0];
+    if ('\0' == command) /*check if there is a command*/
+    {
+        return 0;
+    }
+    /* Search for a first numeric argument after the command itself. */
+    cmd = strpbrk(qae_dbg_command, "0123456789");
+    arg = strsep(&cmd, " ");
+    if (NULL != arg) {
+        int status = kstrtou64(arg, 0, &pid);
+        pid *= (status == 0);
+
+        /* Find a next argument. */
+        arg = strsep(&cmd, " ");
+        if (NULL != arg)
+        {
+            status = kstrtou64(arg, 0, &param);
+            param *= (status == 0);
+        }
+    }
+    mm_info("%s:%d "
+            "Command %c Param %llu %llu Buffer %s Arg %s\n",
+            __func__, __LINE__, command, pid, param, qae_dbg_command, arg);
+    /* Destroy the original command. */
+    qae_dbg_command[0] = '\0';
+
+    switch (command)
+    {
+        case 'd':
+            slab = findSlabsForPid(pid); /* find slab for process id*/
+            if(!slab)
+            {
+                mm_info("%s:%d "
+                        "Could not find slab for process id: %llu\n",
+                        __func__,__LINE__,pid);
+                return 0;
+            }
+            /*dump memory content*/
+            len = execDump(slab,param,pid);
+            break;
+        case 'c':
+            slab = findSlabsForPid(pid); /* find slab for process id*/
+            if(!slab)
+            {
+                mm_info("%s:%d "
+                        "Could not find slab for process id: %llu\n",
+                        __func__,__LINE__,pid);
+                return 0;
+            }
+            /* control block data (bitmap) */
+            len = execDumpControl(slab,param,pid);
+            break;
+        case 't':
+            /* print total allocated NUMA memory */
+            len = scnprintf(qae_dbg_slab_data, sizeof(qae_dbg_slab_data),
+                            "Total allocated NUMA memory: %zu bytes\n",
+                            mem_allocated);
+            break;
+        default:
+            len = scnprintf(qae_dbg_slab_data, sizeof(qae_dbg_slab_data),
+                            "Invalid command %c\n", command);
+            break;
+    }
+    return len;
+}
+/* print info about a slab in debug buffer
+ * return number of byte in buffer
+ * 0 return value will end the file read operation
+ * each time this function is called one slab data
+ * is entered in the debug buffer
+ * process and slab ptrs are saved in static variables
+ * to traverse the linked list by file read until a 0
+ * return value is received.
+ */
+static int
+getMemInfo(user_proc_mem_list_t** pmem_list)
+{
+ /*memory info for slab in memory list*/
+ static kdev_mem_info_t* mem_info;
+ /*memory list element of current process*/
+ user_proc_mem_list_t* mem_list = *pmem_list;
+ int length = 0;
+ /*initialise list of processes that allocated slabs*/
+ if (!mem_info && !mem_list )
+ {
+ mem_list = mem_dev_numa->head;
+ /*return if list is empty*/
+ if ( !mem_list)
+ return 0;
+ mem_info = mem_list->head;
+ }
+ /* iterate through all processes in the list*/
+ while(mem_list)
+ {
+ /*check if there is a valid slab entry*/
+ if(mem_info)
+ {
+ length = scnprintf(qae_dbg_slab_data, sizeof(qae_dbg_slab_data),
+ "Pid %d, Slab Id %llu \n"
+ "Virtual address %p, Physical Address %llx, Size %lld\n",
+ mem_list->pid, mem_info->phy_addr, mem_info->kmalloc_ptr,
+ mem_info->phy_addr, mem_info->size);
+ /*advance slab pointer for next call*/
+ mem_info = mem_info->pNext_kernel;
+ /*send slab info into read buffer*/
+ break;
+ }
+ else
+ {
+ /* null slab ptr in list of previous process
+ * get next process from list*/
+ mem_list = mem_list->pNext;
+ /*get first slab from next list element*/
+ if(mem_list)
+ mem_info = mem_list->head;
+ }
+ }
+ /* if at the end of process list chain*/
+ if(!mem_list)
+ {
+ mem_list = mem_dev_numa->head;
+ mem_info = NULL;
+ }
+ /* save current process in list in a static for next call*/
+ *pmem_list = mem_list;
+ return length;
+}
+/*
+*qae_mem_update_slab_data
+* updates data in debug buffer depending on last command
+* open - non-null if called from debug file open routine
+* otherwise 0
+*/
+static int
+qae_mem_update_slab_data(int open)
+{
+ /* memory list of current process*/
+ static user_proc_mem_list_t* mem_list;
+ static int count; /*number of chars in debug buffer*/
+ if( !mem_dev_numa )
+ return 0;
+ /* if file just opened initialise; make sure
+ * list of slabs are generated from the top
+ * if qae_dbg_command buffer is empty */
+ if(open)
+ {
+ mem_list = NULL;
+ count = 0;
+ return 0;
+ }
+ /* last time a buffer with chars were sent in response to read operation
+ return 0 now to complete read operation.*/
+ if(count)
+ {
+ count = 0;
+ return 0;
+ }
+ /* process command and report to read op if there is any result*/
+ count = processCommand();
+ if(count)
+ return count;
+ /*get next slab info into debug data buffer*/
+ /* when 0 is returned it marks the end of buffer list*/
+ /* and will end the file read operation as well*/
+ return getMemInfo(&mem_list);
+}
+/*read function for debug file
+ returns number of bytes read
+ read operation completes when 0 is returned here*/
+static ssize_t
+qae_mem_slabs_data_read(struct file* filp, char __user *buffer,
+                  size_t count, loff_t * pos)
+{
+    /*update data in debug buffer*/
+    int data_len = qae_mem_update_slab_data(false);
+    /*check length and position */
+    if( 0 == data_len || *pos < 0 || *pos >= data_len )
+        return 0;
+    /* Clamp count to the remaining data. The previous guard compared
+     * (*pos + count) against ULLONG_MAX, which is always false for a
+     * 64-bit sum, so it never prevented the addition from wrapping. */
+    if (count > (size_t)(data_len - *pos))
+        count = data_len - *pos;
+    /*copy from kernel buffer to user*/
+    if( copy_to_user(buffer ,qae_dbg_slab_data + *pos,
+        (unsigned)count))
+        return -EFAULT;
+    return count;
+}
+/*write function for write operation of the debug file*/
+static ssize_t
+qae_mem_slabs_data_write (struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ /*write command to qae_dbg_command buffer
+ *next read on debug file will parse the command string
+ *and execute the requested command
+ *if command buffer empty the next read
+ *lists the allocated slabs */
+ /* check count vs size of command buffer*/
+ if (count >= sizeof(qae_dbg_command) )
+ {
+ return -EFAULT;
+ }
+ /* copy command string from user buffer*/
+ if ( copy_from_user(qae_dbg_command, buffer, count) )
+ {
+ return -EFAULT;
+ }
+ /*terminating 0*/
+ qae_dbg_command[count] = '\0';
+ return count;
+}
+/*called when debug file is opened
+ used for initialisation */
+static int
+qae_mem_slabs_data_open(struct inode *inode, struct file* filep)
+{
+ qae_mem_update_slab_data(1);
+ return 0;
+}
+static struct file_operations qae_mem_slabs_file_fops = {
+ .owner = THIS_MODULE,
+ .open = qae_mem_slabs_data_open,
+ .read = qae_mem_slabs_data_read,
+ .write = qae_mem_slabs_data_write
+};
+/*
+ * Initialisation function to insmod device driver
+ */
+static inline void
+qae_debug_init(void)
+{
+ if ( ( qae_dbg_root_dir = debugfs_create_dir("qae_mem_dbg", NULL) )
+ == ERR_PTR(-ENODEV) ||
+ ( qae_dbg_slabs_file = debugfs_create_file("qae_mem_slabs", 0666,
+ qae_dbg_root_dir, NULL,
+ &qae_mem_slabs_file_fops) ) == ERR_PTR(-ENODEV) )
+ {
+ mm_warning(
+ "Debug FS not initialised, debug info not available\n");
+ }
+}
+
+static int
+qae_mem_init( void )
+{
+ mm_info("Loading %s Module %s ...\n", MODULE_NAME, VERSION_STRING);
+ mm_info("IOCTLs: %lx, %lx, %lx, %lx\n",
+ (unsigned long)DEV_MEM_IOC_MEMALLOC,
+ (unsigned long)DEV_MEM_IOC_MEMFREE,
+ (unsigned long)DEV_MEM_IOC_RELEASE,
+ (unsigned long)DEV_MEM_IOC_GET_NUM_HPT);
+ if(register_mem_device_driver())
+ {
+ mm_err("Error loading %s Module\n", MODULE_NAME);
+ return -1;
+ }
+ qae_debug_init();
+ return 0;
+}
+/*
+ * tear down function to rmmod device driver
+ */
+STATIC void
+qae_mem_exit( void )
+{
+ mm_info("Unloading %s Module %s...\n", MODULE_NAME, VERSION_STRING);
+ unregister_mem_device_driver();
+ if( NULL != qae_dbg_root_dir )
+ {
+ debugfs_remove_recursive(qae_dbg_root_dir);
+ qae_dbg_root_dir = NULL;
+ }
+}
+module_init(qae_mem_init);
+module_exit(qae_mem_exit);
+
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("User Space DMA-able Memory Driver");
+
+
--- /dev/null
+++ b/quickassist/utilities/libusdm_drv/linux/kernel_space/qae_mem_drv_utils.c
@@ -0,0 +1,82 @@
+/***************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * version: QAT1.7.L.4.7.0-00006
+ *
+ ***************************************************************************/
+/**
+*****************************************************************************
+ * @file qae_mem_drv_utils.c
+ *
+ * This file handles ioctls from user space to kernel space for quick assist API
+ *
+ *****************************************************************************/
+
+#include <asm-generic/errno-base.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "qae_mem_utils.h"
+
+int handle_other_ioctls(uint32_t cmd)
+{
+ mm_err("Invalid IOCTL command specified(0x%x)\n", cmd);
+ return -EINVAL;
+}
+
--- /dev/null
+++ b/quickassist/utilities/libusdm_drv/linux/kernel_space/qae_mem_utils.c
@@ -0,0 +1,277 @@
+/***************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * version: QAT1.7.L.4.7.0-00006
+ *
+ ***************************************************************************/
+/**
+*****************************************************************************
+ * @file qae_mem_utils.c
+ *
+ * This file provides linux kernel memory allocation for quick assist API
+ *
+ *****************************************************************************/
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <asm/io.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include "qae_mem.h"
+#include "qae_mem_utils.h"
+
+#define IS_VMALLOC_ADDR(addr) (((uintptr_t)(addr) >= VMALLOC_START) && \
+ ((uintptr_t)(addr) < VMALLOC_END))
+
+/**
+******************************************************************************
+* @ingroup max_mem_numa
+* maximum amount of memory allocated in kernel space
+* @description
+* This is a command line parameter that defines the maximum
+* amount of memory allocated by the driver in kernel space.
+* Measured in kilobytes.
+*****************************************************************************/
+static uint32_t max_mem_numa = 0;
+/**
+******************************************************************************
+* @ingroup mem_allocated
+* amount of memory currently allocated in kernel space
+* @description
+* This variable holds the overall
+* amount of memory allocated by the driver in kernel space.
+* Measured in bytes.
+*****************************************************************************/
+static size_t mem_allocated = 0;
+module_param(max_mem_numa, uint, S_IRUGO);
+MODULE_PARM_DESC(max_mem_numa,"Maximum number of allocatable memory in 1k units");
+
+static uint32_t numaAllocations_g = 0;
+static uint32_t normalAllocations_g = 0;
+
+/*Defining Max Size limit to be used, to allocate using kmalloc as 4MB */
+static const int QAE_MEM_SIZE_LIMIT = 1024 * 4096;
+
+/**************************************
+ * Memory functions
+ *************************************/
+void* qaeMemAlloc (size_t memsize)
+{
+    void* ptr = NULL; /* count only kmallocs that succeed */
+    if(memsize > QAE_MEM_SIZE_LIMIT)
+        return ( vmalloc(memsize) );
+    ptr = kmalloc (memsize, GFP_KERNEL);
+    normalAllocations_g += (ptr != NULL);
+    return ptr;
+}
+
+void* qaeMemAllocNUMA(size_t size, int node, size_t alignment)
+{
+ void* ptr = NULL;
+ void* phys_ptr = NULL;
+ void* pRet = NULL;
+ size_t alignment_offset = 0;
+ qae_mem_alloc_info_t memInfo = {0};
+ size_t totalInKBytes = (mem_allocated + size)/QAE_KBYTE;
+
+ if( (mem_allocated + size) % QAE_KBYTE )
+ {
+ totalInKBytes += 1;
+ }
+
+ if( max_mem_numa && max_mem_numa < totalInKBytes)
+ {
+ mm_err("%s:%d Maximum NUMA allocation of %u kB reached "
+ "currently allocated %zu bytes requested %zu bytes\n",
+ __func__,__LINE__,max_mem_numa,mem_allocated, size);
+ return NULL;
+ }
+
+ if(!size || alignment < 1)
+ {
+ mm_err("%s:%d Either size or alignment is zero - size = %zu, "
+ "alignment = %zu \n",__func__,__LINE__,size,alignment);
+ return NULL;
+ }
+ /*alignment should be 1,2,4,8....*/
+ if(alignment & (alignment-1))
+ {
+ mm_err("%s:%d Expecting alignment of a power of "\
+ "two but did not get one\n",__func__,__LINE__);
+ return NULL;
+ }
+ /*add the alignment and the struct size to the buffer size*/
+ memInfo.mSize = icp_iommu_get_remapping_size(size + alignment +
+ sizeof(qae_mem_alloc_info_t));
+ if(memInfo.mSize > QAE_MEM_SIZE_LIMIT)
+ {
+ mm_err("%s:%d Total size needed for this " \
+ "set of size and alignment (%zu) exceeds the OS " \
+ "limit %d\n", __func__,__LINE__,memInfo.mSize,QAE_MEM_SIZE_LIMIT);
+ return NULL;
+ }
+ /*allocate contigous memory*/
+ ptr = kmalloc_node (memInfo.mSize, GFP_KERNEL, node);
+ if(!ptr)
+ {
+ mm_err("%s:%d failed to allocate memory\n",__func__,__LINE__);
+ return NULL;
+ }
+ /*store the base address into the struct*/
+ memInfo.mAllocMemPtr = ptr;
+#ifdef ICP_IOMMU_DISABLED
+ icp_iommu_map(&phys_ptr, ptr, memInfo.mSize);
+#else
+ if (icp_iommu_map(&phys_ptr, ptr, memInfo.mSize))
+ {
+ mm_err("%s:%d failed to iommu map\n",__func__,__LINE__);
+ kfree(ptr);
+ return NULL;
+ }
+#endif
+ /*add the size of the struct to the return pointer*/
+ pRet = (char *)memInfo.mAllocMemPtr + sizeof(qae_mem_alloc_info_t);
+ /*compute the offset from the alignement*/
+ alignment_offset = (uintptr_t)pRet % alignment;
+ /*in order to obtain the pointer to the buffer add the alignment and
+ subtract the offset, now we have the return pointer aligned*/
+ pRet = (char*)pRet + (alignment - alignment_offset);
+ /*copy the struct immediately before the buffer pointer*/
+ memcpy((void*)((char*)pRet - sizeof(qae_mem_alloc_info_t)),
+ (void*)(&memInfo),
+ sizeof(qae_mem_alloc_info_t));
+ /*increment the NUMA allocations counter*/
+ numaAllocations_g++;
+ mem_allocated += memInfo.mSize;
+ return pRet;
+}
+
+void qaeMemFreeNUMA (void** ptr)
+{
+ qae_mem_alloc_info_t *memInfo = NULL;
+ uint64_t phy_addr = 0;
+
+ if(!ptr || !(*ptr) )
+ {
+ mm_err("%s:%d Pointer to be freed cannot be NULL\n",
+ __func__,__LINE__);
+ return;
+ }
+ memInfo = (qae_mem_alloc_info_t *)((int8_t *)*ptr -
+ sizeof(qae_mem_alloc_info_t));
+
+ if (memInfo->mSize == 0 || memInfo->mAllocMemPtr == NULL)
+ {
+ mm_err("%s:%d Detected the corrupted data: memory leak!! \n",
+ __func__,__LINE__);
+ mm_err("%s:%d Size: %zu, memPtr: %p\n",
+ __func__,__LINE__,memInfo->mSize, memInfo->mAllocMemPtr);
+ return;
+ }
+ phy_addr = virt_to_phys(memInfo->mAllocMemPtr);
+#ifdef ICP_IOMMU_DISABLED
+ icp_iommu_unmap((void*)(uintptr_t) phy_addr, memInfo->mSize);
+#else
+ if (icp_iommu_unmap((void*)(uintptr_t) phy_addr, memInfo->mSize))
+ {
+ mm_warning("%s:%d failed to iommu unmap\n",__func__,__LINE__);
+ }
+#endif
+ kfree (memInfo->mAllocMemPtr);
+ numaAllocations_g--;
+ if ( mem_allocated > memInfo->mSize )
+ {
+ mem_allocated -= memInfo->mSize;
+ }
+ else
+ {
+ mem_allocated = 0;
+ }
+ *ptr = NULL;
+}
+
+void qaeMemFree (void **ptr)
+{
+ if(!ptr || !(*ptr) )
+ {
+ mm_err("%s:%d Pointer to be freed cannot be NULL\n",__func__,__LINE__);
+ return;
+ }
+ if(IS_VMALLOC_ADDR(*ptr))
+ {
+ vfree(*ptr);
+ return;
+ }
+ kfree (*ptr);
+ normalAllocations_g--;
+ *ptr = NULL;
+}
+
+uint64_t qaeVirtToPhysNUMA(void* ptr)
+{
+ if (!ptr)
+ {
+ mm_err("%s:%d Input parameter cannot be NULL \n",
+ __func__,__LINE__);
+ return 0;
+ }
+ return (uint64_t)(uintptr_t)virt_to_phys(ptr);
+}
--- /dev/null
+++ b/quickassist/utilities/libusdm_drv/linux/kernel_space/qdm.c
@@ -0,0 +1,187 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ * Copyright(c) 2016 Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ *
+ * qat-linux@intel.com
+ *
+ * BSD LICENSE
+ * Copyright(c) 2016 Intel Corporation.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef LINUX_VERSION_CODE
+#include <linux/version.h>
+#else
+#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+#endif
+
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include "qdm.h"
+
+static struct iommu_domain *domain;
+
+/**
+ * qdm_attach_device() - Attach a device to the QAT IOMMU domain
+ * @dev: Device to be attached
+ *
+ * Function attaches the device to the QDM IOMMU domain.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int qdm_attach_device(struct device *dev)
+{
+ if (!domain)
+ return 0;
+
+ if (!dev) {
+ pr_err("QDM: Invalid device\n");
+ return -ENODEV;
+ }
+
+ return iommu_attach_device(domain, dev);
+}
+
+/**
+ * qdm_detach_device() - Detach a device from the QAT IOMMU domain
+ * @dev: Device to be detached
+ *
+ * Function detaches the device from the QDM IOMMU domain.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int qdm_detach_device(struct device *dev)
+{
+ if (!domain)
+ return 0;
+
+ if (!dev) {
+ pr_err("QDM: Invalid device\n");
+ return -ENODEV;
+ }
+
+ iommu_detach_device(domain, dev);
+ return 0;
+}
+
+/**
+ * qdm_iommu_map() - Map a block of memory to the QAT IOMMU domain
+ * @iova: Device virtual address
+ * @vaddr: Kernel virtual address
+ * @size: Size (in bytes) of the memory block.
+ * Must be a multiple of PAGE_SIZE
+ *
+ * Function maps a block of memory to the QDM IOMMU domain.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int qdm_iommu_map(dma_addr_t *iova, void *vaddr, size_t size)
+{
+ phys_addr_t paddr = (phys_addr_t) virt_to_phys(vaddr);
+ *iova = (dma_addr_t) paddr;
+
+ if (!domain)
+ return 0;
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34)
+ return iommu_map_range(domain, *iova, paddr, size,
+ IOMMU_READ|IOMMU_WRITE|IOMMU_CACHE);
+#elif LINUX_VERSION_CODE <= KERNEL_VERSION(3,2,45) && \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
+ return iommu_map(domain, *iova, paddr, get_order(size),
+ IOMMU_READ|IOMMU_WRITE|IOMMU_CACHE);
+#else
+ return iommu_map(domain, *iova, paddr, size,
+ IOMMU_READ|IOMMU_WRITE|IOMMU_CACHE);
+#endif
+}
+EXPORT_SYMBOL_GPL(qdm_iommu_map);
+
+/**
+ * qdm_iommu_unmap() - Unmap a block of memory from the QAT IOMMU domain
+ * @iova: Device virtual address
+ * @size: Size (in bytes) of the memory block
+ * Must be the same size as mapped.
+ *
+ * Function unmaps a block of memory from the QDM IOMMU domain.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int qdm_iommu_unmap(dma_addr_t iova, size_t size)
+{
+ if (!domain)
+ return 0;
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34)
+ iommu_unmap_range(domain, (unsigned long)iova, size);
+#elif LINUX_VERSION_CODE <= KERNEL_VERSION(3,2,45) && \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
+ iommu_unmap(domain, (unsigned long)iova, get_order(size));
+#else
+ iommu_unmap(domain, (unsigned long)iova, size);
+#endif
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qdm_iommu_unmap);
+
+int __init qdm_init(void)
+{
+ if (!iommu_present(&pci_bus_type))
+ return 0;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)
+ domain = iommu_domain_alloc();
+#else
+ domain = iommu_domain_alloc(&pci_bus_type);
+#endif
+ if (!domain) {
+ pr_err("QDM: Failed to allocate a domain\n");
+ return -1;
+ }
+ return 0;
+}
+
+void __exit qdm_exit(void)
+{
+ if (domain)
+ iommu_domain_free(domain);
+ domain = NULL;
+}
--- /dev/null
+++ b/quickassist/utilities/libusdm_drv/linux/user_space/qae_mem_hugepage_utils.c
@@ -0,0 +1,288 @@
+/***************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * version: QAT1.7.L.4.7.0-00006
+ *
+ ***************************************************************************/
+/**
+ ****************************************************************************
+ * @file qae_mem_hugepage_utils.c
+ *
+ * This file provides for utilities for Linux/FreeBSD user space memory
+ * allocation with huge page enabled.
+ *
+ ***************************************************************************/
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <stdbool.h>
+#ifndef __FreeBSD__
+#include "qae_page_table.h"
+#endif
+#include "qae_mem_hugepage_utils.h"
+#include "qae_mem_user_utils.h"
+
+#define HUGEPAGE_FILE_DIR "/dev/hugepages/usdm.XXXXXX"
+#define HUGEPAGE_FILE_LEN (sizeof(HUGEPAGE_FILE_DIR))
+
+#ifndef __FreeBSD__
+static bool g_hugepages_enabled = false;
+#else
+static const bool g_hugepages_enabled = false;
+#endif
+
+static size_t g_num_hugepages = 0;
+
+#ifndef __FreeBSD__ /* FreeBSD only uses init_hugepages, hugepage_enabled */
+/*
+ * Get physical address of mapped hugepage virtual address in the current
+ * process.
+ */
+API_LOCAL
+uint64_t hugepage_virt2phy(const int fd, const void *virtaddr)
+{
+ int ret = 0;
+ user_page_info_t user_pages = {0};
+
+ user_pages.virt_addr = (uintptr_t)virtaddr;
+ ret = mem_ioctl(fd, DEV_MEM_IOC_GET_USER_PAGE, &user_pages);
+ if (ret)
+ {
+ CMD_ERROR("%s:%d ioctl call for get physical addr failed, "
+ "ret = %d\n",
+ __func__,
+ __LINE__,
+ ret);
+ ret = -EIO;
+ }
+
+ return user_pages.phy_addr;
+}
+
+API_LOCAL
+void *hugepage_mmap_phy_addr(const size_t len)
+{
+ void *addr = NULL;
+ int ret = 0;
+ int hpg_fd;
+ char hpg_fname[HUGEPAGE_FILE_LEN];
+
+ /*
+ * for every mapped huge page there will be a separate file descriptor
+ * created from a temporary file, we should NOT close fd explicitly, it
+ * will be reclaimed by the OS when the process gets terminated, and
+ * meanwhile the huge page binding to the fd will be released, this could
+ * guarantee the memory cleanup order between user buffers and ETR.
+ */
+ snprintf(hpg_fname, sizeof(HUGEPAGE_FILE_DIR), "%s", HUGEPAGE_FILE_DIR);
+ hpg_fd = qae_mkstemp(hpg_fname);
+
+ if (hpg_fd < 0)
+ {
+ CMD_ERROR("%s:%d mkstemp(%s) for hpg_fd failed with errno: %d\n",
+ __func__,
+ __LINE__,
+ hpg_fname,
+ errno);
+ return NULL;
+ }
+
+ unlink(hpg_fname);
+
+ addr = qae_mmap(NULL,
+ len,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE | MAP_HUGETLB,
+ hpg_fd,
+ 0);
+
+ if (MAP_FAILED == addr)
+ {
+ CMD_ERROR("%s:%d qae_mmap(%s) for hpg_fd failed with errno:%d\n",
+ __func__,
+ __LINE__,
+ hpg_fname,
+ errno);
+ close(hpg_fd);
+ return NULL;
+ }
+
+ ret = qae_madvise(addr, len, MADV_DONTFORK);
+ if (0 != ret)
+ {
+ munmap(addr, len);
+ CMD_ERROR("%s:%d qae_madvise(%s) for hpg_fd failed with errno:%d\n",
+ __func__,
+ __LINE__,
+ hpg_fname,
+ errno);
+ close(hpg_fd);
+ return NULL;
+ }
+
+ ((dev_mem_info_t *)addr)->hpg_fd = hpg_fd;
+ return addr;
+}
+
+API_LOCAL
+dev_mem_info_t *hugepage_alloc_slab(const int fd,
+ const size_t size,
+ const int node,
+ enum slabType type)
+{
+ dev_mem_info_t *slab = NULL;
+
+ if (!g_num_hugepages)
+ {
+ CMD_ERROR("%s:%d mmap: exceeded max huge pages allocations for this "
+ "process.\n",
+ __func__,
+ __LINE__);
+ return NULL;
+ }
+ slab = hugepage_mmap_phy_addr(size);
+ if (!slab)
+ {
+ CMD_ERROR("%s:%d mmap on huge page memory allocation failed\n",
+ __func__,
+ __LINE__);
+ return NULL;
+ }
+ slab->nodeId = node;
+ slab->size = size;
+ slab->type = type;
+ slab->virt_addr = slab;
+ slab->phy_addr = hugepage_virt2phy(fd, slab);
+ if (!slab->phy_addr)
+ {
+ CMD_ERROR("%s:%d virt2phy on huge page memory allocation failed\n",
+ __func__,
+ __LINE__);
+ close(slab->hpg_fd);
+ munmap(slab, size);
+ return NULL;
+ }
+ g_num_hugepages--;
+
+ return slab;
+}
+
+API_LOCAL
+void hugepage_free_slab(const dev_mem_info_t *memInfo)
+{
+ g_num_hugepages++;
+
+ close(memInfo->hpg_fd);
+}
+
+#endif /* !__FreeBSD__ */
+
+API_LOCAL
+int init_hugepages(const int fd)
+{
+ int ret = 0;
+#if (QAE_NUM_PAGES_PER_ALLOC == 512)
+#ifndef __FreeBSD__
+ ret = mem_ioctl(fd, DEV_MEM_IOC_GET_NUM_HPT, &g_num_hugepages);
+ if (ret)
+ {
+ CMD_ERROR("%s:%d ioctl call for checking number of huge page failed, "
+ "ret = %d\n",
+ __func__,
+ __LINE__,
+ ret);
+ g_num_hugepages = 0;
+ ret = -EIO;
+ }
+ if (g_num_hugepages > 0)
+ {
+ set_free_page_table_fptr(free_page_table_hpg);
+ set_loadaddr_fptr(load_addr_hpg);
+ set_loadkey_fptr(load_key_hpg);
+
+ g_hugepages_enabled = true;
+ }
+ else
+ {
+ set_free_page_table_fptr(free_page_table);
+ set_loadaddr_fptr(load_addr);
+ set_loadkey_fptr(load_key);
+
+ g_hugepages_enabled = false;
+ }
+#endif /* !__FreeBSD__ */
+#else
+ if (fd < 0)
+ return -EIO;
+#endif
+ return ret;
+}
+
+API_LOCAL
+int hugepage_enabled()
+{
+ return g_hugepages_enabled;
+}
--- /dev/null
+++ b/quickassist/utilities/libusdm_drv/linux/user_space/qae_mem_hugepage_utils.h
@@ -0,0 +1,91 @@
+/***************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * version: QAT1.7.L.4.7.0-00006
+ *
+ ***************************************************************************/
+/**
+ ****************************************************************************
+ * @file qae_mem_hugepage_utils.h
+ *
+ * This file provides API for utilities of Linux/FreeBSD user space memory
+ * allocation with huge page enabled.
+ *
+ ***************************************************************************/
+#ifndef QAE_MEM_HUGEPAGE_UTILS_H
+#define QAE_MEM_HUGEPAGE_UTILS_H
+#ifndef __FreeBSD__
+#include "qae_mem_utils.h"
+
+uint64_t hugepage_virt2phy(const int fd, const void *virtaddr);
+
+void *hugepage_mmap_phy_addr(const size_t len);
+
+dev_mem_info_t *hugepage_alloc_slab(const int fd,
+ const size_t size,
+ const int node,
+ enum slabType type);
+
+void hugepage_free_slab(const dev_mem_info_t *memInfo);
+#endif /* !__FreeBSD__ */
+
+int init_hugepages(const int fd);
+
+int hugepage_enabled();
+#endif
--- /dev/null
+++ b/quickassist/utilities/libusdm_drv/linux/user_space/qae_mem_user_utils.h
@@ -0,0 +1,108 @@
+/***************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * version: QAT1.7.L.4.7.0-00006
+ *
+ ***************************************************************************/
+/**
+ ****************************************************************************
+ * @file qae_mem_user_utils.h
+ *
+ * This file provides for API of Linux user space memory allocation
+ *
+ ***************************************************************************/
+
+#ifndef QAE_MEM_USER_UTILS_H
+#define QAE_MEM_USER_UTILS_H
+
+#ifndef SKIP_BUILTIN_FUNC
+#define unlikely(x) __builtin_expect((x), 0)
+#else
+#define unlikely(x) (0 == (x))
+#endif
+
+#if __GNUC__ >= 4
+#define API_PUBLIC __attribute__((visibility("default")))
+#define API_LOCAL __attribute__((visibility("hidden")))
+#else
+#define API_PUBLIC
+#define API_LOCAL
+#endif
+
+#ifdef ICP_DEBUG
+static inline void CMD_DEBUG(const char *format, ...)
+{
+ va_list args;
+ va_start(args, format);
+ vfprintf(stdout, format, args);
+ va_end(args);
+}
+#else
+#define CMD_DEBUG(...)
+#endif
+
+static inline void CMD_ERROR(const char *format, ...)
+{
+ va_list args;
+ va_start(args, format);
+ vfprintf(stderr, format, args);
+ va_end(args);
+}
+
+#endif
--- /dev/null
+++ b/quickassist/utilities/libusdm_drv/linux/user_space/qae_mem_utils.c
@@ -0,0 +1,1331 @@
+/***************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * version: QAT1.7.L.4.7.0-00006
+ *
+ ***************************************************************************/
+/**
+ ****************************************************************************
+ * @file qae_mem_utils.c
+ *
+ * This file provides for Linux user space memory allocation. It uses
+ * a driver that allocates the memory in kernel memory space (to ensure
+ * physically contiguous memory) and maps it to
+ * user space for use by the quick assist sample code
+ *
+ ***************************************************************************/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include <sys/types.h>
+#include <dirent.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/param.h>
+#ifndef ICP_WITHOUT_THREAD
+#include <pthread.h>
+#endif
+#include <errno.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include "qae_mem.h"
+#include "qae_mem_utils.h"
+#include "qae_mem_user_utils.h"
+#include "qae_page_table.h"
+#include "qae_mem_hugepage_utils.h"
+
+STATIC int fd = -1;
+
+/**************************************************************************
+ macro
+**************************************************************************/
+
+#define QAE_MEM "/dev/usdm_drv"
+
+/**************************************************************************
+ static variable
+**************************************************************************/
+
+/* Current cached memory size. */
+static size_t g_cache_size = 0;
+/* Maximum cached memory size, 8 Mb by default */
+static size_t g_max_cache = 0x800000;
+/* The maximum number we allow to search for available size */
+static size_t g_max_lookup_num = 10;
+/* User space page table for fast virtual to physical address translation */
+static page_table_t g_page_table = {{{0}}};
+
+typedef struct
+{
+ dev_mem_info_t *head;
+ dev_mem_info_t *tail;
+} slab_list_t;
+/* User space hash for fast slab searching */
+static slab_list_t g_slab_list[PAGE_SIZE] = {{0}};
+
+static int g_strict_node = 1;
+
+#ifndef ICP_WITHOUT_THREAD
+static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+#endif
+
+static dev_mem_info_t *pUserCacheHead = NULL;
+static dev_mem_info_t *pUserCacheTail = NULL;
+static dev_mem_info_t *pUserMemListHead = NULL;
+static dev_mem_info_t *pUserMemListTail = NULL;
+static dev_mem_info_t *pUserLargeMemListHead = NULL;
+static dev_mem_info_t *pUserLargeMemListTail = NULL;
+
+
+static free_page_table_fptr_t free_page_table_fptr = free_page_table;
+static load_addr_fptr_t load_addr_fptr = load_addr;
+static load_key_fptr_t load_key_fptr = load_key;
+
+/**************************************************************************
+ function
+**************************************************************************/
+#ifndef __FreeBSD__
+API_LOCAL
+void set_free_page_table_fptr(free_page_table_fptr_t fp)
+{
+ free_page_table_fptr = fp;
+}
+
+API_LOCAL
+void set_loadaddr_fptr(load_addr_fptr_t fp)
+{
+ load_addr_fptr = fp;
+}
+
+API_LOCAL
+void set_loadkey_fptr(load_key_fptr_t fp)
+{
+ load_key_fptr = fp;
+}
+#endif /* __FreeBSD__ */
+
+static inline size_t div_round_up(const size_t n, const size_t d)
+{
+ return (n + d - 1) / d;
+}
+
+static inline void add_slab_to_hash(dev_mem_info_t *slab)
+{
+ const size_t key = get_key(slab->phy_addr);
+
+ ADD_ELEMENT_TO_HEAD_LIST(
+ slab, g_slab_list[key].head, g_slab_list[key].tail, _user_hash);
+}
+static inline void del_slab_from_hash(dev_mem_info_t *slab)
+{
+ const size_t key = get_key(slab->phy_addr);
+
+ REMOVE_ELEMENT_FROM_LIST(
+ slab, g_slab_list[key].head, g_slab_list[key].tail, _user_hash);
+}
+
+static inline dev_mem_info_t *find_slab_in_hash(void *virt_addr)
+{
+ const size_t key = load_key_fptr(&g_page_table, virt_addr);
+ dev_mem_info_t *slab = g_slab_list[key].head;
+
+ while (slab)
+ {
+ uintptr_t offs = (uintptr_t)virt_addr - (uintptr_t)slab->virt_addr;
+ if (offs < slab->size)
+ return slab;
+ slab = slab->pNext_user_hash;
+ }
+
+ return NULL;
+}
+
+/* mem_ctzll function
+ * input: a 64-bit bitmap window
+ * output: number of contiguous 0s from least significant bit position
+ * __GNUC__ predefined macro and __builtin_ctz() are supported by Intel C
+ */
+static inline int32_t mem_ctzll(uint64_t bitmap_window)
+{
+ if (bitmap_window)
+ {
+#ifdef __GNUC__
+ return __builtin_ctzll(bitmap_window);
+#else
+#error "Undefined built-in function"
+#endif
+ }
+ return QWORD_WIDTH;
+}
+
+/* bitmap_read function
+ * reads a 64-bit window from a BITMAP_LENx64-bit bitmap
+ * starting from window_pos (0 <-> BITMAP_LENx64 -1)
+ * map points to the BITMAP_LENx64 bit map area
+ * returns the 64-bit window from the BITMAP_LENx64 bitmap.
+ * Each bit represents a 1k block in the 2 Meg buffer
+ */
+
+static uint64_t bitmap_read(uint64_t *map, size_t window_pos)
+{
+ uint64_t quad_word_window = 0ULL;
+ uint64_t next_quad_word = 0ULL;
+ size_t quad_word_pos = 0;
+ size_t bit_pos = 0;
+
+ quad_word_pos = window_pos / QWORD_WIDTH;
+
+ if (quad_word_pos >= BITMAP_LEN)
+ {
+ return QWORD_ALL_ONE;
+ }
+ bit_pos = window_pos % QWORD_WIDTH;
+
+ quad_word_window = map[quad_word_pos];
+
+ if (0 == bit_pos)
+ {
+ return quad_word_window;
+ }
+
+ /* it is safe to read the next quad word because
+ * there is always a barrier at the end */
+ next_quad_word = map[quad_word_pos + 1];
+
+ quad_word_window >>= bit_pos;
+ next_quad_word <<= QWORD_WIDTH - bit_pos;
+ quad_word_window |= next_quad_word;
+
+ return quad_word_window;
+}
+
+static const uint64_t __bitmask[65] = {
+ 0x0000000000000000ULL, 0x0000000000000001ULL, 0x0000000000000003ULL,
+ 0x0000000000000007ULL, 0x000000000000000fULL, 0x000000000000001fULL,
+ 0x000000000000003fULL, 0x000000000000007fULL, 0x00000000000000ffULL,
+ 0x00000000000001ffULL, 0x00000000000003ffULL, 0x00000000000007ffULL,
+ 0x0000000000000fffULL, 0x0000000000001fffULL, 0x0000000000003fffULL,
+ 0x0000000000007fffULL, 0x000000000000ffffULL, 0x000000000001ffffULL,
+ 0x000000000003ffffULL, 0x000000000007ffffULL, 0x00000000000fffffULL,
+ 0x00000000001fffffULL, 0x00000000003fffffULL, 0x00000000007fffffULL,
+ 0x0000000000ffffffULL, 0x0000000001ffffffULL, 0x0000000003ffffffULL,
+ 0x0000000007ffffffULL, 0x000000000fffffffULL, 0x000000001fffffffULL,
+ 0x000000003fffffffULL, 0x000000007fffffffULL, 0x00000000ffffffffULL,
+ 0x00000001ffffffffULL, 0x00000003ffffffffULL, 0x00000007ffffffffULL,
+ 0x0000000fffffffffULL, 0x0000001fffffffffULL, 0x0000003fffffffffULL,
+ 0x0000007fffffffffULL, 0x000000ffffffffffULL, 0x000001ffffffffffULL,
+ 0x000003ffffffffffULL, 0x000007ffffffffffULL, 0x00000fffffffffffULL,
+ 0x00001fffffffffffULL, 0x00003fffffffffffULL, 0x00007fffffffffffULL,
+ 0x0000ffffffffffffULL, 0x0001ffffffffffffULL, 0x0003ffffffffffffULL,
+ 0x0007ffffffffffffULL, 0x000fffffffffffffULL, 0x001fffffffffffffULL,
+ 0x003fffffffffffffULL, 0x007fffffffffffffULL, 0x00ffffffffffffffULL,
+ 0x01ffffffffffffffULL, 0x03ffffffffffffffULL, 0x07ffffffffffffffULL,
+ 0x0fffffffffffffffULL, 0x1fffffffffffffffULL, 0x3fffffffffffffffULL,
+ 0x7fffffffffffffffULL, 0xffffffffffffffffULL,
+};
+
+/* clear_bitmap function
+ * clear the BITMAP_LENx64-bit bitmap from pos
+ * for len length
+ * input : map - pointer to the bitmap
+ * pos - bit position
+ * len - number of contiguous bits
+ */
+static inline void clear_bitmap(uint64_t *bitmap,
+ const size_t index,
+ size_t len)
+{
+ size_t qword = index / QWORD_WIDTH;
+ const size_t offset = index % QWORD_WIDTH;
+ size_t num;
+
+ if (offset > 0)
+ {
+ const size_t width = MIN(len, QWORD_WIDTH - offset);
+ const uint64_t mask = __bitmask[width] << offset;
+
+ /* Clear required bits */
+ bitmap[qword] &= ~mask;
+
+ len -= width;
+ qword += 1;
+ }
+
+ num = len / QWORD_WIDTH;
+ len %= QWORD_WIDTH;
+
+ while (num--)
+ {
+ bitmap[qword++] = 0;
+ }
+
+ /* Clear remaining bits */
+ bitmap[qword] &= ~__bitmask[len];
+}
+
+/* set_bitmap function
+ * set the BITMAP_LENx64-bit bitmap from pos
+ * for len length
+ * input : bitmap - pointer to the bitmap
+ * index - bit position
+ * len - number of contiguous bits
+ */
+static inline void set_bitmap(uint64_t *bitmap, const size_t index, size_t len)
+{
+ size_t qword = index / QWORD_WIDTH;
+ const size_t offset = index % QWORD_WIDTH;
+ size_t num;
+
+ if (offset > 0)
+ {
+ const size_t width = MIN(len, QWORD_WIDTH - offset);
+ const uint64_t mask = __bitmask[width] << offset;
+
+ /* Set required bits */
+ bitmap[qword] |= mask;
+
+ len -= width;
+ qword += 1;
+ }
+
+ num = len / QWORD_WIDTH;
+ len %= QWORD_WIDTH;
+
+ while (num--)
+ {
+ bitmap[qword++] = ~0ULL;
+ }
+
+ /* Set remaining bits */
+ bitmap[qword] |= __bitmask[len];
+}
+
+/* mem_alloc function
+ * mem_alloc allocates memory with min. size = UNIT_SIZE
+ * block_ctrl points to a block_ctrl_t structure with virtual address
+ * size is the requested number of bytes
+ * minimum allocation size is UNIT_SIZE
+ * returns a pointer to the newly allocated block
+ * input: block_ctrl - pointer to the memory control block
+ * size - size requested in bytes
+ * output: pointer to the allocated area
+ */
+static void *mem_alloc(block_ctrl_t *block_ctrl, size_t size, size_t align)
+{
+ uint64_t *bitmap = NULL;
+ size_t window_pos = 0;
+ void *retval = NULL;
+ size_t blocks_found = 0;
+ uint64_t bitmap_window = 0ULL;
+ size_t blocks_required = 0ULL;
+ size_t first_block = 0;
+ size_t width = 0;
+ size_t width_ones = 0;
+
+ if (NULL == block_ctrl || 0 == size)
+ {
+ CMD_ERROR(" %s:%d invalid control block or size provided "
+ "block_ctrl = %p and size = %d \n",
+ __func__,
+ __LINE__,
+ block_ctrl,
+ size);
+ return retval;
+ }
+
+ bitmap = block_ctrl->bitmap;
+
+ blocks_required = div_round_up(size, UNIT_SIZE);
+
+ window_pos = 0;
+ first_block = window_pos;
+
+ do
+ {
+ /* read 64-bit bitmap window from window_pos (0-BITMAP_LEN*64) */
+ bitmap_window = bitmap_read(bitmap, window_pos);
+ /* find number of contiguous 0s from right */
+ width = mem_ctzll(bitmap_window);
+
+ /* increment number of blocks found with number of contig. 0s
+ in bitmap window */
+ blocks_found += width;
+ /* check if a fit is found */
+ if (blocks_found >= blocks_required)
+ {
+ /* calculate return address from virtual address and
+ first block number */
+ retval = (uint8_t *)(block_ctrl) + first_block * UNIT_SIZE;
+ if (first_block + blocks_required > BITMAP_LEN * QWORD_WIDTH)
+ {
+ CMD_ERROR("%s:%d Allocation error - Required blocks exceeds "
+ "bitmap window. Block index = %d, Blocks required"
+ " = %zu and Bitmap window = %d \n",
+ __func__,
+ __LINE__,
+ first_block,
+ blocks_required,
+ (BITMAP_LEN * QWORD_WIDTH));
+ return NULL;
+ }
+ /* save length in the reserved area right after the bitmap */
+ block_ctrl->sizes[first_block] = (uint16_t)blocks_required;
+ /* set bit maps from bit position (0<->BITMAP_LEN*64 -1) =
+ * first_block(0<->BITMAP_LEN*64-1)
+ * with blocks_required length in bitmap
+ */
+ set_bitmap(bitmap, first_block, blocks_required);
+ break;
+ }
+ else
+ {
+ /* did not find fit check if bitmap_window has at least a 1*/
+ if (bitmap_window)
+ {
+ /* bit field of 0s not contiguous, clear blocks_found adjust
+ * first_block and window_pos find width of contiguous 1 bits
+ * and move window position will read next 64-bit wide window
+ * from bitmap
+ */
+ bitmap_window >>= (width + 1);
+ width_ones = mem_ctzll(~bitmap_window);
+ blocks_found = 0;
+ window_pos += width + 1 + width_ones;
+ if (align && window_pos % align)
+ {
+ window_pos += align - window_pos % align;
+ }
+ first_block = window_pos;
+ }
+ else
+ {
+ /* bit field of 0s is contiguous, but fit not found yet
+ * move window_pos and search more 0s */
+ window_pos += width;
+ }
+ }
+ } while (window_pos < BITMAP_LEN * QWORD_WIDTH);
+ return retval;
+}
+/*
+ * deallocates previously allocated blocks
+ * block_ctrl is a pointer to block_ctrl_t structure
+ * block is a result from a previous mem_alloc call
+ */
+static void mem_free(block_ctrl_t *block_ctrl, void *block)
+{
+ size_t first_block = 0;
+ uint32_t length = 0;
+ uint8_t *start_of_block = block;
+ uint64_t *bitmap = NULL;
+
+ if (NULL == block_ctrl || NULL == block)
+ {
+ CMD_ERROR("%s:%d One of the parameters is NULL. block_ctrl = %p "
+ "block = %p\n",
+ __func__,
+ __LINE__,
+ block_ctrl,
+ block);
+ return;
+ }
+
+ if ((uintptr_t)block % UNIT_SIZE)
+ {
+ CMD_ERROR("%s:%d Block address(%p) must be multiple of Unit size(%d)\n",
+ __func__,
+ __LINE__,
+ block,
+ UNIT_SIZE);
+ return;
+ }
+
+ bitmap = block_ctrl->bitmap;
+
+ /* find start of block in block numbers using the address of start of
+ * buffer and block retrieve first_block and length of block from integer
+ * at the start of block
+ */
+ first_block =
+ (uintptr_t)(start_of_block - (uint8_t *)(block_ctrl)) / UNIT_SIZE;
+
+ length = block_ctrl->sizes[first_block];
+
+ if (length + first_block > BITMAP_LEN * QWORD_WIDTH)
+ {
+ CMD_ERROR("%s:%d Invalid block address provided - "
+ "block length exceeds bitmap window. block index = %d "
+ "and block length: %d\n",
+ __func__,
+ __LINE__,
+ first_block,
+ length);
+ return;
+ }
+ /* clear bitmap from bitmap position (0<->BITMAP_LEN*64 - 1) for length*/
+ clear_bitmap(bitmap, first_block, length);
+
+#ifndef ICP_DISABLE_SECURE_MEM_FREE
+ qae_memzero_explicit(block, length * UNIT_SIZE);
+#endif
+}
+
+static dev_mem_info_t *userMemLookupBySize(size_t size,
+ int node,
+ void **block,
+ const size_t align)
+{
+ dev_mem_info_t *pCurr = NULL;
+ size_t link_num = 0;
+
+ for (pCurr = pUserMemListHead; pCurr != NULL; pCurr = pCurr->pNext_user)
+ {
+ if (g_strict_node && (pCurr->nodeId != node))
+ {
+ continue;
+ }
+ *block = mem_alloc((block_ctrl_t *)pCurr, size, align);
+ if (NULL != *block)
+ {
+ return pCurr;
+ }
+ /* Prevent from visiting whole chain, because after the first
+ * several nodes, the chance to get one is very small.
+ * Another consideration is to prevent new allocation from old
+ * link, so that the old link could be released
+ */
+ link_num++;
+ if (link_num >= g_max_lookup_num)
+ {
+ break;
+ }
+ }
+ return NULL;
+}
+
+static inline void *init_slab_and_alloc(block_ctrl_t *slab,
+ const size_t size,
+ const size_t phys_align_unit)
+{
+ const size_t last = slab->mem_info.size / CHUNK_SIZE;
+ dev_mem_info_t *p_ctrl_blk = &slab->mem_info;
+ const size_t reserved = div_round_up(sizeof(block_ctrl_t), UNIT_SIZE);
+ void *virt_addr = NULL;
+
+ /* initialise the bitmap to 1 for reserved blocks */
+ slab->bitmap[0] = (1ULL << reserved) - 1;
+ /* make a barrier to stop search at the end of the bitmap */
+ slab->bitmap[last] = QWORD_ALL_ONE;
+
+ virt_addr = mem_alloc(slab, size, phys_align_unit);
+ if (NULL != virt_addr)
+ {
+ ADD_ELEMENT_TO_HEAD_LIST(
+ p_ctrl_blk, pUserMemListHead, pUserMemListTail, _user);
+ }
+ return virt_addr;
+}
+
+static inline int push_slab(dev_mem_info_t *slab)
+{
+ if (g_cache_size + slab->size <= g_max_cache)
+ {
+ g_cache_size += slab->size;
+ ADD_ELEMENT_TO_HEAD_LIST(slab, pUserCacheHead, pUserCacheTail, _user);
+ return 0;
+ }
+ return -ENOMEM;
+}
+
+static inline dev_mem_info_t *pop_slab(const int node)
+{
+ dev_mem_info_t *slab = NULL;
+
+ for (slab = pUserCacheHead; slab != NULL; slab = slab->pNext_user)
+ {
+ if (node != NUMA_ANY_NODE)
+ if (g_strict_node && (node != slab->nodeId))
+ continue;
+
+ g_cache_size -= slab->size;
+ REMOVE_ELEMENT_FROM_LIST(slab, pUserCacheHead, pUserCacheTail, _user);
+ return slab;
+ }
+ return NULL;
+}
+
+static inline void free_slab(const int fd, dev_mem_info_t *slab)
+{
+ dev_mem_info_t memInfo;
+ int ret = 0;
+
+ del_slab_from_hash(slab);
+
+ memcpy(&memInfo, slab, sizeof(dev_mem_info_t));
+ /* Need to disconnect from original chain */
+ ret = qae_munmap(memInfo.virt_addr, memInfo.size);
+ if (ret)
+ {
+ CMD_ERROR("%s:%d munmap failed, ret = %d\n", __func__, __LINE__, ret);
+ }
+ if (LARGE == memInfo.type)
+ {
+ ret = qae_munmap(slab, getpagesize());
+ if (ret)
+ {
+ CMD_ERROR(
+ "%s:%d munmap failed, ret = %d\n", __func__, __LINE__, ret);
+ }
+ }
+
+#ifndef __FreeBSD__
+ if (HUGE_PAGE == memInfo.type)
+ {
+ hugepage_free_slab(&memInfo);
+ }
+ else
+#endif
+ {
+ ret = mem_ioctl(fd, DEV_MEM_IOC_MEMFREE, &memInfo);
+ if (unlikely(ret))
+ {
+ CMD_ERROR("%s:%d ioctl call for mem free failed, ret = %d\n",
+ __func__,
+ __LINE__,
+ ret);
+ }
+ }
+}
+
+static inline dev_mem_info_t *find_slab(const int fd,
+ const size_t size,
+ const int node,
+ void **addr,
+ const size_t align)
+{
+ dev_mem_info_t *slab = userMemLookupBySize(size, node, addr, align);
+
+ if (NULL == slab)
+ {
+ slab = pop_slab(node);
+ if (NULL != slab)
+ {
+ *addr = init_slab_and_alloc((block_ctrl_t *)slab, size, align);
+ if (NULL == *addr)
+ {
+ CMD_ERROR("%s:%d Memory allocation failed Virtual address: %p "
+ " Size: %x \n",
+ __func__,
+ __LINE__,
+ slab,
+ size);
+ free_slab(fd, slab);
+ return NULL;
+ }
+ }
+ }
+ return slab;
+}
+
+/**************************************
+ * Memory functions
+ *************************************/
+void *qaeMemAlloc(size_t memsize)
+{
+ void *memPtr = NULL;
+ memPtr = calloc(memsize, sizeof(uint8_t));
+ return memPtr;
+}
+
+void qaeMemFree(void **ptr)
+{
+ if ((!ptr) || !(*ptr))
+ {
+ CMD_ERROR("%s:%d Trying to Free NULL Pointer\n", __func__, __LINE__);
+ return;
+ }
+ free(*ptr);
+ *ptr = NULL;
+}
+
+static inline int check_pid(void)
+{
+ static pid_t pid = 0;
+
+ if (pid != getpid())
+ {
+ pid = getpid();
+ return 1;
+ }
+ return 0;
+}
+
+static inline int qaeOpenFd(void)
+{
+ /* Check if it is a new process or child. */
+ const int is_new_pid = check_pid();
+
+ if (fd < 0 || is_new_pid)
+ {
+ /* Reset all control structures. */
+ free_page_table_fptr(&g_page_table);
+ memset(&g_page_table, 0, sizeof(g_page_table));
+ memset(&g_slab_list, 0, sizeof(g_slab_list));
+ g_cache_size = 0;
+
+ pUserCacheHead = NULL;
+ pUserCacheTail = NULL;
+ pUserMemListHead = NULL;
+ pUserMemListTail = NULL;
+ pUserLargeMemListHead = NULL;
+ pUserLargeMemListTail = NULL;
+
+ CMD_DEBUG("%s:%d Memory file handle is not initialized. "
+ "Initializing it now \n",
+ __func__,
+ __LINE__);
+
+ if (fd > 0)
+ close(fd);
+ fd = qae_open(QAE_MEM, O_RDWR);
+ if (fd < 0)
+ {
+ CMD_ERROR("%s:%d Unable to initialize memory file handle %s \n",
+ __func__,
+ __LINE__,
+ QAE_MEM);
+ return -ENOENT;
+ }
+
+ if (init_hugepages(fd))
+ return -EIO;
+ }
+ return 0;
+}
+
+int32_t qaeMemInit()
+{
+ int32_t fd_status = 0;
+ int32_t status = 0;
+
+ status = mem_mutex_lock(&mutex);
+ if (status)
+ {
+ CMD_ERROR("%s:%d Error on thread mutex lock %s\n",
+ __func__,
+ __LINE__,
+ strerror(status));
+ return -EIO;
+ }
+
+ fd_status = qaeOpenFd();
+
+ status = mem_mutex_unlock(&mutex);
+ if (status)
+ {
+ CMD_ERROR("%s:%d Error on thread mutex unlock %s\n",
+ __func__,
+ __LINE__,
+ strerror(status));
+ return -EIO;
+ }
+ return fd_status;
+}
+
+#ifdef __FreeBSD__
+int qaeMemInitAndReturnFd(int *mem_fd)
+{
+ int status = -1;
+ if (NULL != mem_fd)
+ {
+ status = qaeMemInit();
+ }
+ if (status == 0)
+ {
+ *mem_fd = fd;
+ }
+ return status;
+}
+#endif /* __FreeBSD__ */
+
+static void destroyList(const int fd, dev_mem_info_t *pList)
+{
+ dev_mem_info_t *pCurr = pList;
+
+ while (pCurr)
+ {
+ dev_mem_info_t *next = pCurr->pNext_user;
+ free_slab(fd, pCurr);
+ pCurr = next;
+ }
+}
+
+static inline void reset_cache(const int fd)
+{
+ dev_mem_info_t *slab = NULL;
+ do
+ {
+ slab = pop_slab(NUMA_ANY_NODE);
+ if (NULL != slab)
+ free_slab(fd, slab);
+ } while (slab != NULL);
+}
+
+void qaeMemDestroy(void)
+{
+ int ret = 0;
+
+ /* Free all of the chains */
+ ret = mem_mutex_lock(&mutex);
+ if (unlikely(ret))
+ {
+ CMD_ERROR(
+ "%s:%d Error(%d) on thread mutex lock \n", __func__, __LINE__, ret);
+ return;
+ }
+
+ /* release all control buffers */
+ free_page_table_fptr(&g_page_table);
+ reset_cache(fd);
+ destroyList(fd, pUserMemListHead);
+ destroyList(fd, pUserLargeMemListHead);
+
+ pUserCacheHead = NULL;
+ pUserCacheTail = NULL;
+ pUserMemListHead = NULL;
+ pUserMemListTail = NULL;
+ pUserLargeMemListHead = NULL;
+ pUserLargeMemListTail = NULL;
+
+ /* Send ioctl to kernel space to remove block for this pid */
+ if (fd > 0)
+ {
+ ret = mem_ioctl(fd, DEV_MEM_IOC_RELEASE, NULL);
+ if (ret)
+ {
+ CMD_ERROR("%s:%d ioctl call for mem release failed, ret = %d\n",
+ __func__,
+ __LINE__,
+ ret);
+ }
+ close(fd);
+ fd = -1;
+ }
+
+ ret = mem_mutex_unlock(&mutex);
+ if (unlikely(ret))
+ {
+ CMD_ERROR("%s:%d Error(%d) on thread mutex unlock\n",
+ __func__,
+ __LINE__,
+ ret);
+ }
+}
+
+#ifndef __FreeBSD__
+static inline void *mem_protect(void *const addr, const size_t len)
+{
+ int ret = 0;
+
+ ret = qae_madvise(addr, len, MADV_DONTFORK);
+ if (0 != ret)
+ {
+ munmap(addr, len);
+ return NULL;
+ }
+ return addr;
+}
+#endif
+
+static inline void *mmap_phy_addr(const int fd,
+ const uint64_t phy_addr,
+ const size_t len)
+{
+ void *addr = NULL;
+
+#ifdef __FreeBSD__
+ addr =
+ qae_mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, phy_addr);
+ if (0 != mlock(addr, len))
+ {
+ munmap(addr, len);
+ return NULL;
+ }
+#endif
+#ifndef __FreeBSD__
+ addr = qae_mmap(NULL,
+ len,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_LOCKED,
+ fd,
+ phy_addr);
+#endif
+
+ if (MAP_FAILED == addr)
+ return NULL;
+
+#ifndef __FreeBSD__
+ addr = mem_protect(addr, len);
+#endif
+
+ return addr;
+}
+
+static inline dev_mem_info_t *ioctl_alloc_slab(const int fd,
+ const size_t size,
+ const int node,
+ enum slabType type)
+{
+ dev_mem_info_t params = {0};
+ int ret = 0;
+ dev_mem_info_t *slab = NULL;
+
+ params.size = size;
+ params.nodeId = node;
+ params.type = type;
+
+ ret = mem_ioctl(fd, DEV_MEM_IOC_MEMALLOC, &params);
+ if (ret)
+ {
+ CMD_ERROR("%s:%d ioctl call for mem allocation failed, ret = %d\n",
+ __func__,
+ __LINE__,
+ ret);
+ return NULL;
+ }
+
+ if (node != params.nodeId)
+ {
+ g_strict_node = 0;
+ }
+
+ if (SMALL == type)
+ slab = mmap_phy_addr(fd, params.phy_addr, params.size);
+ else
+ slab = mmap_phy_addr(fd, params.phy_addr, getpagesize());
+
+ if (NULL == slab)
+ {
+ CMD_ERROR("%s:%d mmap on memory allocated through ioctl failed\n",
+ __func__,
+ __LINE__);
+ ret = mem_ioctl(fd, DEV_MEM_IOC_MEMFREE, &params);
+ if (unlikely(ret))
+ {
+ CMD_ERROR("%s:%d ioctl call for mem free failed, ret = %d\n",
+ __func__,
+ __LINE__,
+ ret);
+ }
+ return NULL;
+ }
+
+ if (SMALL == type)
+ slab->virt_addr = slab;
+ else
+ {
+ slab->virt_addr = mmap_phy_addr(fd, params.phy_addr, params.size);
+
+ if (NULL == slab->virt_addr)
+ {
+ CMD_ERROR("%s:%d mmap failed for large memory allocation\n",
+ __func__,
+ __LINE__);
+ munmap(slab, getpagesize());
+ ret = mem_ioctl(fd, DEV_MEM_IOC_MEMFREE, &params);
+ if (unlikely(ret))
+ {
+ CMD_ERROR("%s:%d ioctl call for mem free failed, ret = %d\n",
+ __func__,
+ __LINE__,
+ ret);
+ }
+ return NULL;
+ }
+ }
+
+ return slab;
+}
+
+static inline dev_mem_info_t *alloc_slab(const int fd,
+ const size_t size,
+ const int node,
+ enum slabType type)
+{
+ dev_mem_info_t *slab = NULL;
+
+ if (HUGE_PAGE == type)
+ {
+#ifndef __FreeBSD__
+ slab = hugepage_alloc_slab(fd, size, node, type);
+#endif
+ }
+ else
+ {
+ slab = ioctl_alloc_slab(fd, size, node, type);
+ }
+
+ /* Store a slab into the hash table for a fast lookup. */
+ if (slab)
+ add_slab_to_hash(slab);
+
+ return slab;
+}
+
+static inline void *alloc_addr(size_t size,
+ const int node,
+ const size_t phys_alignment_byte)
+{
+ dev_mem_info_t *p_ctrl_blk = NULL;
+ void *pVirtAddress = NULL;
+ size_t allocate_pages = 0;
+ enum slabType mem_type = SMALL;
+
+ const size_t phys_align_unit = phys_alignment_byte / UNIT_SIZE;
+ const size_t reserved = div_round_up(sizeof(block_ctrl_t), UNIT_SIZE);
+ /* calculate units needed */
+ const size_t requested_pages = div_round_up(size, UNIT_SIZE) + reserved;
+
+ if (0 != qaeOpenFd())
+ return NULL;
+
+ if (requested_pages > QAE_NUM_PAGES_PER_ALLOC * QAE_PAGE_SIZE / UNIT_SIZE ||
+ phys_alignment_byte >= QAE_NUM_PAGES_PER_ALLOC * QAE_PAGE_SIZE)
+ {
+ mem_type = LARGE;
+ /* Huge page and Large memory are mutually exclusive
+ * Since Large slabs are NOT 2 MB aligned, but huge
+ * pages are always 2 MB aligned.
+ */
+ if (hugepage_enabled())
+ return NULL;
+
+ size = MAX(size, phys_alignment_byte);
+ allocate_pages = div_round_up(size, UNIT_SIZE);
+ }
+ else
+ {
+ allocate_pages = QAE_NUM_PAGES_PER_ALLOC * QAE_PAGE_SIZE / UNIT_SIZE;
+ if (hugepage_enabled())
+ mem_type = HUGE_PAGE;
+
+ p_ctrl_blk = find_slab(fd, size, node, &pVirtAddress, phys_align_unit);
+
+ if (p_ctrl_blk)
+ {
+ p_ctrl_blk->allocations += 1;
+ return pVirtAddress;
+ }
+ }
+
+ /* Try to allocate memory as much as possible */
+ p_ctrl_blk = alloc_slab(fd, allocate_pages * UNIT_SIZE, node, mem_type);
+ if (NULL == p_ctrl_blk)
+ return NULL;
+
+ store_mmap_range(&g_page_table,
+ p_ctrl_blk->virt_addr,
+ p_ctrl_blk->phy_addr,
+ p_ctrl_blk->size,
+ hugepage_enabled());
+
+ if (LARGE == mem_type)
+ {
+ p_ctrl_blk->allocations = 1;
+
+ ADD_ELEMENT_TO_HEAD_LIST(
+ p_ctrl_blk, pUserLargeMemListHead, pUserLargeMemListTail, _user);
+
+ pVirtAddress = p_ctrl_blk->virt_addr;
+ }
+ else
+ {
+ p_ctrl_blk->allocations = 1;
+
+ if ((uintptr_t)p_ctrl_blk->virt_addr % QAE_PAGE_SIZE)
+ {
+ CMD_ERROR("%s:%d Bad virtual address alignment %lux %x %lux\n",
+ __func__,
+ __LINE__,
+ (uintptr_t)p_ctrl_blk->virt_addr,
+ QAE_NUM_PAGES_PER_ALLOC,
+ QAE_PAGE_SIZE);
+ free_slab(fd, p_ctrl_blk);
+
+ return NULL;
+ }
+ pVirtAddress = init_slab_and_alloc(
+ (block_ctrl_t *)p_ctrl_blk, size, phys_align_unit);
+ if (NULL == pVirtAddress)
+ {
+ CMD_ERROR("%s:%d Memory allocation failed Virtual address: %p "
+ " Size: %x \n",
+ __func__,
+ __LINE__,
+ p_ctrl_blk,
+ size);
+ free_slab(fd, p_ctrl_blk);
+
+ return NULL;
+ }
+ }
+ return pVirtAddress;
+}
+
+void *qaeMemAllocNUMA(size_t size, int node, size_t phys_alignment_byte)
+{
+ void *pVirtAddress = NULL;
+ int ret = 0;
+ /* Maximum supported alignment is 4M. */
+ const size_t MAX_PHYS_ALIGN = 0x400000;
+
+ if (!size)
+ {
+ CMD_ERROR("%s:%d Size cannot be zero \n", __func__, __LINE__);
+ return NULL;
+ }
+
+ if (!phys_alignment_byte || phys_alignment_byte > MAX_PHYS_ALIGN ||
+ (phys_alignment_byte & (phys_alignment_byte - 1)))
+ {
+ CMD_ERROR("%s:%d Invalid alignment parameter %d. It must be non zero, "
+ "not more than %d and multiple of 2 \n",
+ __func__,
+ __LINE__,
+ phys_alignment_byte,
+ MAX_PHYS_ALIGN);
+ return NULL;
+ }
+
+ ret = mem_mutex_lock(&mutex);
+ if (unlikely(ret))
+ {
+ CMD_ERROR("%s:%d Error on thread mutex lock %s\n",
+ __func__,
+ __LINE__,
+ strerror(ret));
+ return NULL;
+ }
+
+ pVirtAddress = alloc_addr(size, node, phys_alignment_byte);
+
+ ret = mem_mutex_unlock(&mutex);
+ if (unlikely(ret))
+ {
+ CMD_ERROR("%s:%d Error on thread mutex unlock %s\n",
+ __func__,
+ __LINE__,
+ strerror(ret));
+ return NULL;
+ }
+ return pVirtAddress;
+}
+
+static inline void free_addr(void **p_va)
+{
+ dev_mem_info_t *p_ctrl_blk = NULL;
+
+ if (0 != qaeOpenFd())
+ return;
+
+ if ((p_ctrl_blk = find_slab_in_hash(*p_va)) == NULL)
+ {
+ CMD_ERROR("%s:%d Unable to free as lookup failed on address (%p) "
+ "provided \n",
+ __func__,
+ __LINE__,
+ *p_va);
+ return;
+ }
+ if (SMALL == p_ctrl_blk->type || HUGE_PAGE == p_ctrl_blk->type)
+ {
+ mem_free((block_ctrl_t *)p_ctrl_blk, *p_va);
+
+ p_ctrl_blk->allocations -= 1;
+
+ if (p_ctrl_blk->allocations)
+ {
+ *p_va = NULL;
+ return;
+ }
+
+ REMOVE_ELEMENT_FROM_LIST(
+ p_ctrl_blk, pUserMemListHead, pUserMemListTail, _user);
+ if (0 != push_slab(p_ctrl_blk))
+ free_slab(fd, p_ctrl_blk);
+ }
+ else
+ {
+ REMOVE_ELEMENT_FROM_LIST(
+ p_ctrl_blk, pUserLargeMemListHead, pUserLargeMemListTail, _user);
+ free_slab(fd, p_ctrl_blk);
+ }
+ *p_va = NULL;
+}
+
+void qaeMemFreeNUMA(void **ptr)
+{
+ int ret = 0;
+
+ if (NULL == ptr)
+ {
+ CMD_ERROR(
+ "%s:%d Input parameter cannot be NULL \n", __func__, __LINE__);
+ return;
+ }
+ if (NULL == *ptr)
+ {
+ CMD_ERROR(
+ "%s:%d Address to be freed cannot be NULL \n", __func__, __LINE__);
+ return;
+ }
+ ret = mem_mutex_lock(&mutex);
+ if (ret)
+ {
+ CMD_ERROR("%s:%d Error on thread mutex lock %s\n",
+ __func__,
+ __LINE__,
+ strerror(ret));
+ *ptr = NULL;
+ return;
+ }
+
+ free_addr(ptr);
+
+ ret = mem_mutex_unlock(&mutex);
+ if (ret)
+ {
+ CMD_ERROR("%s:%d Error on thread mutex unlock %s\n",
+ __func__,
+ __LINE__,
+ strerror(ret));
+ }
+ return;
+}
+
+/*translate a virtual address to a physical address */
+uint64_t qaeVirtToPhysNUMA(void *pVirtAddress)
+{
+ return load_addr_fptr(&g_page_table, pVirtAddress);
+}
+
+static int32_t memoryRemap(dev_mem_info_t *head)
+{
+ // NOT SUPPORTED
+ if (NULL != head)
+ {
+ CMD_ERROR("%s:%d not supported \n", __func__, __LINE__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+void qaeAtFork()
+{
+ int ret = 0;
+ int32_t status0 = 0;
+ int32_t status1 = 0;
+ int32_t status2 = 0;
+
+ ret = mem_mutex_lock(&mutex);
+ if (unlikely(ret))
+ {
+ CMD_ERROR(
+ "%s:%d Error(%d) on thread mutex lock \n", __func__, __LINE__, ret);
+ return;
+ }
+
+ status0 = memoryRemap(pUserCacheHead);
+ status1 = memoryRemap(pUserMemListHead);
+ status2 = memoryRemap(pUserLargeMemListHead);
+
+ ret = mem_mutex_unlock(&mutex);
+ if (unlikely(ret))
+ {
+ CMD_ERROR("%s:%d Error on thread mutex unlock %s\n",
+ __func__,
+ __LINE__,
+ strerror(ret));
+ goto fork_exit;
+ }
+
+fork_exit:
+ if (unlikely(status0))
+ {
+ CMD_ERROR(
+ "%s:%d Failed to remap memory allocations \n", __func__, __LINE__);
+ }
+ if (unlikely(status1))
+ {
+ CMD_ERROR(
+ "%s:%d Failed to remap memory allocations \n", __func__, __LINE__);
+ }
+ if (unlikely(status2))
+ {
+ CMD_ERROR("%s:%d Failed to remap large memory allocations \n",
+ __func__,
+ __LINE__);
+ }
+ return;
+}
--- /dev/null
+++ b/quickassist/utilities/libusdm_drv/linux/user_space/qae_page_table.h
@@ -0,0 +1,431 @@
+/*******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * version: QAT1.7.L.4.7.0-00006
+ *
+ * @file qae_page_table.h
+ *
+ * This file provides user-space page tables (similar to Intel x86/x64
+ * page tables) for fast virtual to physical address translation. Essentially,
+ * this is an implementation of the trie data structure optimized for the x86 HW
+ * constraints.
+ * Memory required:
+ * - 8 Mb to cover 4 Gb address space.
+ * I.e. if only 1 Gb is used it will require additional 2 Mb.
+ *
+ ******************************************************************************/
+
+#ifndef QAE_PAGE_TABLE_H_1
+#define QAE_PAGE_TABLE_H_1
+
+#define __STDC_WANT_LIB_EXT1__ 1
+#include <stdint.h>
+#include <sys/mman.h>
+#ifdef __linux__
+#include <string.h>
+#endif
+
+#ifndef __FreeBSD__ /* FreeBSD, already defined in machine param.h */
+#define PAGE_SIZE (0x1000)
+#define PAGE_SHIFT (12)
+#endif
+
+#define QAE_PAGE_MASK (~(PAGE_SIZE - 1))
+#define LEVEL_SIZE (PAGE_SIZE / sizeof(uint64_t))
+
+#define HUGEPAGE_SIZE (0x200000)
+#define HUGEPAGE_SHIFT (21)
+#define HUGEPAGE_MASK (~(HUGEPAGE_SIZE - 1))
+
+typedef struct
+{
+ uint64_t offset : 12;
+ uint64_t idxl0 : 9;
+ uint64_t idxl1 : 9;
+ uint64_t idxl2 : 9;
+ uint64_t idxl3 : 9;
+ uint64_t idxl4 : 9;
+} page_entry_t;
+
+typedef struct
+{
+ uint64_t offset : 21;
+ uint64_t idxl1 : 9;
+ uint64_t idxl2 : 9;
+ uint64_t idxl3 : 9;
+ uint64_t idxl4 : 9;
+} hugepage_entry_t;
+
+typedef union {
+ uint64_t addr;
+ page_entry_t pg_entry;
+ hugepage_entry_t hpg_entry;
+} page_index_t;
+
+typedef struct page_table_t
+{
+ union {
+ uint64_t pa;
+ struct page_table_t *pt;
+ } next[LEVEL_SIZE];
+} page_table_t;
+
+typedef void (*free_page_table_fptr_t)(page_table_t *const table);
+typedef void (*store_addr_fptr_t)(page_table_t *, uintptr_t, uint64_t);
+typedef uint64_t (*load_addr_fptr_t)(page_table_t *, void *);
+typedef uint64_t (*load_key_fptr_t)(page_table_t *, void *);
+
+#ifndef __FreeBSD__
+void set_free_page_table_fptr(free_page_table_fptr_t fp);
+void set_loadaddr_fptr(load_addr_fptr_t fp);
+void set_loadkey_fptr(load_key_fptr_t fp);
+#endif /* __FreeBSD__ */
+
+static inline void *qae_memzero(void *const ptr, const size_t count)
+{
+ uint32_t lim = 0;
+ volatile unsigned char *volatile dstPtr = ptr;
+
+ while (lim < count)
+ {
+ dstPtr[lim++] = '\0';
+ }
+ return (void *)dstPtr;
+}
+
+/*
+ * Fills a memory zone with 0,
+ * returns pointer to the memory zone.
+ */
+static inline void *qae_memzero_explicit(void *const ptr, const size_t count)
+{
+ if (!ptr)
+ {
+ return NULL;
+ }
+#if (defined(__linux__))
+#ifdef __STDC_LIB_EXT1__
+ errno_t result =
+ memset_s(ptr, sizeof(ptr), 0, count); /* Supported on C11 standard */
+ if (result != 0)
+ {
+ return NULL;
+ }
+ return ptr;
+#endif /* __STDC_LIB_EXT1__ */
+#elif (defined(__FreeBSD__))
+ explicit_bzero(ptr, count);
+ return ptr;
+#endif /* __linux__ */
+ return qae_memzero(ptr, count); /* Platform-independent secure memset */
+}
+
+static inline void *next_level(page_table_t *volatile *ptr)
+{
+ page_table_t *old_ptr = *ptr;
+ page_table_t *new_ptr;
+
+ if (NULL != old_ptr)
+ return old_ptr;
+
+ new_ptr = mmap(NULL,
+ sizeof(page_table_t),
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS,
+ -1,
+ 0);
+ if ((void *)-1 == new_ptr)
+ return NULL;
+
+ if (!__sync_bool_compare_and_swap(ptr, NULL, new_ptr))
+ munmap(new_ptr, sizeof(page_table_t));
+
+ return *ptr;
+}
+
+static inline void free_page_level(page_table_t *const level, const size_t iter)
+{
+ size_t i = 0;
+
+ if (0 == iter)
+ return;
+
+ for (i = 0; i < LEVEL_SIZE; ++i)
+ {
+ page_table_t *pt = level->next[i].pt;
+ if (NULL != pt)
+ {
+ free_page_level(pt, iter - 1);
+ munmap(pt, sizeof(page_table_t));
+ }
+ }
+}
+
+static inline void free_page_table(page_table_t *const table)
+{
+ /* There are 1+4 levels in 64-bit page table for 4KB pages. */
+ free_page_level(table, 4);
+ /* Reset global root table. */
+ memset(table, 0, sizeof(page_table_t));
+}
+
+#ifndef __FreeBSD__
+static inline void free_page_table_hpg(page_table_t *const table)
+{
+ /* There are 1+3 levels in 64-bit page table for 2MB hugepages. */
+ free_page_level(table, 3);
+ /* Reset global root table. */
+ memset(table, 0, sizeof(page_table_t));
+}
+#endif /*__FreeBSD__ */
+
+static inline void store_addr(page_table_t *level,
+ uintptr_t virt,
+ uint64_t phys)
+{
+ page_index_t id;
+
+ id.addr = virt;
+
+ level = next_level(&level->next[id.pg_entry.idxl4].pt);
+ if (NULL == level)
+ return;
+
+ level = next_level(&level->next[id.pg_entry.idxl3].pt);
+ if (NULL == level)
+ return;
+
+ level = next_level(&level->next[id.pg_entry.idxl2].pt);
+ if (NULL == level)
+ return;
+
+ level = next_level(&level->next[id.pg_entry.idxl1].pt);
+ if (NULL == level)
+ return;
+
+ level->next[id.pg_entry.idxl0].pa = phys;
+}
+
+static inline void store_addr_hpg(page_table_t *level,
+ uintptr_t virt,
+ uint64_t phys)
+{
+ page_index_t id;
+
+ id.addr = virt;
+
+ level = next_level(&level->next[id.hpg_entry.idxl4].pt);
+ if (NULL == level)
+ return;
+
+ level = next_level(&level->next[id.hpg_entry.idxl3].pt);
+ if (NULL == level)
+ return;
+
+ level = next_level(&level->next[id.hpg_entry.idxl2].pt);
+ if (NULL == level)
+ return;
+
+ level->next[id.hpg_entry.idxl1].pa = phys;
+}
+
+static inline uint64_t get_key(const uint64_t phys)
+{
+ /* For 4KB page: use bits 20-31 of a physical address as a hash key.
+ * It provides a good distribution for 1Mb/2Mb slabs and a moderate
+ * distribution for 128Kb/256Kb/512Kb slabs.
+ */
+ return (phys >> 20) & ~QAE_PAGE_MASK;
+}
+
+static inline void store_mmap_range(page_table_t *p_level,
+ void *p_virt,
+ uint64_t p_phys,
+ size_t p_size,
+ int hp_en)
+{
+ size_t offset;
+ size_t page_size = PAGE_SIZE;
+ uint64_t page_mask = QAE_PAGE_MASK;
+ store_addr_fptr_t store_addr_ptr = store_addr;
+ const uintptr_t virt = (uintptr_t)p_virt;
+
+ if (hp_en)
+ {
+ page_size = HUGEPAGE_SIZE;
+ page_mask = HUGEPAGE_MASK;
+ store_addr_ptr = store_addr_hpg;
+ }
+ /* Store the key into the physical address itself,
+ * for 4KB pages: 12 lower bits are always 0 (physical page addresses
+ * are 4KB-aligned).
+ * for 2MB pages: 21 lower bits are always 0 (physical page addresses
+ * are 2MB-aligned)
+ */
+ p_phys = (p_phys & page_mask) | get_key(p_phys);
+ for (offset = 0; offset < p_size; offset += page_size)
+ {
+ store_addr_ptr(p_level, virt + offset, p_phys + offset);
+ }
+}
+
+static inline uint64_t load_addr(page_table_t *level, void *virt)
+{
+ page_index_t id;
+ uint64_t phy_addr;
+
+ id.addr = (uintptr_t)virt;
+
+ level = level->next[id.pg_entry.idxl4].pt;
+ if (NULL == level)
+ return 0;
+
+ level = level->next[id.pg_entry.idxl3].pt;
+ if (NULL == level)
+ return 0;
+
+ level = level->next[id.pg_entry.idxl2].pt;
+ if (NULL == level)
+ return 0;
+
+ level = level->next[id.pg_entry.idxl1].pt;
+ if (NULL == level)
+ return 0;
+
+ phy_addr = level->next[id.pg_entry.idxl0].pa;
+ return (phy_addr & QAE_PAGE_MASK) | id.pg_entry.offset;
+}
+
+static inline uint64_t load_addr_hpg(page_table_t *level, void *virt)
+{
+ page_index_t id;
+ uint64_t phy_addr;
+
+ id.addr = (uintptr_t)virt;
+
+ level = level->next[id.hpg_entry.idxl4].pt;
+ if (NULL == level)
+ return 0;
+
+ level = level->next[id.hpg_entry.idxl3].pt;
+ if (NULL == level)
+ return 0;
+
+ level = level->next[id.hpg_entry.idxl2].pt;
+ if (NULL == level)
+ return 0;
+
+ phy_addr = level->next[id.hpg_entry.idxl1].pa;
+ return (phy_addr & HUGEPAGE_MASK) | id.hpg_entry.offset;
+}
+
+static inline uint64_t load_key(page_table_t *level, void *virt)
+{
+ page_index_t id;
+ uint64_t phy_addr;
+
+ id.addr = (uintptr_t)virt;
+
+ level = level->next[id.pg_entry.idxl4].pt;
+ if (NULL == level)
+ return 0;
+
+ level = level->next[id.pg_entry.idxl3].pt;
+ if (NULL == level)
+ return 0;
+
+ level = level->next[id.pg_entry.idxl2].pt;
+ if (NULL == level)
+ return 0;
+
+ level = level->next[id.pg_entry.idxl1].pt;
+ if (NULL == level)
+ return 0;
+
+ phy_addr = level->next[id.pg_entry.idxl0].pa;
+ return phy_addr & ~QAE_PAGE_MASK;
+}
+
+#ifndef __FreeBSD__
+static inline uint64_t load_key_hpg(page_table_t *level, void *virt)
+{
+ page_index_t id;
+ uint64_t phy_addr;
+
+ id.addr = (uintptr_t)virt;
+
+ level = level->next[id.hpg_entry.idxl4].pt;
+ if (NULL == level)
+ return 0;
+
+ level = level->next[id.hpg_entry.idxl3].pt;
+ if (NULL == level)
+ return 0;
+
+ level = level->next[id.hpg_entry.idxl2].pt;
+ if (NULL == level)
+ return 0;
+
+ phy_addr = level->next[id.hpg_entry.idxl1].pa;
+ /* the hash key is of 4KB long for both normal page and huge page */
+ return phy_addr & ~QAE_PAGE_MASK;
+}
+#endif /* __FreeBSD__ */
+
+#endif
--- /dev/null
+++ b/quickassist/utilities/libusdm_drv/qae_mem.h
@@ -0,0 +1,244 @@
+/***************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * version: QAT1.7.L.4.7.0-00006
+ *
+ ***************************************************************************/
+/**
+ ***************************************************************************
+ * @file qae_mem.h
+ *
+ * This file provides linux/FreeBSD memory allocation for quick assist API
+ *
+ ****************************************************************************/
+#ifndef QAE_MEM_H_
+#define QAE_MEM_H_
+
+#ifdef __KERNEL__
+#ifdef __FreeBSD__
+#include <sys/types.h>
+#else
+#include <linux/types.h>
+#endif
+#else
+#include <stdint.h>
+#endif
+
+/**
+ *****************************************************************************
+ * @ingroup CommonMemoryDriver
+ * qaeMemAlloc
+ *
+ * @brief
+ * When used in user space, allocates memsize bytes of virtual memory.
+ * When used in kernel space, allocates memsize bytes of contiguous and
+ * pinned memory.
+ *
+ * @param[in] memsize - the amount of memory in bytes to be allocated
+ *
+ * @retval pointer to the allocated memory or NULL if the allocation failed
+ *
+ * @pre
+ * none
+ * @post
+ * memory is allocated and the pointer to the allocated memory location
+ * is returned
+ *
+ ****************************************************************************/
+void *qaeMemAlloc(size_t memsize);
+
+/**
+ *****************************************************************************
+ * @ingroup CommonMemoryDriver
+ * qaeMemFree
+ *
+ * @brief
+ * Frees memory allocated by the qaeMemAlloc function.
+ * Applicable for both user and kernel spaces.
+ *
+ * @param[in] ptr - Address of the pointer to the memory to be freed
+ *
+ * @retval none
+ *
+ * @pre
+ * *ptr points to memory previously allocated by qaeMemAlloc
+ * @post
+ * memory is freed and pointer value is set to NULL
+ *
+ ****************************************************************************/
+void qaeMemFree(void **ptr);
+
+/**
+ *****************************************************************************
+ * @ingroup CommonMemoryDriver
+ * qaeMemAllocNUMA
+ *
+ * @brief
+ * Allocates and returns virtual memory mapped to pinned, contiguous
+ * physical memory aligned to phys_alignment_byte. This API enables
+ * user to choose a CPU node nearest to QAT device. This API is applicable
+ * for both user and kernel spaces. Based on the address space used,
+ * memory mapped from corresponding virtual address space will be returned.
+ *
+ * @param[in] size - A non-zero value representing the amount of memory in
+ * bytes to be allocated. It cannot exceed 4MB
+ * @param[in] node - NUMA node
+ * @param[in] phys_alignment_byte - A non-zero value representing memory
+ * boundary alignment in bytes. It must
+ * be in powers of 2 not exceeding 4KB.
+ *
+ * @retval pointer to the allocated memory or NULL if the allocation failed
+ *
+ * @pre
+ * none
+ * @post
+ * memory is allocated and pointer to the allocated memory is returned
+ *
+ ****************************************************************************/
+void *qaeMemAllocNUMA(size_t size, int node, size_t phys_alignment_byte);
+
+/**
+ *****************************************************************************
+ * @ingroup CommonMemoryDriver
+ * qaeMemFreeNUMA
+ *
+ * @brief
+ * Frees memory allocated by the qaeMemAllocNUMA function.
+ * Applicable for both user and kernel spaces.
+ *
+ * @param[in] ptr - Address of pointer to the memory to be freed
+ *
+ * @retval none
+ *
+ * @pre
+ * *ptr points to memory previously allocated by qaeMemAllocNUMA
+ * @post
+ * memory is freed and the pointer value is set to NULL
+ *
+ ****************************************************************************/
+void qaeMemFreeNUMA(void **ptr);
+
+/**
+ *****************************************************************************
+ * @ingroup CommonMemoryDriver
+ * qaeVirtToPhysNUMA
+ *
+ * @brief
+ * Converts a virtual address provided by qaeMemAllocNUMA to a
+ * physical one. Applicable for both user and kernel spaces.
+ *
+ * @param[in] pVirtAddr - pointer to the virtual address
+ *
+ * @retval pointer to the physical address or 0(NULL) on error
+ *
+ * @pre
+ * pVirtAddr points to memory previously allocated by qaeMemAllocNUMA
+ * @post
+ * Appropriate physical address is provided
+ *
+ ****************************************************************************/
+uint64_t qaeVirtToPhysNUMA(void *pVirtAddr);
+
+#ifdef __FreeBSD__
+/**
+ *****************************************************************************
+ * @ingroup CommonMemoryDriver
+ * qaeMemInitAndReturnFd
+ *
+ * @description
+ * Returns the FD obtained by qaeMemInit
+ *
+ * @param[in]
+ * mem_fd - Address of fd which is updated by qaeMemInit
+ *
+ * @retval
+ * status from qaeMemInit. 0 if the open of the device was successful and
+ * non-zero otherwise
+ *
+ * @pre
+ * File "/dev/qae_mem" is opened successfully
+ * @post
+ * none
+ *
+ ****************************************************************************/
+int qaeMemInitAndReturnFd(int *mem_fd);
+#endif
+
+#ifndef __KERNEL__
+/*! Define a constant for user space to select any available NUMA node */
+#define NUMA_ANY_NODE (-1)
+
+/**
+ ***************************************************************************
+ * @ingroup CommonMemoryDriver
+ * qaeAtFork
+ *
+ * @brief
+ * Must be called when child process is forked to adjust the kernel
+ * memory map page.
+ *
+ * @param[in] - none
+ *
+ * @retval none
+ *
+ ****************************************************************************/
+void qaeAtFork(void);
+#endif
+
+#endif /* #ifndef QAE_MEM_H_ */
--- a/quickassist/include/cpa.h
+++ b/quickassist/include/cpa.h
@@ -694,13 +694,21 @@ typedef enum _CpaInstanceEvent
* implementation may send this event if the hardware device is about to
* be reset.
*/
- CPA_INSTANCE_EVENT_RESTARTED
+ CPA_INSTANCE_EVENT_RESTARTED,
/**< Event type that triggers the registered instance notification callback
* function when and instance has restarted. The reason why an instance has
* restarted is implementation specific. For example a hardware
* implementation may send this event after the hardware device has
* been reset.
*/
+
+ CPA_INSTANCE_EVENT_FATAL_ERROR
+ /**< Event type that triggers the registered instance notification callback
+ * function when an error has been detected that requires the device
+ * to be reset.
+ * This event will be sent by all instances using the device, both on the
+ * host and guests.
+ */
} CpaInstanceEvent;
#ifdef __cplusplus