drivers: import drivers

Imported from:
https://github.com/coolsnowwolf/lede
https://github.com/mdsdtech/packages-driver-rm520ngl

Signed-off-by: Tianling Shen <cnsztl@immortalwrt.org>
commit 16dff3464e (parent d1e046da3c)
Tianling Shen, 2024-01-27 20:30:51 +08:00
125 changed files with 67237 additions and 0 deletions


@@ -0,0 +1,22 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=fibocom-qmi-wwan
PKG_VERSION:=1.0
PKG_RELEASE:=1
include $(INCLUDE_DIR)/kernel.mk
include $(INCLUDE_DIR)/package.mk
define KernelPackage/usb-net-qmi-wwan-fibocom
SUBMENU:=USB Support
TITLE:=QMI WWAN driver for Fibocom modules
DEPENDS:=+kmod-usb-net +kmod-usb-wdm
FILES:=$(PKG_BUILD_DIR)/qmi_wwan_f.ko
AUTOLOAD:=$(call AutoLoad,82,qmi_wwan_f)
endef
define Build/Compile
+$(KERNEL_MAKE) M="$(PKG_BUILD_DIR)" modules
endef
$(eval $(call KernelPackage,usb-net-qmi-wwan-fibocom))


@@ -0,0 +1,38 @@
obj-m += qmi_wwan_f.o
PWD := $(shell pwd)
OUTPUTDIR=/lib/modules/`uname -r`/kernel/drivers/net/usb/
ifeq ($(ARCH),)
ARCH := $(shell uname -m)
endif
ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE :=
endif
ifeq ($(KDIR),)
KDIR := /lib/modules/$(shell uname -r)/build
ifeq ($(ARCH),i686)
ifeq ($(wildcard $(KDIR)/arch/$(ARCH)),)
ARCH=i386
endif
endif
endif
ifneq ($(findstring &,${PWD}),)
$(warning "${PWD}")
$(warning "current directory contain special char '&' !")
$(error "please remove it!")
endif
default:
$(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) modules
install: default
cp $(PWD)/qmi_wwan_f.ko /lib/modules/$(shell uname -r)/kernel/drivers/net/usb/
depmod
modprobe -r qmi_wwan_f
modprobe -r qmi_wwan
modprobe qmi_wwan_f
clean:
rm -rf *~ .tmp_versions modules.order Module.symvers
find . -type f -name "*~" -o -name "*.o" -o -name "*.ko" -o -name "*.cmd" -o -name "*.mod.c" | xargs rm -rf

File diff suppressed because it is too large.


@@ -0,0 +1,22 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=quectel-gobinet
PKG_VERSION:=1.6.3
PKG_RELEASE:=1
include $(INCLUDE_DIR)/kernel.mk
include $(INCLUDE_DIR)/package.mk
define KernelPackage/usb-net-gobinet
SUBMENU:=USB Support
TITLE:=Gobinet driver for Quectel modules
DEPENDS:=+kmod-usb-net
FILES:=$(PKG_BUILD_DIR)/GobiNet.ko
AUTOLOAD:=$(call AutoLoad,81,GobiNet)
endef
define Build/Compile
+$(KERNEL_MAKE) M="$(PKG_BUILD_DIR)" modules
endef
$(eval $(call KernelPackage,usb-net-gobinet))

File diff suppressed because it is too large.


@@ -0,0 +1,43 @@
obj-m := GobiNet.o
GobiNet-objs := GobiUSBNet.o QMIDevice.o QMI.o
PWD := $(shell pwd)
OUTPUTDIR=/lib/modules/`uname -r`/kernel/drivers/net/usb/
ifeq ($(ARCH),)
ARCH := $(shell uname -m)
endif
ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE :=
endif
ifeq ($(KDIR),)
KDIR := /lib/modules/$(shell uname -r)/build
ifeq ($(ARCH),i686)
ifeq ($(wildcard $(KDIR)/arch/$(ARCH)),)
ARCH=i386
endif
endif
endif
$(shell rm -rf usbnet.h)
ifneq ($(wildcard $(KDIR)/drivers/usb/net/usbnet.h),)
$(shell ln -s $(KDIR)/drivers/usb/net/usbnet.h usbnet.h)
endif
ifneq ($(wildcard $(KDIR)/drivers/net/usb/usbnet.h),)
$(shell ln -s $(KDIR)/drivers/net/usb/usbnet.h usbnet.h)
endif
default:
ln -sf makefile Makefile
$(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) modules
install: default
mkdir -p $(OUTPUTDIR)
cp -f GobiNet.ko $(OUTPUTDIR)
depmod
modprobe -r GobiNet
modprobe GobiNet
clean:
rm -rf Makefile usbnet.h
rm -rf *.o *~ core .depend .*.cmd *.ko *.mod.c .tmp_versions Module.* modules.order

File diff suppressed because it is too large.


@@ -0,0 +1,337 @@
/*===========================================================================
FILE:
QMI.h
DESCRIPTION:
Qualcomm QMI driver header
FUNCTIONS:
Generic QMUX functions
ParseQMUX
FillQMUX
Generic QMI functions
GetTLV
ValidQMIMessage
GetQMIMessageID
Get sizes of buffers needed by QMI requests
QMUXHeaderSize
QMICTLGetClientIDReqSize
QMICTLReleaseClientIDReqSize
QMICTLReadyReqSize
QMIWDSSetEventReportReqSize
QMIWDSGetPKGSRVCStatusReqSize
QMIDMSGetMEIDReqSize
QMICTLSyncReqSize
Fill Buffers with QMI requests
QMICTLGetClientIDReq
QMICTLReleaseClientIDReq
QMICTLReadyReq
QMIWDSSetEventReportReq
QMIWDSGetPKGSRVCStatusReq
QMIDMSGetMEIDReq
QMICTLSetDataFormatReq
QMICTLSyncReq
Parse data from QMI responses
QMICTLGetClientIDResp
QMICTLReleaseClientIDResp
QMIWDSEventResp
QMIDMSGetMEIDResp
Copyright (c) 2011, Code Aurora Forum. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Code Aurora Forum nor
the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
===========================================================================*/
#pragma once
/*=========================================================================*/
// Definitions
/*=========================================================================*/
extern int quec_debug;
// DBG macro
#define DBG( format, arg... ) do { \
if (quec_debug == 1)\
{ \
printk( KERN_INFO "GobiNet::%s " format, __FUNCTION__, ## arg ); \
} }while(0)
#if 0
#define VDBG( format, arg... ) do { \
if (debug == 1)\
{ \
printk( KERN_INFO "GobiNet::%s " format, __FUNCTION__, ## arg ); \
} } while(0)
#else
#define VDBG( format, arg... ) do { } while(0)
#endif
#define INFO( format, arg... ) do { \
printk( KERN_INFO "GobiNet::%s " format, __FUNCTION__, ## arg ); \
}while(0)
// QMI Service Types
#define QMICTL 0
#define QMIWDS 1
#define QMIDMS 2
#define QMINAS 3
#define QMIUIM 11
#define QMIWDA 0x1A
#define u8 unsigned char
#define u16 unsigned short
#define u32 unsigned int
#define u64 unsigned long long
#define bool u8
#define true 1
#define false 0
#define ENOMEM 12
#define EFAULT 14
#define EINVAL 22
#ifndef ENOMSG
#define ENOMSG 42
#endif
#define ENODATA 61
#define TLV_TYPE_LINK_PROTO 0x10
/*=========================================================================*/
// Struct sQMUX
//
// Structure that defines a QMUX header
/*=========================================================================*/
typedef struct sQMUX
{
/* T\F, always 1 */
u8 mTF;
/* Size of message */
u16 mLength;
/* Control flag */
u8 mCtrlFlag;
/* Service Type */
u8 mQMIService;
/* Client ID */
u8 mQMIClientID;
}__attribute__((__packed__)) sQMUX;
#if 0
/*=========================================================================*/
// Generic QMUX functions
/*=========================================================================*/
// Remove QMUX headers from a buffer
int ParseQMUX(
u16 * pClientID,
void * pBuffer,
u16 buffSize );
// Fill buffer with QMUX headers
int FillQMUX(
u16 clientID,
void * pBuffer,
u16 buffSize );
/*=========================================================================*/
// Generic QMI functions
/*=========================================================================*/
// Get data buffer of a specified TLV from a QMI message
int GetTLV(
void * pQMIMessage,
u16 messageLen,
u8 type,
void * pOutDataBuf,
u16 bufferLen );
// Check mandatory TLV in a QMI message
int ValidQMIMessage(
void * pQMIMessage,
u16 messageLen );
// Get the message ID of a QMI message
int GetQMIMessageID(
void * pQMIMessage,
u16 messageLen );
/*=========================================================================*/
// Get sizes of buffers needed by QMI requests
/*=========================================================================*/
// Get size of buffer needed for QMUX
u16 QMUXHeaderSize( void );
// Get size of buffer needed for QMUX + QMICTLGetClientIDReq
u16 QMICTLGetClientIDReqSize( void );
// Get size of buffer needed for QMUX + QMICTLReleaseClientIDReq
u16 QMICTLReleaseClientIDReqSize( void );
// Get size of buffer needed for QMUX + QMICTLReadyReq
u16 QMICTLReadyReqSize( void );
// Get size of buffer needed for QMUX + QMIWDSSetEventReportReq
u16 QMIWDSSetEventReportReqSize( void );
// Get size of buffer needed for QMUX + QMIWDSGetPKGSRVCStatusReq
u16 QMIWDSGetPKGSRVCStatusReqSize( void );
u16 QMIWDSSetQMUXBindMuxDataPortSize( void );
// Get size of buffer needed for QMUX + QMIDMSGetMEIDReq
u16 QMIDMSGetMEIDReqSize( void );
// Get size of buffer needed for QMUX + QMIWDASetDataFormatReq
u16 QMIWDASetDataFormatReqSize( int qmap_mode );
// Get size of buffer needed for QMUX + QMICTLSyncReq
u16 QMICTLSyncReqSize( void );
/*=========================================================================*/
// Fill Buffers with QMI requests
/*=========================================================================*/
// Fill buffer with QMI CTL Get Client ID Request
int QMICTLGetClientIDReq(
void * pBuffer,
u16 buffSize,
u8 transactionID,
u8 serviceType );
// Fill buffer with QMI CTL Release Client ID Request
int QMICTLReleaseClientIDReq(
void * pBuffer,
u16 buffSize,
u8 transactionID,
u16 clientID );
// Fill buffer with QMI CTL Get Version Info Request
int QMICTLReadyReq(
void * pBuffer,
u16 buffSize,
u8 transactionID );
// Fill buffer with QMI WDS Set Event Report Request
int QMIWDSSetEventReportReq(
void * pBuffer,
u16 buffSize,
u16 transactionID );
// Fill buffer with QMI WDS Get PKG SRVC Status Request
int QMIWDSGetPKGSRVCStatusReq(
void * pBuffer,
u16 buffSize,
u16 transactionID );
u16 QMIWDSSetQMUXBindMuxDataPortReq(
void * pBuffer,
u16 buffSize,
u8 MuxId,
u16 transactionID );
// Fill buffer with QMI DMS Get Serial Numbers Request
int QMIDMSGetMEIDReq(
void * pBuffer,
u16 buffSize,
u16 transactionID );
// Fill buffer with QMI WDA Set Data Format Request
int QMIWDASetDataFormatReq(
void * pBuffer,
u16 buffSize,
bool bRawIPMode, int qmap_mode, u32 rx_size,
u16 transactionID );
#if 0
int QMIWDASetDataQmapReq(
void * pBuffer,
u16 buffSize,
u16 transactionID );
#endif
int QMICTLSyncReq(
void * pBuffer,
u16 buffSize,
u16 transactionID );
/*=========================================================================*/
// Parse data from QMI responses
/*=========================================================================*/
// Parse the QMI CTL Get Client ID Resp
int QMICTLGetClientIDResp(
void * pBuffer,
u16 buffSize,
u16 * pClientID );
// Verify the QMI CTL Release Client ID Resp is valid
int QMICTLReleaseClientIDResp(
void * pBuffer,
u16 buffSize );
// Parse the QMI WDS Set Event Report Resp/Indication or
// QMI WDS Get PKG SRVC Status Resp/Indication
int QMIWDSEventResp(
void * pBuffer,
u16 buffSize,
u32 * pTXOk,
u32 * pRXOk,
u32 * pTXErr,
u32 * pRXErr,
u32 * pTXOfl,
u32 * pRXOfl,
u64 * pTXBytesOk,
u64 * pRXBytesOk,
bool * pbLinkState,
bool * pbReconfigure );
// Parse the QMI DMS Get Serial Numbers Resp
int QMIDMSGetMEIDResp(
void * pBuffer,
u16 buffSize,
char * pMEID,
int meidSize );
// Parse the QMI DMS Get Serial Numbers Resp
int QMIWDASetDataFormatResp(
void * pBuffer,
u16 buffSize, bool bRawIPMode, int *qmap_enabled, int *rx_size, int *tx_size);
// Parse the QMI CTL Sync Response
int QMICTLSyncResp(
void *pBuffer,
u16 buffSize );
#endif

File diff suppressed because it is too large.


@@ -0,0 +1,368 @@
/*===========================================================================
FILE:
QMIDevice.h
DESCRIPTION:
Functions related to the QMI interface device
FUNCTIONS:
Generic functions
IsDeviceValid
PrintHex
GobiSetDownReason
GobiClearDownReason
GobiTestDownReason
Driver level asynchronous read functions
ResubmitIntURB
ReadCallback
IntCallback
StartRead
KillRead
Internal read/write functions
ReadAsync
UpSem
ReadSync
WriteSyncCallback
WriteSync
Internal memory management functions
GetClientID
ReleaseClientID
FindClientMem
AddToReadMemList
PopFromReadMemList
AddToNotifyList
NotifyAndPopNotifyList
AddToURBList
PopFromURBList
Internal userspace wrapper functions
UserspaceunlockedIOCTL
Userspace wrappers
UserspaceOpen
UserspaceIOCTL
UserspaceClose
UserspaceRead
UserspaceWrite
UserspacePoll
Initializer and destructor
RegisterQMIDevice
DeregisterQMIDevice
Driver level client management
QMIReady
QMIWDSCallback
SetupQMIWDSCallback
QMIDMSGetMEID
Copyright (c) 2011, Code Aurora Forum. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Code Aurora Forum nor
the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
===========================================================================*/
//---------------------------------------------------------------------------
// Pragmas
//---------------------------------------------------------------------------
#pragma once
//---------------------------------------------------------------------------
// Include Files
//---------------------------------------------------------------------------
#include "Structs.h"
#include "QMI.h"
/*=========================================================================*/
// Generic functions
/*=========================================================================*/
#ifdef __QUECTEL_INTER__
// Basic test to see if device memory is valid
static bool IsDeviceValid( sGobiUSBNet * pDev );
/*=========================================================================*/
// Driver level asynchronous read functions
/*=========================================================================*/
// Resubmit interrupt URB, re-using same values
static int ResubmitIntURB( struct urb * pIntURB );
// Read callback
// Put the data in storage and notify anyone waiting for data
#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,18 ))
static void ReadCallback( struct urb * pReadURB );
#else
static void ReadCallback(struct urb *pReadURB, struct pt_regs *regs);
#endif
// Interrupt callback
// Data is available, start a read URB
#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,18 ))
static void IntCallback( struct urb * pIntURB );
#else
static void IntCallback(struct urb *pIntURB, struct pt_regs *regs);
#endif
/*=========================================================================*/
// Internal read/write functions
/*=========================================================================*/
// Start asynchronous read
// Reading client's data store, not device
static int ReadAsync(
sGobiUSBNet * pDev,
u16 clientID,
u16 transactionID,
void (*pCallback)(sGobiUSBNet *, u16, void *),
void * pData );
// Notification function for synchronous read
static void UpSem(
sGobiUSBNet * pDev,
u16 clientID,
void * pData );
// Start synchronous read
// Reading client's data store, not device
static int ReadSync(
sGobiUSBNet * pDev,
void ** ppOutBuffer,
u16 clientID,
u16 transactionID );
// Write callback
#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,18 ))
static void WriteSyncCallback( struct urb * pWriteURB );
#else
static void WriteSyncCallback(struct urb *pWriteURB, struct pt_regs *regs);
#endif
// Start synchronous write
static int WriteSync(
sGobiUSBNet * pDev,
char * pInWriteBuffer,
int size,
u16 clientID );
/*=========================================================================*/
// Internal memory management functions
/*=========================================================================*/
// Create client and allocate memory
static int GetClientID(
sGobiUSBNet * pDev,
u8 serviceType );
// Release client and free memory
static void ReleaseClientID(
sGobiUSBNet * pDev,
u16 clientID );
// Find this client's memory
static sClientMemList * FindClientMem(
sGobiUSBNet * pDev,
u16 clientID );
// Add Data to this client's ReadMem list
static bool AddToReadMemList(
sGobiUSBNet * pDev,
u16 clientID,
u16 transactionID,
void * pData,
u16 dataSize );
// Remove data from this client's ReadMem list if it matches
// the specified transaction ID.
static bool PopFromReadMemList(
sGobiUSBNet * pDev,
u16 clientID,
u16 transactionID,
void ** ppData,
u16 * pDataSize );
// Add Notify entry to this client's notify List
static bool AddToNotifyList(
sGobiUSBNet * pDev,
u16 clientID,
u16 transactionID,
void (* pNotifyFunct)(sGobiUSBNet *, u16, void *),
void * pData );
// Remove first Notify entry from this client's notify list
// and Run function
static bool NotifyAndPopNotifyList(
sGobiUSBNet * pDev,
u16 clientID,
u16 transactionID );
// Add URB to this client's URB list
static bool AddToURBList(
sGobiUSBNet * pDev,
u16 clientID,
struct urb * pURB );
// Remove URB from this client's URB list
static struct urb * PopFromURBList(
sGobiUSBNet * pDev,
u16 clientID );
/*=========================================================================*/
// Internal userspace wrappers
/*=========================================================================*/
// Userspace unlocked ioctl
static long UserspaceunlockedIOCTL(
struct file * pFilp,
unsigned int cmd,
unsigned long arg );
/*=========================================================================*/
// Userspace wrappers
/*=========================================================================*/
// Userspace open
static int UserspaceOpen(
struct inode * pInode,
struct file * pFilp );
#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,36 ))
// Userspace ioctl
static int UserspaceIOCTL(
struct inode * pUnusedInode,
struct file * pFilp,
unsigned int cmd,
unsigned long arg );
#endif
// Userspace close
#define quectel_no_for_each_process
#ifdef quectel_no_for_each_process
static int UserspaceClose(
struct inode * pInode,
struct file * pFilp );
#else
#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,14 ))
static int UserspaceClose(
struct file * pFilp,
fl_owner_t unusedFileTable );
#else
static int UserspaceClose( struct file * pFilp );
#endif
#endif
// Userspace read (synchronous)
static ssize_t UserspaceRead(
struct file * pFilp,
char __user * pBuf,
size_t size,
loff_t * pUnusedFpos );
// Userspace write (synchronous)
static ssize_t UserspaceWrite(
struct file * pFilp,
const char __user * pBuf,
size_t size,
loff_t * pUnusedFpos );
static unsigned int UserspacePoll(
struct file * pFilp,
struct poll_table_struct * pPollTable );
/*=========================================================================*/
// Driver level client management
/*=========================================================================*/
// Check if QMI is ready for use
static bool QMIReady(
sGobiUSBNet * pDev,
u16 timeout );
// QMI WDS callback function
static void QMIWDSCallback(
sGobiUSBNet * pDev,
u16 clientID,
void * pData );
// Fire off requests and start async read for QMI WDS callback
static int SetupQMIWDSCallback( sGobiUSBNet * pDev );
// Register client, send req and parse MEID response, release client
static int QMIDMSGetMEID( sGobiUSBNet * pDev );
// Register client, send req and parse Data format response, release client
static int QMIWDASetDataFormat( sGobiUSBNet * pDev, int qmap_mode, int *rx_urb_size );
#endif
// Print Hex data, for debug purposes
void QuecPrintHex(
void * pBuffer,
u16 bufSize );
// Sets mDownReason and turns carrier off
void QuecGobiSetDownReason(
sGobiUSBNet * pDev,
u8 reason );
// Clear mDownReason and may turn carrier on
void QuecGobiClearDownReason(
sGobiUSBNet * pDev,
u8 reason );
// Tests mDownReason and returns whether reason is set
bool QuecGobiTestDownReason(
sGobiUSBNet * pDev,
u8 reason );
// Start continuous read "thread"
int QuecStartRead( sGobiUSBNet * pDev );
// Kill continuous read "thread"
void QuecKillRead( sGobiUSBNet * pDev );
/*=========================================================================*/
// Initializer and destructor
/*=========================================================================*/
// QMI Device initialization function
int QuecRegisterQMIDevice( sGobiUSBNet * pDev );
// QMI Device cleanup function
void QuecDeregisterQMIDevice( sGobiUSBNet * pDev );
int QuecQMIWDASetDataFormat( sGobiUSBNet * pDev, int qmap_mode, int *rx_urb_size );
#define PrintHex QuecPrintHex
#define GobiSetDownReason QuecGobiSetDownReason
#define GobiClearDownReason QuecGobiClearDownReason
#define GobiTestDownReason QuecGobiTestDownReason
#define StartRead QuecStartRead
#define KillRead QuecKillRead
#define RegisterQMIDevice QuecRegisterQMIDevice
#define DeregisterQMIDevice QuecDeregisterQMIDevice


@@ -0,0 +1,78 @@
Gobi3000 network driver 2011-07-29-1026
This readme covers important information concerning
the Gobi Net driver.
Table of Contents
1. What's new in this release
2. Known issues
3. Known platform issues
-------------------------------------------------------------------------------
1. WHAT'S NEW
This Release (Gobi3000 network driver 2011-07-29-1026)
a. Signal the device to leave low power mode on enumeration
b. Add "txQueueLength" parameter, which will set the Tx Queue Length
c. Send SetControlLineState message during driver/device removal
d. Change to new date-based versioning scheme
Prior Release (Gobi3000 network driver 1.0.60) 06/29/2011
a. Add UserspacePoll() function, to support select()
b. Fix possible deadlock on GobiUSBNetTXTimeout()
c. Fix memory leak on data transmission
Prior Release (Gobi3000 network driver 1.0.50) 05/18/2011
a. Add support for kernels up to 2.6.38
b. Add support for dynamic interface binding
Prior Release (Gobi3000 network driver 1.0.40) 02/28/2011
a. In cases of QMI read errors, discard the error and continue reading.
b. Add "interruptible" parameter, which may be disabled for debugging purposes.
Prior Release (Gobi3000 network driver 1.0.30) 01/05/2011
a. Fix rare kernel PANIC if a process terminates while file handle close
or device removal is in progress.
Prior Release (Gobi3000 network driver 1.0.20) 11/01/2010
a. Fix possible kernel WARNING if device removed before QCWWANDisconnect().
b. Fix multiple memory leaks in error cases.
Prior Release (Gobi3000 network driver 1.0.10) 09/17/2010
a. Initial release
-------------------------------------------------------------------------------
2. KNOWN ISSUES
No known issues.
-------------------------------------------------------------------------------
3. KNOWN PLATFORM ISSUES
a. Enabling autosuspend:
Autosuspend is supported by the Gobi3000 module and its drivers,
but by default it is not enabled by the open source kernel. As such,
the Gobi3000 module will not enter autosuspend unless the
user specifically turns on autosuspend with the command:
echo auto > /sys/bus/usb/devices/.../power/level
b. Ksoftirq using 100% CPU:
There is a known issue with the open source usbnet driver that can
result in infinite software interrupts. The fix for this is to test
(in the usbnet_bh() function) if the usb_device can submit URBs before
attempting to submit the response URB buffers (a sketch of such a check follows at the end of this section).
c. NetworkManager does not recognize connection after resume:
After resuming from sleep/hibernate, NetworkManager may not recognize new
network connections by the Gobi device. This is a system issue not specific
to the Gobi device, which may result in dhcp not being run and the default
route not being updated. One way to fix this is to simply restart the
NetworkManager service.
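A minimal sketch of the check described in item (b), assuming the stock usbnet driver's usbnet_bh() tasklet and standard USB core/netdev helpers; the helper name and the exact test are illustrative, not the upstream patch:

#include <linux/usb.h>
#include <linux/netdevice.h>
#include <linux/usb/usbnet.h>

/* Call this at the top of usbnet_bh() before resubmitting the response (RX)
 * URB buffers; if it returns false, return from the tasklet instead of
 * queueing more URBs, which stops the endless softirq rescheduling. */
static bool gobi_device_can_submit_urbs(struct usbnet *dev)
{
	/* device must still be attached to the bus and the netdev present */
	return dev->udev->state != USB_STATE_NOTATTACHED &&
	       netif_device_present(dev->net);
}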
-------------------------------------------------------------------------------


@@ -0,0 +1,166 @@
Release Notes
[V1.6.3]
Date: 9/26/2021
enhancement:
1. change version to 1.6.3
fix:
[V1.6.2.16]
Date: 9/17/2021
enhancement:
fix:
1. add sdx6x platform support
[V1.6.2.15]
Date: 3/23/2021
enhancement:
fix:
1. add sdx12 platform support
[V1.6.2.14]
Date: 3/18/2021
enhancement:
fix:
1. fix KASAN use-after-free during modem reboot stress tests
2. wait for qmi_sync_thread() to finish in DeregisterQMIDevice(); otherwise USB may disconnect while the driver is still in qmi_sync_thread()
[V1.6.2.13]
Date: 12/31/2020
enhancement:
fix:
1. fix quectel-CM open error when the driver is still in qmi_sync_thread() but the SoC enters sleep.
[V1.6.2.12]
Date: 12/31/2020
enhancement:
fix:
1. for multi-pdn-call, fix inability to ping after USB resumes from the suspend state.
[V1.6.2.11]
Date: 11/7/2020
enhancement:
1. support QUECTEL_QMI_MERGE: on some SoCs the control endpoint can read at most 64 bytes of QMI per transfer,
so QMI messages larger than 64 bytes must be read in several chunks and merged (see the sketch below).
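A hedged sketch of such a merge, assuming each chunk read from the control endpoint begins with the sQMIMsgHeader declared in Structs.h, that cur_len is the payload size of the current fragment, and that total_len is the size of the full QMI message (semantics assumed here; byte-order handling is omitted):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define MERGE_PACKET_IDENTITY 0x2c7c
#define MERGE_PACKET_VERSION  0x0001

struct merge_hdr { uint16_t idenity, version, cur_len, total_len; }; /* mirrors sQMIMsgHeader */
struct merge_buf { uint8_t data[4096]; size_t len; };

/* Returns 1 when a complete QMI message sits in mb->data (length mb->len),
 * 0 when more fragments are needed, -1 on a malformed chunk. */
static int qmi_merge_chunk(struct merge_buf *mb, const uint8_t *chunk, size_t n)
{
    struct merge_hdr h;

    if (n < sizeof(h))
        return -1;
    memcpy(&h, chunk, sizeof(h));                      /* avoid unaligned access */
    if (h.idenity != MERGE_PACKET_IDENTITY || h.version != MERGE_PACKET_VERSION)
        return -1;                                     /* not a merge-framed chunk */
    if (n < sizeof(h) + h.cur_len || mb->len + h.cur_len > sizeof(mb->data))
        return -1;                                     /* truncated chunk or staging overflow */

    memcpy(mb->data + mb->len, chunk + sizeof(h), h.cur_len);
    mb->len += h.cur_len;

    if (mb->len == h.total_len)                        /* full QMI message assembled */
        return 1;                                      /* caller consumes it, then resets mb->len */
    return 0;
}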
fix:
[V1.6.2.10]
Date: 9/15/2020
enhancement:
fix:
1. for X55, fix panic on kernel V2.6 ~ V3.2
[V1.6.2.9]
Date: 7/24/2020
enhancement:
fix:
1. for X55, fix errors on Big Endian SOC.
[V1.6.2.8]
Date: 7/2/2020
enhancement:
1. support QMAPV5, UL AGG (porting from qmi_wwan_q)
fix:
1. fix errors on kernel V2.6.
[V1.6.2.7]
Date: 6/9/2020
enhancement:
fix:
1. when sending a QMI CTL request, clear any pending QMI CTL response with the same TID
[V1.6.2.6]
Date: 5/19/2020
enhancement:
1. support bridge mode for multi-pdn-call
fix:
[V1.6.2.5]
Date: 4/26/2020
enhancement:
1. change netcard name to usbX (from ethX)
fix:
......
[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.5.0]
Date: 2018/04/17
enhancement:
1. support EG20&RG500
2. set rx_urb_size to 1520; do not change it according to MTU
[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.4.3]
Date: 2018/04/16
enhancement:
1. increase QMAP's rx_urb_size to 32KB
[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.4.2]
Date: 2018/04/03
bug fix:
1. fix QMI client not being released when quectel-CM is killed by 'kill -9'
[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.4.1]
Date: 2018/02/20
bug fix:
1. fix a compiler error on kernels newer than 4.11
[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.4.0]
Date: 2018/12/17
bug fix:
1. fix a USB DMA error when built as GobiNet.ko on kernels newer than 4.15
[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.3.8]
[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.3.7]
Date: 2018/09/25
enhancement:
1. check skb length in tx_fixup functions.
2. when QMAP is enabled, set FLAG_RX_ASSEMBLE to avoid 'RX errors' in ifconfig
[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.3.6]
Date: 2018/09/11
enhancement:
1. support EG12 EM12
2. optimize QMAP source code
3. fix compile errors and warnings on kernel version 4.15
[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.3.5]
Date: 2018/05/12
enhancement:
1. provide two methods to enable the QMAP function.
1.1 set the module parameter 'qmap_mode' to X (1~4) to enable QMAP.
1.2 ifconfig usb0 down, then 'echo X > /sys/class/usbX/qmap_mode' to enable QMAP
for the above two methods, X=1 enables 'IP Aggregation' and X=2~4 enables 'IP Mux'
2. support bridge mode; two methods are likewise provided to enable it.
2.1 set the module parameter 'bridge_mode' to 1 to enable bridge mode.
2.2 'echo 1 > /sys/class/usbX/bridge_mode' to enable bridge mode.
bridge mode setup:
brctl addbr br0; brctl addif br0 eth0; brctl addif br0 usb0; ./quectel-CM; ifconfig br0 up; ifconfig eth0 up
then connect eth0 to a PC with an Ethernet cable, and run a DHCP client on the PC to obtain the public network IP address.
'WCDMA&LTE_QConnectManager_Linux&Android_V1.1.40' or a later version is required to use QMAP and bridge mode.
[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.3.4]
Date: 2018/05/07
enhancement:
1. support using 'AT$QCRMCALL=1,1' to set up a data call.
when using 'AT$QCRMCALL=1,1', the module parameter 'qcrmcall_mode' must be set to 1,
and the GobiNet driver will then not tx/rx QMI.
[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.3.3]
Date: 2018/04/04
optimization:
1. optimize QMAP source code
[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.3.2]
Date: 2018/03/23
enhancement:
1. support Qualcomm Mux and Aggregation Protocol (QMAP)
1.1 IP Mux: the GobiNet driver registers multiple netcards, each corresponding to one PDP,
and it will tx/rx multiple IP packets, possibly belonging to different PDPs, in one URB (a header-walking sketch follows below).
1.2 IP Aggregation: the GobiNet driver will rx multiple IP packets in one URB, which in theory increases throughput by reducing the number of USB interrupts.
the max rx URB size of MDM9x07 is 4KB; the max rx URB size of MDM9x40&SDX20 is 16KB
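A hedged illustration of how several QMAP frames can share one aggregated URB: the 4-byte header layout below follows the commonly documented QMAP v1 format (command/pad byte, mux_id, big-endian length covering payload plus padding), which is an assumption here rather than something taken from this driver:

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>   /* ntohs */

struct qmap_hdr {
    uint8_t  ctrl_pad;    /* bit 7: command frame, bits 0-5: pad length */
    uint8_t  mux_id;      /* PDP selector, cf. QUECTEL_QMAP_MUX_ID in Structs.h */
    uint16_t pkt_len_be;  /* big-endian: payload + pad, header excluded */
};

/* Walk one aggregated buffer and hand each data frame's IP payload to cb(). */
static void walk_qmap_urb(const uint8_t *buf, size_t len,
                          void (*cb)(uint8_t mux_id, const uint8_t *pkt, size_t pkt_len))
{
    while (len >= sizeof(struct qmap_hdr)) {
        struct qmap_hdr h;
        size_t frame, pad;

        memcpy(&h, buf, sizeof(h));
        frame = ntohs(h.pkt_len_be);
        pad   = h.ctrl_pad & 0x3f;

        if (sizeof(h) + frame > len || pad > frame)
            break;                            /* truncated or malformed frame */
        if (!(h.ctrl_pad & 0x80))             /* skip command frames, deliver data */
            cb(h.mux_id, buf + sizeof(h), frame - pad);

        buf += sizeof(h) + frame;             /* advance to the next aggregated frame */
        len -= sizeof(h) + frame;
    }
}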
[Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.3.1]
Date: 2017/11/20
enhancement:
1. support BG96


@@ -0,0 +1,529 @@
/*===========================================================================
FILE:
Structs.h
DESCRIPTION:
Declaration of structures used by the Qualcomm Linux USB Network driver
FUNCTIONS:
none
Copyright (c) 2011, Code Aurora Forum. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Code Aurora Forum nor
the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
===========================================================================*/
//---------------------------------------------------------------------------
// Pragmas
//---------------------------------------------------------------------------
#pragma once
//---------------------------------------------------------------------------
// Include Files
//---------------------------------------------------------------------------
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/version.h>
#include <linux/cdev.h>
#include <linux/kthread.h>
#include <linux/poll.h>
#include <linux/completion.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#define QUECTEL_WWAN_QMAP 4 //MAX is 7
#ifdef QUECTEL_WWAN_QMAP
#define QUECTEL_QMAP_MUX_ID 0x81
#endif
//#define QUECTEL_QMI_MERGE
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
#define QUECTEL_BRIDGE_MODE
#endif
#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 2,6,21 ))
static inline void skb_reset_mac_header(struct sk_buff *skb)
{
skb->mac.raw = skb->data;
}
#endif
#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 2,6,22 ))
#define bool u8
#ifndef URB_FREE_BUFFER
#define URB_FREE_BUFFER_BY_SELF // usb_free_urb will not free the buffer; we must free it ourselves
#define URB_FREE_BUFFER 0x0100 /* Free transfer buffer with the URB */
#endif
/**
* usb_endpoint_type - get the endpoint's transfer type
* @epd: endpoint to be checked
*
* Returns one of USB_ENDPOINT_XFER_{CONTROL, ISOC, BULK, INT} according
* to @epd's transfer type.
*/
static inline int usb_endpoint_type(const struct usb_endpoint_descriptor *epd)
{
return epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
}
#endif
#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 2,6,18 ))
/**
* usb_endpoint_dir_in - check if the endpoint has IN direction
* @epd: endpoint to be checked
*
* Returns true if the endpoint is of type IN, otherwise it returns false.
*/
static inline int usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd)
{
return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN);
}
/**
* usb_endpoint_dir_out - check if the endpoint has OUT direction
* @epd: endpoint to be checked
*
* Returns true if the endpoint is of type OUT, otherwise it returns false.
*/
static inline int usb_endpoint_dir_out(
const struct usb_endpoint_descriptor *epd)
{
return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT);
}
/**
* usb_endpoint_xfer_int - check if the endpoint has interrupt transfer type
* @epd: endpoint to be checked
*
* Returns true if the endpoint is of type interrupt, otherwise it returns
* false.
*/
static inline int usb_endpoint_xfer_int(
const struct usb_endpoint_descriptor *epd)
{
return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
USB_ENDPOINT_XFER_INT);
}
static inline int usb_autopm_set_interface(struct usb_interface *intf)
{ return 0; }
static inline int usb_autopm_get_interface(struct usb_interface *intf)
{ return 0; }
static inline int usb_autopm_get_interface_async(struct usb_interface *intf)
{ return 0; }
static inline void usb_autopm_put_interface(struct usb_interface *intf)
{ }
static inline void usb_autopm_put_interface_async(struct usb_interface *intf)
{ }
static inline void usb_autopm_enable(struct usb_interface *intf)
{ }
static inline void usb_autopm_disable(struct usb_interface *intf)
{ }
static inline void usb_mark_last_busy(struct usb_device *udev)
{ }
#endif
#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 2,6,24 ))
#include "usbnet.h"
#else
#include <linux/usb/usbnet.h>
#endif
#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,25 ))
#include <linux/fdtable.h>
#else
#include <linux/file.h>
#endif
// Used in recursion, defined later below
struct sGobiUSBNet;
#if defined(QUECTEL_WWAN_QMAP)
#define QUECTEL_UL_DATA_AGG 1
#if defined(QUECTEL_UL_DATA_AGG)
struct ul_agg_ctx {
/* QMIWDS_ADMIN_SET_DATA_FORMAT_RESP TLV_0x17 and TLV_0x18 */
uint ul_data_aggregation_max_datagrams; //UplinkDataAggregationMaxDatagramsTlv
uint ul_data_aggregation_max_size; //UplinkDataAggregationMaxSizeTlv
uint dl_minimum_padding;
};
#endif
#endif
/*=========================================================================*/
// Struct sReadMemList
//
// Structure that defines an entry in a Read Memory linked list
/*=========================================================================*/
typedef struct sReadMemList
{
/* Data buffer */
void * mpData;
/* Transaction ID */
u16 mTransactionID;
/* Size of data buffer */
u16 mDataSize;
/* Next entry in linked list */
struct sReadMemList * mpNext;
} sReadMemList;
/*=========================================================================*/
// Struct sNotifyList
//
// Structure that defines an entry in a Notification linked list
/*=========================================================================*/
typedef struct sNotifyList
{
/* Function to be run when data becomes available */
void (* mpNotifyFunct)(struct sGobiUSBNet *, u16, void *);
/* Transaction ID */
u16 mTransactionID;
/* Data to provide as parameter to mpNotifyFunct */
void * mpData;
/* Next entry in linked list */
struct sNotifyList * mpNext;
} sNotifyList;
/*=========================================================================*/
// Struct sURBList
//
// Structure that defines an entry in a URB linked list
/*=========================================================================*/
typedef struct sURBList
{
/* The current URB */
struct urb * mpURB;
/* Next entry in linked list */
struct sURBList * mpNext;
} sURBList;
/*=========================================================================*/
// Struct sClientMemList
//
// Structure that defines an entry in a Client Memory linked list
// Stores data specific to a Service Type and Client ID
/*=========================================================================*/
typedef struct sClientMemList
{
/* Client ID for this Client */
u16 mClientID;
/* Linked list of Read entries */
/* Stores data read from device before sending to client */
sReadMemList * mpList;
/* Linked list of Notification entries */
/* Stores notification functions to be run as data becomes
available or the device is removed */
sNotifyList * mpReadNotifyList;
/* Linked list of URB entries */
/* Stores pointers to outstanding URBs which need to be canceled
   when the client is deregistered or the device is removed */
sURBList * mpURBList;
/* Next entry in linked list */
struct sClientMemList * mpNext;
/* Wait queue object for poll() */
wait_queue_head_t mWaitQueue;
} sClientMemList;
/*=========================================================================*/
// Struct sURBSetupPacket
//
// Structure that defines a USB Setup packet for Control URBs
// Taken from USB CDC specifications
/*=========================================================================*/
typedef struct sURBSetupPacket
{
/* Request type */
u8 mRequestType;
/* Request code */
u8 mRequestCode;
/* Value */
u16 mValue;
/* Index */
u16 mIndex;
/* Length of Control URB */
u16 mLength;
} sURBSetupPacket;
// Common value for sURBSetupPacket.mLength
#define DEFAULT_READ_URB_LENGTH 0x1000
#ifdef QUECTEL_QMI_MERGE
#define MERGE_PACKET_IDENTITY 0x2c7c
#define MERGE_PACKET_VERSION 0x0001
#define MERGE_PACKET_MAX_PAYLOAD_SIZE 56
typedef struct sQMIMsgHeader {
u16 idenity;
u16 version;
u16 cur_len;
u16 total_len;
} sQMIMsgHeader;
typedef struct sQMIMsgPacket {
sQMIMsgHeader header;
u16 len;
char buf[DEFAULT_READ_URB_LENGTH];
} sQMIMsgPacket;
#endif
#ifdef CONFIG_PM
#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
/*=========================================================================*/
// Struct sAutoPM
//
// Structure used to manage AutoPM thread which determines whether the
// device is in use or may enter autosuspend. Also submits net
// transmissions asynchronously.
/*=========================================================================*/
typedef struct sAutoPM
{
/* Thread for atomic autopm function */
struct task_struct * mpThread;
/* Signal for completion when it's time for the thread to work */
struct completion mThreadDoWork;
/* Time to exit? */
bool mbExit;
/* List of URB's queued to be sent to the device */
sURBList * mpURBList;
/* URB list lock (for adding and removing elements) */
spinlock_t mURBListLock;
/* Length of the URB list */
atomic_t mURBListLen;
/* Active URB */
struct urb * mpActiveURB;
/* Active URB lock (for adding and removing elements) */
spinlock_t mActiveURBLock;
/* Duplicate pointer to USB device interface */
struct usb_interface * mpIntf;
} sAutoPM;
#endif
#endif /* CONFIG_PM */
/*=========================================================================*/
// Struct sQMIDev
//
// Structure that defines the data for the QMI device
/*=========================================================================*/
typedef struct sQMIDev
{
/* Device number */
dev_t mDevNum;
/* Device class */
struct class * mpDevClass;
/* cdev struct */
struct cdev mCdev;
/* is mCdev initialized? */
bool mbCdevIsInitialized;
/* Pointer to read URB */
struct urb * mpReadURB;
//#define READ_QMI_URB_ERROR
#ifdef READ_QMI_URB_ERROR
struct timer_list mReadUrbTimer;
#endif
#ifdef QUECTEL_QMI_MERGE
sQMIMsgPacket * mpQmiMsgPacket;
#endif
/* Read setup packet */
sURBSetupPacket * mpReadSetupPacket;
/* Read buffer attached to current read URB */
void * mpReadBuffer;
/* Interrupt URB */
/* Used to asynchronously notify when read data is available */
struct urb * mpIntURB;
/* Buffer used by Interrupt URB */
void * mpIntBuffer;
/* Pointer to memory linked list for all clients */
sClientMemList * mpClientMemList;
/* Spinlock for client Memory entries */
spinlock_t mClientMemLock;
/* Transaction ID associated with QMICTL "client" */
atomic_t mQMICTLTransactionID;
} sQMIDev;
typedef struct {
u32 qmap_enabled;
u32 dl_data_aggregation_max_datagrams;
u32 dl_data_aggregation_max_size ;
u32 ul_data_aggregation_max_datagrams;
u32 ul_data_aggregation_max_size;
u32 dl_minimum_padding;
} QMAP_SETTING;
/*=========================================================================*/
// Struct sGobiUSBNet
//
// Structure that defines the data associated with the Qualcomm USB device
/*=========================================================================*/
typedef struct sGobiUSBNet
{
atomic_t refcount;
/* Net device structure */
struct usbnet * mpNetDev;
#ifdef QUECTEL_WWAN_QMAP
unsigned link_state;
int qmap_mode;
int qmap_size;
int qmap_version;
struct net_device *mpQmapNetDev[QUECTEL_WWAN_QMAP];
struct tasklet_struct txq;
QMAP_SETTING qmap_settings;
#if defined(QUECTEL_UL_DATA_AGG)
struct ul_agg_ctx agg_ctx;
#endif
#ifdef QUECTEL_BRIDGE_MODE
int m_qmap_bridge_mode[QUECTEL_WWAN_QMAP];
#endif
#endif
#if 1 //def DATA_MODE_RP
bool mbMdm9x07;
bool mbMdm9x06; //for BG96
/* QMI "device" work in IP Mode or ETH Mode */
bool mbRawIPMode;
#ifdef QUECTEL_BRIDGE_MODE
int m_bridge_mode;
uint m_bridge_ipv4;
unsigned char mHostMAC[6];
#endif
int m_qcrmcall_mode;
#endif
struct completion mQMIReadyCompletion;
bool mbQMIReady;
bool mbProbeDone;
bool mbQMISyncIng;
/* Usb device interface */
struct usb_interface * mpIntf;
/* Pointers to usbnet_open and usbnet_stop functions */
int (* mpUSBNetOpen)(struct net_device *);
int (* mpUSBNetStop)(struct net_device *);
/* Reason(s) why interface is down */
/* Used by Gobi*DownReason */
unsigned long mDownReason;
#define NO_NDIS_CONNECTION 0
#define CDC_CONNECTION_SPEED 1
#define DRIVER_SUSPENDED 2
#define NET_IFACE_STOPPED 3
/* QMI "device" status */
bool mbQMIValid;
bool mbDeregisterQMIDevice;
/* QMI "device" memory */
sQMIDev mQMIDev;
/* Device MEID */
char mMEID[14];
struct hrtimer timer;
struct tasklet_struct bh;
unsigned long
pending_num : 8,
pending_size : 16;
struct sk_buff *pending_pool[16];
#ifdef CONFIG_PM
#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
/* AutoPM thread */
sAutoPM mAutoPM;
#endif
#endif /* CONFIG_PM */
} sGobiUSBNet;
/*=========================================================================*/
// Struct sQMIFilpStorage
//
// Structure that defines the storage each file handle contains
// Relates the file handle to a client
/*=========================================================================*/
typedef struct sQMIFilpStorage
{
/* Client ID */
u16 mClientID;
/* Device pointer */
sGobiUSBNet * mpDev;
} sQMIFilpStorage;


@@ -0,0 +1,26 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=quectel-mhi-pcie
PKG_VERSION:=1.3.6
PKG_RELEASE:=1
include $(INCLUDE_DIR)/kernel.mk
include $(INCLUDE_DIR)/package.mk
define KernelPackage/mhi-pcie
SUBMENU:=Network Devices
TITLE:=Kernel PCIe driver for MHI device
DEPENDS:=@PCI_SUPPORT
FILES:=$(PKG_BUILD_DIR)/pcie_mhi.ko
AUTOLOAD:=$(call AutoLoad,90,pcie_mhi)
endef
define KernelPackage/mhi-pcie/description
Kernel module that registers a custom pciemhi platform device.
endef
define Build/Compile
+$(KERNEL_MAKE) M="$(PKG_BUILD_DIR)" modules
endef
$(eval $(call KernelPackage,mhi-pcie))


@@ -0,0 +1,34 @@
#ccflags-y += -g
obj-m += pcie_mhi.o
pcie_mhi-objs := core/mhi_init.o core/mhi_main.o core/mhi_pm.o core/mhi_boot.o core/mhi_dtr.o controllers/mhi_qti.o
pcie_mhi-objs += devices/mhi_uci.o
ifeq (1,1)
pcie_mhi-objs += devices/mhi_netdev_quectel.o
else
pcie_mhi-objs += devices/mhi_netdev.o
pcie_mhi-objs += devices/rmnet_handler.o
endif
PWD := $(shell pwd)
ifeq ($(ARCH),)
ARCH := $(shell uname -m)
endif
ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE :=
endif
ifeq ($(KDIR),)
KDIR := /lib/modules/$(shell uname -r)/build
endif
pcie_mhi: clean
$(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) modules
#cp pcie_mhi.ko /tftpboot/
clean:
$(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) clean
find . -name "*.o.ur-safe" | xargs rm -f
install: pcie_mhi
sudo cp pcie_mhi.ko /lib/modules/${shell uname -r}/kernel/drivers/pci/
sudo depmod


@@ -0,0 +1,36 @@
1. port the pcie_mhi driver as follows
$ git diff drivers/Makefile
diff --git a/drivers/Makefile b/drivers/Makefile
index 77fbc52..e45837e 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -184,3 +184,4 @@ obj-$(CONFIG_FPGA) += fpga/
obj-$(CONFIG_FSI) += fsi/
obj-$(CONFIG_TEE) += tee/
obj-$(CONFIG_MULTIPLEXER) += mux/
+obj-y += pcie_mhi/
$ tree drivers/pcie_mhi/ -L 1
drivers/pcie_mhi/
controllers
core
devices
Makefile
2. check that the RG500 attaches to the pcie_mhi driver successfully
root@OpenWrt:/# lspci
00:00.0 Class 0604: 17cb:0302
01:00.0 Class ff00: 17cb:0306
root@OpenWrt:~# dmesg | grep mhi
[ 138.483252] mhi_init Quectel_Linux_PCIE_MHI_Driver_V1.3.0.6
[ 138.492350] mhi_pci_probe pci_dev->name = 0000:01:00.0, domain=0, bus=1, slot=0, vendor=17CB, device=0306
3. for usage, see the following logs
log/QXDM_OVER_PCIE.txt
log/AT_OVER_PCIE.txt
log/MBIM_OVER_PCIE.txt
log/QMI_OVER_PCIE.txt


@@ -0,0 +1,119 @@
Release Notes
[V1.3.6]
Date: 01/08/2023
enhancement:
1. support Linux Kernel V6.4
2. support change mtu
fix:
1. fix compile error on ipq's spf12.x
[V1.3.5]
Date: 25/02/2023
enhancement:
1. support efuse SDX sleep
2. support IPQ9574 SFE
fix:
1. fix failure to find the node when dialing; nodes in the /sys/bus/mhi_q/devices directory are named in hex
[V1.3.4]
Date: 12/8/2022
enhancement:
1. only allow autosuspend to be enabled when the module is in MHI_EE_AMSS
2. show the PCIe link speed and width at driver probe
3. check the PCIe link status by reading the PCIe VID and PID at driver probe;
if the PCIe link is down, return -EIO
4. support RM520 (1eac:1004)
5. support qmap command packet
fix:
1. fix the TX queue being wrongly stopped during uplink TPUT tests
2. fix the module failing to boot up, with very small probability, after QFirehose
3. mhi uci: add a mutex lock for concurrent reads/writes
[V1.3.3]
Date: 30/6/2022
enhancement:
1. remove one unnecessary kmalloc during QFirehose
2. support mhi monitor (like usbmon), usage: cat /sys/kernel/debug/mhi_q/0306_00\:01.00/mhimon
3. set ring size of event 0 to 256 (from 1024), required by x6x
4. support PCIE local network card mhi_swip0 (chan 46/47), default disabled
5. port the IPQ5018 MHI rate-control code from SPF11.5
6. set the PCIe rmnet downlink max QMAP packet size to 15KB (same as the IPQ MHI driver)
7. support setting a different MAC address for each rmnet net card
8. when the mhi netdev fails to malloc, use delayed work instead of plain work
9. optimize code for the case where the modem is still in MHI_EE_PTHRU when the driver loads
fix:
1. fix unsynchronized access to rp/wp when mhi_queue_xxx and mhi_process_xxx_ring run on different CPUs
2. set the DMA mask at driver probe; some SoCs, such as the rpi_4, need it
[V1.3.2]
Date: 12/16/2021
enhancement:
1. support Linux Kernel V5.14
2. mhi_netdev_quectel.c: do not print logs in softirq context
[V1.3.1]
Date: 9/26/2021
enhancement:
fix:
[V1.3.0.19]
Date: 9/18/2021
enhancement:
1. support sdx62 (17cb:0308)
2. support IPQ5018's NSS
3. use 'qsdk/qca/src/data-kernel/drivers/rmnet-nss/rmnet_nss.c' instead of our own rmnet_nss.c;
pcie_mhi.ko must then be loaded after rmnet_nss.ko
4. allow the BHI IRQ to be non-zero (for ipq5018)
fix:
[V1.3.0.18]
Date: 4/14/2021
enhancement:
1. support MBIM multiple calls; usage:
# insmod pcie_mhi.ko mhi_mbim_enabeld=1 qmap_mode=4
# quectel-mbim-proxy -d /dev/mhi_MBIM &
# quectel-CM -n X
fix:
[V1.3.0.17]
Date: 3/11/2021
enhancement:
fix:
1. fix very high CPU load during TPUT tests when there is only one MSI interrupt
2. fix error on latest X24 modem
[V1.3.0.16]
Date: 11/18/2020
enhancement:
fix:
1. set ring size to 32; for in-bound channels, if one ring is full the modem will not generate MSI interrupts for any channel
[V1.3.0.15]
Date: 10/30/2020
enhancement:
1. support multiple modems, named /dev/mhi_<chan_name>X
fix:
1. fix compile error on kernel v5.8
[V1.3.0.14]
Date: 10/9/2020
enhancement:
1. support EM120&EM160
fix:
1. fix compile error on kernel v5.6
2. support runtime suspend
[V1.3.0.13]
Date: 9/7/2020
enhancement:
1. support EM120&EM160
fix:
1. fix error on X55 + PCIE2.0 (e.g. IPQ4019)
2. support runtime suspend
[V1.3.0.12]
Date: 7/7/2020
enhancement:
1. support creating only one netcard (enabled by the macro MHI_NETDEV_ONE_CARD_MODE),
fix:


@@ -0,0 +1,13 @@
menu "MHI controllers"
config MHI_QTI
tristate "MHI QTI"
depends on MHI_BUS
help
If you say yes to this option, MHI bus support for QTI modem chipsets
will be enabled. QTI PCIe based modems use MHI as the communication
protocol. The MHI control driver is the bus master for such modems. As the
bus master driver, it oversees power management operations such as
suspend, resume, and powering the device on and off.
endmenu


@@ -0,0 +1 @@
obj-$(CONFIG_MHI_QTI) += mhi_qti.o mhi_arch_qti.o


@@ -0,0 +1,275 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.*/
#include <linux/async.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/msm-bus.h>
#include <linux/msm_pcie.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include "../core/mhi.h"
#include "mhi_qti.h"
struct arch_info {
struct mhi_dev *mhi_dev;
struct msm_bus_scale_pdata *msm_bus_pdata;
u32 bus_client;
struct pci_saved_state *pcie_state;
struct pci_saved_state *ref_pcie_state;
struct dma_iommu_mapping *mapping;
};
struct mhi_bl_info {
struct mhi_device *mhi_device;
async_cookie_t cookie;
void *ipc_log;
};
/* ipc log markings */
#define DLOG "Dev->Host: "
#define HLOG "Host: "
#ifdef CONFIG_MHI_DEBUG
#define MHI_IPC_LOG_PAGES (100)
enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_LVL_VERBOSE;
#else
#define MHI_IPC_LOG_PAGES (10)
enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_LVL_ERROR;
#endif
static int mhi_arch_set_bus_request(struct mhi_controller *mhi_cntrl, int index)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct arch_info *arch_info = mhi_dev->arch_info;
MHI_LOG("Setting bus request to index %d\n", index);
if (arch_info->bus_client)
return msm_bus_scale_client_update_request(
arch_info->bus_client,
index);
/* default return success */
return 0;
}
static void mhi_bl_dl_cb(struct mhi_device *mhi_dev,
struct mhi_result *mhi_result)
{
struct mhi_bl_info *mhi_bl_info = mhi_device_get_devdata(mhi_dev);
char *buf = mhi_result->buf_addr;
/* force a null at last character */
buf[mhi_result->bytes_xferd - 1] = 0;
ipc_log_string(mhi_bl_info->ipc_log, "%s %s", DLOG, buf);
}
static void mhi_bl_dummy_cb(struct mhi_device *mhi_dev,
struct mhi_result *mhi_result)
{
}
static void mhi_bl_remove(struct mhi_device *mhi_dev)
{
struct mhi_bl_info *mhi_bl_info = mhi_device_get_devdata(mhi_dev);
ipc_log_string(mhi_bl_info->ipc_log, HLOG "Received Remove notif.\n");
/* wait for boot monitor to exit */
async_synchronize_cookie(mhi_bl_info->cookie + 1);
}
static void mhi_bl_boot_monitor(void *data, async_cookie_t cookie)
{
struct mhi_bl_info *mhi_bl_info = data;
struct mhi_device *mhi_device = mhi_bl_info->mhi_device;
struct mhi_controller *mhi_cntrl = mhi_device->mhi_cntrl;
/* 15 sec timeout for booting device */
const u32 timeout = msecs_to_jiffies(15000);
/* wait for device to enter boot stage */
wait_event_timeout(mhi_cntrl->state_event, mhi_cntrl->ee == MHI_EE_AMSS
|| mhi_cntrl->ee == MHI_EE_DISABLE_TRANSITION,
timeout);
if (mhi_cntrl->ee == MHI_EE_AMSS) {
ipc_log_string(mhi_bl_info->ipc_log, HLOG
"Device successfully booted to mission mode\n");
mhi_unprepare_from_transfer(mhi_device);
} else {
ipc_log_string(mhi_bl_info->ipc_log, HLOG
"Device failed to boot to mission mode, ee = %s\n",
TO_MHI_EXEC_STR(mhi_cntrl->ee));
}
}
static int mhi_bl_probe(struct mhi_device *mhi_dev,
const struct mhi_device_id *id)
{
char node_name[32];
struct mhi_bl_info *mhi_bl_info;
mhi_bl_info = devm_kzalloc(&mhi_dev->dev, sizeof(*mhi_bl_info),
GFP_KERNEL);
if (!mhi_bl_info)
return -ENOMEM;
snprintf(node_name, sizeof(node_name), "mhi_bl_%04x_%02u.%02u.%02u",
mhi_dev->dev_id, mhi_dev->domain, mhi_dev->bus, mhi_dev->slot);
mhi_bl_info->ipc_log = ipc_log_context_create(MHI_IPC_LOG_PAGES,
node_name, 0);
if (!mhi_bl_info->ipc_log)
return -EINVAL;
mhi_bl_info->mhi_device = mhi_dev;
mhi_device_set_devdata(mhi_dev, mhi_bl_info);
ipc_log_string(mhi_bl_info->ipc_log, HLOG
"Entered SBL, Session ID:0x%x\n",
mhi_dev->mhi_cntrl->session_id);
/* start a thread to monitor entering mission mode */
mhi_bl_info->cookie = async_schedule(mhi_bl_boot_monitor, mhi_bl_info);
return 0;
}
static const struct mhi_device_id mhi_bl_match_table[] = {
{ .chan = "BL" },
{},
};
static struct mhi_driver mhi_bl_driver = {
.id_table = mhi_bl_match_table,
.remove = mhi_bl_remove,
.probe = mhi_bl_probe,
.ul_xfer_cb = mhi_bl_dummy_cb,
.dl_xfer_cb = mhi_bl_dl_cb,
.driver = {
.name = "MHI_BL",
.owner = THIS_MODULE,
},
};
int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct arch_info *arch_info = mhi_dev->arch_info;
char node[32];
if (!arch_info) {
arch_info = devm_kzalloc(&mhi_dev->pci_dev->dev,
sizeof(*arch_info), GFP_KERNEL);
if (!arch_info)
return -ENOMEM;
mhi_dev->arch_info = arch_info;
snprintf(node, sizeof(node), "mhi_%04x_%02u.%02u.%02u",
mhi_cntrl->dev_id, mhi_cntrl->domain, mhi_cntrl->bus,
mhi_cntrl->slot);
mhi_cntrl->log_buf = ipc_log_context_create(MHI_IPC_LOG_PAGES,
node, 0);
mhi_cntrl->log_lvl = mhi_ipc_log_lvl;
/* save reference state for pcie config space */
arch_info->ref_pcie_state = pci_store_saved_state(
mhi_dev->pci_dev);
mhi_driver_register(&mhi_bl_driver);
}
return mhi_arch_set_bus_request(mhi_cntrl, 1);
}
void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl)
{
mhi_arch_set_bus_request(mhi_cntrl, 0);
}
int mhi_arch_link_off(struct mhi_controller *mhi_cntrl, bool graceful)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct arch_info *arch_info = mhi_dev->arch_info;
struct pci_dev *pci_dev = mhi_dev->pci_dev;
int ret;
MHI_LOG("Entered\n");
if (graceful) {
pci_clear_master(pci_dev);
ret = pci_save_state(mhi_dev->pci_dev);
if (ret) {
MHI_ERR("Failed with pci_save_state, ret:%d\n", ret);
return ret;
}
arch_info->pcie_state = pci_store_saved_state(pci_dev);
pci_disable_device(pci_dev);
}
/*
* We will always attempt to put link into D3hot, however
* link down may have happened due to error fatal, so
* ignoring the return code
*/
pci_set_power_state(pci_dev, PCI_D3hot);
/* release the resources */
mhi_arch_set_bus_request(mhi_cntrl, 0);
MHI_LOG("Exited\n");
return 0;
}
int mhi_arch_link_on(struct mhi_controller *mhi_cntrl)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct arch_info *arch_info = mhi_dev->arch_info;
struct pci_dev *pci_dev = mhi_dev->pci_dev;
int ret;
MHI_LOG("Entered\n");
/* request resources and establish link training */
ret = mhi_arch_set_bus_request(mhi_cntrl, 1);
if (ret)
MHI_LOG("Could not set bus frequency, ret:%d\n", ret);
ret = pci_set_power_state(pci_dev, PCI_D0);
if (ret) {
MHI_ERR("Failed to set PCI_D0 state, ret:%d\n", ret);
return ret;
}
ret = pci_enable_device(pci_dev);
if (ret) {
MHI_ERR("Failed to enable device, ret:%d\n", ret);
return ret;
}
ret = pci_load_and_free_saved_state(pci_dev, &arch_info->pcie_state);
if (ret)
MHI_LOG("Failed to load saved cfg state\n");
pci_restore_state(pci_dev);
pci_set_master(pci_dev);
MHI_LOG("Exited\n");
return 0;
}


@@ -0,0 +1,715 @@
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/version.h>
#include "../core/mhi.h"
#include "mhi_qcom.h"
#if 1
#ifndef PCI_IRQ_MSI
#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */
#if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,10,53 ))
int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
{
int nvec = maxvec;
int rc;
if (maxvec < minvec)
return -ERANGE;
do {
rc = pci_enable_msi_block(dev, nvec);
if (rc < 0) {
return rc;
} else if (rc > 0) {
if (rc < minvec)
return -ENOSPC;
nvec = rc;
}
} while (rc);
return nvec;
}
#endif
static int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
unsigned int max_vecs, unsigned int flags)
{
return pci_enable_msi_range(dev, min_vecs, max_vecs);
}
static void pci_free_irq_vectors(struct pci_dev *dev)
{
pci_disable_msi(dev);
}
static int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
{
return dev->irq + nr;
}
#endif
#endif
static struct pci_device_id mhi_pcie_device_id[] = {
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0303)}, //SDX20
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0304)}, //SDX24
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0305)},
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0306)}, //SDX55
{PCI_DEVICE(0x2C7C, 0x0512)},
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, MHI_PCIE_DEBUG_ID)},
{0},
};
MODULE_DEVICE_TABLE(pci, mhi_pcie_device_id);
static struct pci_driver mhi_pcie_driver;
void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct pci_dev *pci_dev = mhi_dev->pci_dev;
pci_free_irq_vectors(pci_dev);
iounmap(mhi_cntrl->regs);
mhi_cntrl->regs = NULL;
pci_clear_master(pci_dev);
pci_release_region(pci_dev, mhi_dev->resn);
pci_disable_device(pci_dev);
}
static int mhi_init_pci_dev(struct mhi_controller *mhi_cntrl)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct pci_dev *pci_dev = mhi_dev->pci_dev;
int ret;
resource_size_t start, len;
int i;
mhi_dev->resn = MHI_PCI_BAR_NUM;
ret = pci_assign_resource(pci_dev, mhi_dev->resn);
if (ret) {
MHI_ERR("Error assign pci resources, ret:%d\n", ret);
return ret;
}
ret = pci_enable_device(pci_dev);
if (ret) {
MHI_ERR("Error enabling device, ret:%d\n", ret);
goto error_enable_device;
}
ret = pci_request_region(pci_dev, mhi_dev->resn, "mhi");
if (ret) {
MHI_ERR("Error pci_request_region, ret:%d\n", ret);
goto error_request_region;
}
pci_set_master(pci_dev);
start = pci_resource_start(pci_dev, mhi_dev->resn);
len = pci_resource_len(pci_dev, mhi_dev->resn);
mhi_cntrl->regs = ioremap_nocache(start, len);
MHI_LOG("mhi_cntrl->regs = %p\n", mhi_cntrl->regs);
if (!mhi_cntrl->regs) {
MHI_ERR("Error ioremap region\n");
goto error_ioremap;
}
ret = pci_alloc_irq_vectors(pci_dev, 1, mhi_cntrl->msi_required, PCI_IRQ_MSI);
if (IS_ERR_VALUE((ulong)ret) || ret < mhi_cntrl->msi_required) {
if (ret == -ENOSPC) {
/* imx_3.14.52_1.1.0_ga
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index f06e8f0..6a9614f 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -376,6 +376,13 @@ static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
if (msgvec > 5)
msgvec = 0;
+#if 1 //Add by Quectel 20190419
+ if (msgvec > 0 && pdev->vendor == 0x17cb) {
+ dev_info(&pdev->dev, "%s quectel fixup pos=%d, msg_ctr=%04x, msgvec=%d\n", __func__, desc->msi_attrib.pos, msg_ctr, msgvec);
+ msgvec = 0;
+ }
+#endif
+
irq = assign_irq((1 << msgvec), desc, &pos);
if (irq < 0)
return irq;
*/
}
//imx_4.1.15_2.0.0_ga & DELL_OPTIPLEX_7010 only allocate one MSI interrupt per PCIe device
if (ret != 1) {
MHI_ERR("Failed to enable MSI, ret=%d, msi_required=%d\n", ret, mhi_cntrl->msi_required);
goto error_req_msi;
}
}
mhi_cntrl->msi_allocated = ret;
MHI_LOG("msi_required = %d, msi_allocated = %d, msi_irq = %u\n", mhi_cntrl->msi_required, mhi_cntrl->msi_allocated, pci_dev->irq);
for (i = 0; i < mhi_cntrl->msi_allocated; i++) {
mhi_cntrl->irq[i] = pci_irq_vector(pci_dev, i);
if (mhi_cntrl->irq[i] < 0) {
ret = mhi_cntrl->irq[i];
goto error_get_irq_vec;
}
}
#if 0
/* configure runtime pm */
pm_runtime_set_autosuspend_delay(&pci_dev->dev, MHI_RPM_SUSPEND_TMR_MS);
pm_runtime_dont_use_autosuspend(&pci_dev->dev);
pm_suspend_ignore_children(&pci_dev->dev, true);
/*
* pci framework will increment usage count (twice) before
* calling local device driver probe function.
* 1st pci.c pci_pm_init() calls pm_runtime_forbid
* 2nd pci-driver.c local_pci_probe calls pm_runtime_get_sync
* Framework expect pci device driver to call
* pm_runtime_put_noidle to decrement usage count after
* successful probe and and call pm_runtime_allow to enable
* runtime suspend.
*/
pm_runtime_mark_last_busy(&pci_dev->dev);
pm_runtime_put_noidle(&pci_dev->dev);
#endif
return 0;
error_get_irq_vec:
pci_free_irq_vectors(pci_dev);
error_req_msi:
iounmap(mhi_cntrl->regs);
error_ioremap:
pci_clear_master(pci_dev);
error_request_region:
pci_disable_device(pci_dev);
error_enable_device:
pci_release_region(pci_dev, mhi_dev->resn);
return ret;
}
#ifdef CONFIG_PM
static int mhi_runtime_idle(struct device *dev)
{
struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
MHI_LOG("Entered returning -EBUSY\n");
/*
* During runtime resume, the RPM framework always calls
* rpm_idle to check whether the device is ready to suspend.
* If dev.power usage_count is 0, the framework calls the
* rpm_idle callback; if the callback returns 0, or is not
* defined, the framework assumes the device driver is ready
* to suspend and therefore schedules runtime suspend.
* In MHI power management, the MHI host shall go to runtime
* suspend only after entering MHI state M2, even if the
* usage count is 0. Return -EBUSY to disable automatic suspend.
*/
return -EBUSY;
}
static int mhi_runtime_suspend(struct device *dev)
{
int ret = 0;
struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
MHI_LOG("Enter\n");
mutex_lock(&mhi_cntrl->pm_mutex);
ret = mhi_pm_suspend(mhi_cntrl);
if (ret) {
MHI_LOG("Abort due to ret:%d\n", ret);
goto exit_runtime_suspend;
}
ret = mhi_arch_link_off(mhi_cntrl, true);
if (ret)
MHI_ERR("Failed to Turn off link ret:%d\n", ret);
exit_runtime_suspend:
mutex_unlock(&mhi_cntrl->pm_mutex);
MHI_LOG("Exited with ret:%d\n", ret);
return ret;
}
static int mhi_runtime_resume(struct device *dev)
{
int ret = 0;
struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
MHI_LOG("Enter\n");
mutex_lock(&mhi_cntrl->pm_mutex);
if (!mhi_dev->powered_on) {
MHI_LOG("Not fully powered, return success\n");
mutex_unlock(&mhi_cntrl->pm_mutex);
return 0;
}
/* turn on link */
ret = mhi_arch_link_on(mhi_cntrl);
if (ret)
goto rpm_resume_exit;
/* enter M0 state */
ret = mhi_pm_resume(mhi_cntrl);
rpm_resume_exit:
mutex_unlock(&mhi_cntrl->pm_mutex);
MHI_LOG("Exited with :%d\n", ret);
return ret;
}
static int mhi_system_resume(struct device *dev)
{
int ret = 0;
struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
ret = mhi_runtime_resume(dev);
if (ret) {
MHI_ERR("Failed to resume link\n");
} else {
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
}
return ret;
}
int mhi_system_suspend(struct device *dev)
{
struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
MHI_LOG("Entered\n");
/* if rpm status still active then force suspend */
if (!pm_runtime_status_suspended(dev))
return mhi_runtime_suspend(dev);
pm_runtime_set_suspended(dev);
pm_runtime_disable(dev);
MHI_LOG("Exit\n");
return 0;
}
#endif
/* checks if link is down */
static int mhi_link_status(struct mhi_controller *mhi_cntrl, void *priv)
{
struct mhi_dev *mhi_dev = priv;
u16 dev_id;
int ret;
/* try reading the device id; if it doesn't match, the link is down */
ret = pci_read_config_word(mhi_dev->pci_dev, PCI_DEVICE_ID, &dev_id);
return (ret || dev_id != mhi_cntrl->dev_id) ? -EIO : 0;
}
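/*
 * Note (added, not in the original source): when the PCIe link is down,
 * config space reads typically return all 1s, so dev_id reads back as
 * 0xFFFF and the comparison above fails, which is what this check relies on.
 */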
static int mhi_runtime_get(struct mhi_controller *mhi_cntrl, void *priv)
{
struct mhi_dev *mhi_dev = priv;
struct device *dev = &mhi_dev->pci_dev->dev;
return pm_runtime_get(dev);
}
static void mhi_runtime_put(struct mhi_controller *mhi_cntrl, void *priv)
{
struct mhi_dev *mhi_dev = priv;
struct device *dev = &mhi_dev->pci_dev->dev;
pm_runtime_put_noidle(dev);
}
static void mhi_status_cb(struct mhi_controller *mhi_cntrl,
void *priv,
enum MHI_CB reason)
{
struct mhi_dev *mhi_dev = priv;
struct device *dev = &mhi_dev->pci_dev->dev;
if (reason == MHI_CB_IDLE) {
MHI_LOG("Schedule runtime suspend 1\n");
pm_runtime_mark_last_busy(dev);
pm_request_autosuspend(dev);
}
}
int mhi_debugfs_trigger_m0(void *data, u64 val)
{
struct mhi_controller *mhi_cntrl = data;
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
MHI_LOG("Trigger M3 Exit\n");
pm_runtime_get(&mhi_dev->pci_dev->dev);
pm_runtime_put(&mhi_dev->pci_dev->dev);
return 0;
}
int mhi_debugfs_trigger_m3(void *data, u64 val)
{
struct mhi_controller *mhi_cntrl = data;
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
MHI_LOG("Trigger M3 Entry\n");
pm_runtime_mark_last_busy(&mhi_dev->pci_dev->dev);
pm_request_autosuspend(&mhi_dev->pci_dev->dev);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_m0_fops, NULL,
mhi_debugfs_trigger_m0, "%llu\n");
DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_m3_fops, NULL,
mhi_debugfs_trigger_m3, "%llu\n");
static int mhi_init_debugfs_trigger_go(void *data, u64 val)
{
struct mhi_controller *mhi_cntrl = data;
MHI_LOG("Trigger power up sequence\n");
mhi_async_power_up(mhi_cntrl);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(mhi_init_debugfs_trigger_go_fops, NULL,
mhi_init_debugfs_trigger_go, "%llu\n");
int mhi_init_debugfs_debug_show(struct seq_file *m, void *d)
{
seq_puts(m, "Enable debug mode to debug external soc\n");
seq_puts(m,
"Usage: echo 'devid,timeout,domain,smmu_cfg' > debug_mode\n");
seq_puts(m, "No spaces between parameters\n");
seq_puts(m, "\t1. devid : 0 or pci device id to register\n");
seq_puts(m, "\t2. timeout: mhi cmd/state transition timeout\n");
seq_puts(m, "\t3. domain: Rootcomplex\n");
seq_puts(m, "\t4. smmu_cfg: smmu configuration mask:\n");
seq_puts(m, "\t\t- BIT0: ATTACH\n");
seq_puts(m, "\t\t- BIT1: S1 BYPASS\n");
seq_puts(m, "\t\t-BIT2: FAST_MAP\n");
seq_puts(m, "\t\t-BIT3: ATOMIC\n");
seq_puts(m, "\t\t-BIT4: FORCE_COHERENT\n");
seq_puts(m, "\t\t-BIT5: GEOMETRY\n");
seq_puts(m, "\tAll timeout are in ms, enter 0 to keep default\n");
seq_puts(m, "Examples inputs: '0x307,10000'\n");
seq_puts(m, "\techo '0,10000,1'\n");
seq_puts(m, "\techo '0x307,10000,0,0x3d'\n");
seq_puts(m, "firmware image name will be changed to debug.mbn\n");
return 0;
}
static int mhi_init_debugfs_debug_open(struct inode *node, struct file *file)
{
return single_open(file, mhi_init_debugfs_debug_show, NULL);
}
static ssize_t mhi_init_debugfs_debug_write(struct file *fp,
const char __user *ubuf,
size_t count,
loff_t *pos)
{
char *buf = kmalloc(count + 1, GFP_KERNEL);
/* #,devid,timeout,domain,smmu-cfg */
int args[5] = {0};
static char const *dbg_fw = "debug.mbn";
int ret;
struct mhi_controller *mhi_cntrl = fp->f_inode->i_private;
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct pci_device_id *id;
if (!buf)
return -ENOMEM;
ret = copy_from_user(buf, ubuf, count);
if (ret)
goto error_read;
buf[count] = 0;
get_options(buf, ARRAY_SIZE(args), args);
kfree(buf);
/* override default parameters */
mhi_cntrl->fw_image = dbg_fw;
mhi_cntrl->edl_image = dbg_fw;
if (args[0] >= 2 && args[2])
mhi_cntrl->timeout_ms = args[2];
if (args[0] >= 3 && args[3])
mhi_cntrl->domain = args[3];
if (args[0] >= 4 && args[4])
mhi_dev->smmu_cfg = args[4];
/* If it's a new device id register it */
if (args[0] && args[1]) {
/* find the debug_id and overwrite it */
for (id = mhi_pcie_device_id; id->vendor; id++)
if (id->device == MHI_PCIE_DEBUG_ID) {
id->device = args[1];
pci_unregister_driver(&mhi_pcie_driver);
ret = pci_register_driver(&mhi_pcie_driver);
}
}
mhi_dev->debug_mode = true;
debugfs_create_file("go", 0444, mhi_cntrl->parent, mhi_cntrl,
&mhi_init_debugfs_trigger_go_fops);
pr_info(
"%s: ret:%d pcidev:0x%x smm_cfg:%u timeout:%u\n",
__func__, ret, args[1], mhi_dev->smmu_cfg,
mhi_cntrl->timeout_ms);
return count;
error_read:
kfree(buf);
return ret;
}
static const struct file_operations debugfs_debug_ops = {
.open = mhi_init_debugfs_debug_open,
.release = single_release,
.read = seq_read,
.write = mhi_init_debugfs_debug_write,
};
static struct mhi_controller *mhi_platform_probe(struct pci_dev *pci_dev)
{
struct mhi_controller *mhi_cntrl;
struct mhi_dev *mhi_dev;
u64 addr_win[2];
int ret;
mhi_cntrl = mhi_alloc_controller(sizeof(*mhi_dev));
if (!mhi_cntrl) {
pr_err("mhi_alloc_controller fail\n");
return NULL;
}
mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
mhi_cntrl->dev_id = pci_dev->device;
mhi_cntrl->domain = pci_domain_nr(pci_dev->bus);
mhi_cntrl->bus = pci_dev->bus->number;
mhi_cntrl->slot = PCI_SLOT(pci_dev->devfn);
mhi_dev->smmu_cfg = 0;
#if 0 //def CONFIG_HAVE_MEMBLOCK
addr_win[0] = memblock_start_of_DRAM();
addr_win[1] = memblock_end_of_DRAM();
#else
#define MHI_MEM_BASE_DEFAULT 0x000000000
#define MHI_MEM_SIZE_DEFAULT 0x2000000000
addr_win[0] = MHI_MEM_BASE_DEFAULT;
addr_win[1] = MHI_MEM_SIZE_DEFAULT;
if (sizeof(dma_addr_t) == 4) {
addr_win[1] = 0xFFFFFFFF;
}
#endif
mhi_cntrl->iova_start = addr_win[0];
mhi_cntrl->iova_stop = addr_win[1];
mhi_dev->pci_dev = pci_dev;
mhi_cntrl->pci_dev = pci_dev;
/* setup power management apis */
mhi_cntrl->status_cb = mhi_status_cb;
mhi_cntrl->runtime_get = mhi_runtime_get;
mhi_cntrl->runtime_put = mhi_runtime_put;
mhi_cntrl->link_status = mhi_link_status;
ret = mhi_arch_platform_init(mhi_dev);
if (ret)
goto error_probe;
ret = mhi_register_mhi_controller(mhi_cntrl);
if (ret)
goto error_register;
if (mhi_cntrl->parent)
debugfs_create_file("debug_mode", 0444, mhi_cntrl->parent,
mhi_cntrl, &debugfs_debug_ops);
return mhi_cntrl;
error_register:
mhi_arch_platform_deinit(mhi_dev);
error_probe:
mhi_free_controller(mhi_cntrl);
return NULL;
}
int mhi_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *device_id)
{
struct mhi_controller *mhi_cntrl = NULL;
u32 domain = pci_domain_nr(pci_dev->bus);
u32 bus = pci_dev->bus->number;
u32 slot = PCI_SLOT(pci_dev->devfn);
struct mhi_dev *mhi_dev;
int ret;
pr_info("%s pci_dev->name = %s, domain=%d, bus=%d, slot=%d, vendor=%04X, device=%04X\n",
__func__, dev_name(&pci_dev->dev), domain, bus, slot, pci_dev->vendor, pci_dev->device);
mhi_cntrl = mhi_platform_probe(pci_dev);
if (!mhi_cntrl) {
pr_err("mhi_platform_probe fail\n");
return -EPROBE_DEFER;
}
mhi_cntrl->dev_id = pci_dev->device;
mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
mhi_dev->pci_dev = pci_dev;
mhi_dev->powered_on = true;
ret = mhi_arch_pcie_init(mhi_cntrl);
if (ret) {
MHI_ERR("Error mhi_arch_pcie_init, ret:%d\n", ret);
return ret;
}
ret = mhi_arch_iommu_init(mhi_cntrl);
if (ret) {
MHI_ERR("Error mhi_arch_iommu_init, ret:%d\n", ret);
goto error_iommu_init;
}
ret = mhi_init_pci_dev(mhi_cntrl);
if (ret) {
MHI_ERR("Error mhi_init_pci_dev, ret:%d\n", ret);
goto error_init_pci;
}
/* start power up sequence if not in debug mode */
if (!mhi_dev->debug_mode) {
ret = mhi_async_power_up(mhi_cntrl);
if (ret) {
MHI_ERR("Error mhi_async_power_up, ret:%d\n", ret);
goto error_power_up;
}
}
#if 0
pm_runtime_mark_last_busy(&pci_dev->dev);
pm_runtime_allow(&pci_dev->dev);
pm_runtime_disable(&pci_dev->dev);
#endif
if (mhi_cntrl->dentry) {
debugfs_create_file("m0", 0444, mhi_cntrl->dentry, mhi_cntrl,
&debugfs_trigger_m0_fops);
debugfs_create_file("m3", 0444, mhi_cntrl->dentry, mhi_cntrl,
&debugfs_trigger_m3_fops);
}
dev_set_drvdata(&pci_dev->dev, mhi_cntrl);
MHI_LOG("Return successful\n");
return 0;
error_power_up:
mhi_deinit_pci_dev(mhi_cntrl);
error_init_pci:
mhi_arch_iommu_deinit(mhi_cntrl);
error_iommu_init:
mhi_arch_pcie_deinit(mhi_cntrl);
return ret;
}
static void mhi_pci_remove(struct pci_dev *pci_dev)
{
struct mhi_controller *mhi_cntrl = (struct mhi_controller *)dev_get_drvdata(&pci_dev->dev);
if (mhi_cntrl && mhi_cntrl->pci_dev == pci_dev) {
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
MHI_LOG("%s\n", dev_name(&pci_dev->dev));
if (!mhi_dev->debug_mode) {
mhi_power_down(mhi_cntrl, 1);
}
mhi_deinit_pci_dev(mhi_cntrl);
mhi_arch_iommu_deinit(mhi_cntrl);
mhi_arch_pcie_deinit(mhi_cntrl);
mhi_unregister_mhi_controller(mhi_cntrl);
}
}
static const struct dev_pm_ops pm_ops = {
SET_RUNTIME_PM_OPS(mhi_runtime_suspend,
mhi_runtime_resume,
mhi_runtime_idle)
SET_SYSTEM_SLEEP_PM_OPS(mhi_system_suspend, mhi_system_resume)
};
static struct pci_driver mhi_pcie_driver = {
.name = "mhi",
.id_table = mhi_pcie_device_id,
.probe = mhi_pci_probe,
.remove = mhi_pci_remove,
.driver = {
.pm = &pm_ops
}
};
int __init mhi_controller_qcom_init(void)
{
return pci_register_driver(&mhi_pcie_driver);
};
void mhi_controller_qcom_exit(void)
{
pr_info("%s enter\n", __func__);
pci_unregister_driver(&mhi_pcie_driver);
pr_info("%s exit\n", __func__);
}

View File

@@ -0,0 +1,92 @@
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MHI_QCOM_
#define _MHI_QCOM_
/* iova cfg bitmask */
#define MHI_SMMU_ATTACH BIT(0)
#define MHI_SMMU_S1_BYPASS BIT(1)
#define MHI_SMMU_FAST BIT(2)
#define MHI_SMMU_ATOMIC BIT(3)
#define MHI_SMMU_FORCE_COHERENT BIT(4)
#define MHI_PCIE_VENDOR_ID (0x17cb)
#define MHI_PCIE_DEBUG_ID (0xffff)
#define MHI_RPM_SUSPEND_TMR_MS (3000)
#define MHI_PCI_BAR_NUM (0)
struct mhi_dev {
struct pci_dev *pci_dev;
u32 smmu_cfg;
int resn;
void *arch_info;
bool powered_on;
bool debug_mode;
};
void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl);
int mhi_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *device_id);
#if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,10,65 ))
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
int rc = dma_set_mask(dev, mask);
if (rc == 0)
dma_set_coherent_mask(dev, mask);
return rc;
}
#endif
static inline int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
mhi_cntrl->dev = &mhi_dev->pci_dev->dev;
return dma_set_mask_and_coherent(mhi_cntrl->dev, DMA_BIT_MASK(64));
}
static inline void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl)
{
}
static inline int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl)
{
return 0;
}
static inline void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl)
{
}
static inline int mhi_arch_platform_init(struct mhi_dev *mhi_dev)
{
return 0;
}
static inline void mhi_arch_platform_deinit(struct mhi_dev *mhi_dev)
{
}
static inline int mhi_arch_link_off(struct mhi_controller *mhi_cntrl,
bool graceful)
{
return 0;
}
static inline int mhi_arch_link_on(struct mhi_controller *mhi_cntrl)
{
return 0;
}
#endif /* _MHI_QCOM_ */

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,44 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.*/
#ifndef _MHI_QTI_
#define _MHI_QTI_
/* iova cfg bitmask */
#define MHI_SMMU_ATTACH BIT(0)
#define MHI_SMMU_S1_BYPASS BIT(1)
#define MHI_SMMU_FAST BIT(2)
#define MHI_SMMU_ATOMIC BIT(3)
#define MHI_SMMU_FORCE_COHERENT BIT(4)
#define MHI_PCIE_VENDOR_ID (0x17cb)
#define MHI_PCIE_DEBUG_ID (0xffff)
/* runtime suspend timer */
#define MHI_RPM_SUSPEND_TMR_MS (2000)
#define MHI_PCI_BAR_NUM (0)
struct mhi_dev {
struct pci_dev *pci_dev;
u32 smmu_cfg;
int resn;
void *arch_info;
bool powered_on;
dma_addr_t iova_start;
dma_addr_t iova_stop;
bool lpm_disabled;
};
void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl);
int mhi_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *device_id);
void mhi_pci_device_removed(struct pci_dev *pci_dev);
int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl);
void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl);
int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl);
void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl);
int mhi_arch_link_off(struct mhi_controller *mhi_cntrl, bool graceful);
int mhi_arch_link_on(struct mhi_controller *mhi_cntrl);
#endif /* _MHI_QTI_ */

View File

@@ -0,0 +1 @@
obj-$(CONFIG_MHI_BUS) += mhi_init.o mhi_main.o mhi_pm.o mhi_boot.o mhi_dtr.o

View File

@@ -0,0 +1,908 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. */
#ifndef _MHI_H_
#define _MHI_H_
#define PCIE_MHI_DRIVER_VERSION "V1.3.6"
#define ENABLE_MHI_MON
//#define ENABLE_IP_SW0
// #define ENABLE_ADPL
// #define ENABLE_QDSS
#include <linux/miscdevice.h>
typedef enum
{
MHI_CLIENT_LOOPBACK_OUT = 0,
MHI_CLIENT_LOOPBACK_IN = 1,
MHI_CLIENT_SAHARA_OUT = 2,
MHI_CLIENT_SAHARA_IN = 3,
MHI_CLIENT_DIAG_OUT = 4,
MHI_CLIENT_DIAG_IN = 5,
MHI_CLIENT_SSR_OUT = 6,
MHI_CLIENT_SSR_IN = 7,
MHI_CLIENT_QDSS_OUT = 8,
MHI_CLIENT_QDSS_IN = 9,
MHI_CLIENT_EFS_OUT = 10,
MHI_CLIENT_EFS_IN = 11,
MHI_CLIENT_MBIM_OUT = 12,
MHI_CLIENT_MBIM_IN = 13,
MHI_CLIENT_QMI_OUT = 14,
MHI_CLIENT_QMI_IN = 15,
MHI_CLIENT_QMI_2_OUT = 16,
MHI_CLIENT_QMI_2_IN = 17,
MHI_CLIENT_IP_CTRL_1_OUT = 18,
MHI_CLIENT_IP_CTRL_1_IN = 19,
MHI_CLIENT_IPCR_OUT = 20,
MHI_CLIENT_IPCR_IN = 21,
MHI_CLIENT_TEST_FW_OUT = 22,
MHI_CLIENT_TEST_FW_IN = 23,
MHI_CLIENT_RESERVED_0 = 24,
MHI_CLIENT_BOOT_LOG_IN = 25,
MHI_CLIENT_DCI_OUT = 26,
MHI_CLIENT_DCI_IN = 27,
MHI_CLIENT_QBI_OUT = 28,
MHI_CLIENT_QBI_IN = 29,
MHI_CLIENT_RESERVED_1_LOWER = 30,
MHI_CLIENT_RESERVED_1_UPPER = 31,
MHI_CLIENT_DUN_OUT = 32,
MHI_CLIENT_DUN_IN = 33,
MHI_CLIENT_EDL_OUT = 34,
MHI_CLIENT_EDL_IN = 35,
MHI_CLIENT_ADB_FB_OUT = 36,
MHI_CLIENT_ADB_FB_IN = 37,
MHI_CLIENT_RESERVED_2_LOWER = 38,
MHI_CLIENT_RESERVED_2_UPPER = 41,
MHI_CLIENT_CSVT_OUT = 42,
MHI_CLIENT_CSVT_IN = 43,
MHI_CLIENT_SMCT_OUT = 44,
MHI_CLIENT_SMCT_IN = 45,
MHI_CLIENT_IP_SW_0_OUT = 46,
MHI_CLIENT_IP_SW_0_IN = 47,
MHI_CLIENT_IP_SW_1_OUT = 48,
MHI_CLIENT_IP_SW_1_IN = 49,
MHI_CLIENT_RESERVED_3_LOWER = 50,
MHI_CLIENT_RESERVED_3_UPPER = 59,
MHI_CLIENT_TEST_0_OUT = 60,
MHI_CLIENT_TEST_0_IN = 61,
MHI_CLIENT_TEST_1_OUT = 62,
MHI_CLIENT_TEST_1_IN = 63,
MHI_CLIENT_TEST_2_OUT = 64,
MHI_CLIENT_TEST_2_IN = 65,
MHI_CLIENT_TEST_3_OUT = 66,
MHI_CLIENT_TEST_3_IN = 67,
MHI_CLIENT_RESERVED_4_LOWER = 68,
MHI_CLIENT_RESERVED_4_UPPER = 91,
MHI_CLIENT_OEM_0_OUT = 92,
MHI_CLIENT_OEM_0_IN = 93,
MHI_CLIENT_OEM_1_OUT = 94,
MHI_CLIENT_OEM_1_IN = 95,
MHI_CLIENT_OEM_2_OUT = 96,
MHI_CLIENT_OEM_2_IN = 97,
MHI_CLIENT_OEM_3_OUT = 98,
MHI_CLIENT_OEM_3_IN = 99,
MHI_CLIENT_IP_HW_0_OUT = 100,
MHI_CLIENT_IP_HW_0_IN = 101,
MHI_CLIENT_ADPL = 102,
MHI_CLIENT_IP_HW_QDSS = 103,
// MHI_CLIENT_RESERVED_5_LOWER = 103,
MHI_CLIENT_RESERVED_5_UPPER = 127,
MHI_MAX_CHANNELS = 128
}MHI_CLIENT_CHANNEL_TYPE;
/* Event Ring Index */
typedef enum
{
SW_EVT_RING = 0,
PRIMARY_EVENT_RING = SW_EVT_RING,
#ifdef ENABLE_IP_SW0
SW_0_OUT_EVT_RING,
SW_0_IN_EVT_RING,
#endif
IPA_OUT_EVENT_RING,
IPA_IN_EVENT_RING,
#ifdef ENABLE_ADPL
ADPL_EVT_RING,
#endif
#ifdef ENABLE_QDSS
QDSS_EVT_RING,
#endif
MAX_EVT_RING_IDX
}MHI_EVT_RING_IDX;
#define MHI_VERSION 0x01000000
#define MHIREGLEN_VALUE 0x100 /* **** WRONG VALUE *** */
#define MHI_MSI_INDEX 1
#define MAX_NUM_MHI_DEVICES 1
#define NUM_MHI_XFER_RINGS 128
#define NUM_MHI_EVT_RINGS MAX_EVT_RING_IDX
#define NUM_MHI_HW_EVT_RINGS 4
#define NUM_MHI_XFER_RING_ELEMENTS 16
#define NUM_MHI_EVT_RING_ELEMENTS (NUM_MHI_IPA_IN_RING_ELEMENTS*2) //must be *2: an event ring full condition will make the x55 dump
#define NUM_MHI_IPA_IN_RING_ELEMENTS 512
#define NUM_MHI_IPA_OUT_RING_ELEMENTS 512 //do not use UL aggregation, so increase this
#define NUM_MHI_DIAG_IN_RING_ELEMENTS 128
#define NUM_MHI_SW_IP_RING_ELEMENTS 512
#ifdef ENABLE_ADPL
#define NUM_MHI_ADPL_RING_ELEMENTS 256
#endif
#ifdef ENABLE_QDSS
#define NUM_MHI_QDSS_RING_ELEMENTS 256
#endif
/*
* If the interrupt moderation time is set to 1 ms and more than
* NUM_MHI_CHAN_RING_ELEMENTS buffers are sent to the modem within that 1 ms
* (e.g. during a firehose upgrade), the modem will not trigger an irq for
* those transfers.
*/
#define NUM_MHI_CHAN_RING_ELEMENTS 32 //8
#define MHI_EVT_CMD_QUEUE_SIZE 160
#define MHI_EVT_STATE_QUEUE_SIZE 128
#define MHI_EVT_XFER_QUEUE_SIZE 1024
#define CHAN_INBOUND(_x) ((_x)%2)
#define CHAN_SBL(_x) (((_x) == MHI_CLIENT_SAHARA_OUT) || \
((_x) == MHI_CLIENT_SAHARA_IN) || \
((_x) == MHI_CLIENT_BOOT_LOG_IN))
#define CHAN_EDL(_x) (((_x) == MHI_CLIENT_EDL_OUT) || \
((_x) == MHI_CLIENT_EDL_IN))
struct mhi_chan;
struct mhi_event;
struct mhi_ctxt;
struct mhi_cmd;
struct image_info;
struct bhi_vec_entry;
struct mhi_timesync;
struct mhi_buf_info;
/**
* enum MHI_CB - MHI callback
* @MHI_CB_IDLE: MHI entered idle state
* @MHI_CB_PENDING_DATA: New data available for client to process
* @MHI_CB_LPM_ENTER: MHI host entered low power mode
* @MHI_CB_LPM_EXIT: MHI host about to exit low power mode
* @MHI_CB_EE_RDDM: MHI device entered RDDM execution environment
* @MHI_CB_EE_MISSION_MODE: MHI device entered Mission Mode exec env
* @MHI_CB_SYS_ERROR: MHI device entered error state (may recover)
* @MHI_CB_FATAL_ERROR: MHI device entered fatal error
*/
enum MHI_CB {
MHI_CB_IDLE,
MHI_CB_PENDING_DATA,
MHI_CB_LPM_ENTER,
MHI_CB_LPM_EXIT,
MHI_CB_EE_RDDM,
MHI_CB_EE_MISSION_MODE,
MHI_CB_SYS_ERROR,
MHI_CB_FATAL_ERROR,
};
/**
* enum MHI_DEBUG_LEVEL - various debugging levels
*/
enum MHI_DEBUG_LEVEL {
MHI_MSG_LVL_VERBOSE,
MHI_MSG_LVL_INFO,
MHI_MSG_LVL_ERROR,
MHI_MSG_LVL_CRITICAL,
MHI_MSG_LVL_MASK_ALL,
};
/*
GSI_XFER_FLAG_BEI: Block event interrupt
1: Event generated by this ring element must not assert an interrupt to the host
0: Event generated by this ring element must assert an interrupt to the host
GSI_XFER_FLAG_EOT: Interrupt on end of transfer
1: If an EOT condition is encountered when processing this ring element, an event is generated by the device with its completion code set to EOT.
0: If an EOT condition is encountered for this ring element, a completion event is not generated by the device unless IEOB is 1
GSI_XFER_FLAG_EOB: Interrupt on end of block
1: Device notifies host after processing this ring element by sending a completion event
0: Completion event is not required after processing this ring element
GSI_XFER_FLAG_CHAIN: Chain bit that identifies the ring elements in a TD
*/
/**
* enum MHI_FLAGS - Transfer flags
* @MHI_EOB: End of buffer for bulk transfer
* @MHI_EOT: End of transfer
* @MHI_CHAIN: Linked transfer
*/
enum MHI_FLAGS {
MHI_EOB,
MHI_EOT,
MHI_CHAIN,
};
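/*
 * Note (added, illustrative): when a transfer is split across several ring
 * elements, each element except the last is typically queued with MHI_CHAIN
 * and the final element with MHI_EOT, so the device reports one completion
 * for the whole transfer descriptor.
 */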
/**
* enum mhi_device_type - Device types
* @MHI_XFER_TYPE: Handles data transfer
* @MHI_TIMESYNC_TYPE: Use for timesync feature
* @MHI_CONTROLLER_TYPE: Control device
*/
enum mhi_device_type {
MHI_XFER_TYPE,
MHI_TIMESYNC_TYPE,
MHI_CONTROLLER_TYPE,
};
/**
* enum mhi_ee - device current execution environment
* @MHI_EE_PBL - device in PBL
* @MHI_EE_SBL - device in SBL
* @MHI_EE_AMSS - device in mission mode (firmware fully loaded)
* @MHI_EE_RDDM - device in ram dump collection mode
* @MHI_EE_WFW - device in WLAN firmware mode
* @MHI_EE_PTHRU - device in PBL but configured in pass thru mode
* @MHI_EE_EDL - device in emergency download mode
*/
enum mhi_ee {
MHI_EE_PBL = 0x0,
MHI_EE_SBL = 0x1,
MHI_EE_AMSS = 0x2,
MHI_EE_RDDM = 0x3,
MHI_EE_WFW = 0x4,
MHI_EE_PTHRU = 0x5,
MHI_EE_EDL = 0x6,
MHI_EE_FP = 0x7, /* FlashProg, Flash Programmer Environment */
MHI_EE_MAX_SUPPORTED = MHI_EE_FP,
MHI_EE_DISABLE_TRANSITION, /* local EE, not related to mhi spec */
MHI_EE_MAX,
};
/**
* enum mhi_dev_state - device current MHI state
*/
enum mhi_dev_state {
MHI_STATE_RESET = 0x0,
MHI_STATE_READY = 0x1,
MHI_STATE_M0 = 0x2,
MHI_STATE_M1 = 0x3,
MHI_STATE_M2 = 0x4,
MHI_STATE_M3 = 0x5,
MHI_STATE_BHI = 0x7,
MHI_STATE_SYS_ERR = 0xFF,
MHI_STATE_MAX,
};
extern const char * const mhi_ee_str[MHI_EE_MAX];
#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \
"INVALID_EE" : mhi_ee_str[ee])
/**
* struct image_info - firmware and rddm table
* @mhi_buf - Contain device firmware and rddm table
* @entries - # of entries in table
*/
struct image_info {
struct mhi_buf *mhi_buf;
struct bhi_vec_entry *bhi_vec;
u32 entries;
};
/**
* struct mhi_controller - Master controller structure for external modem
* @dev: Device associated with this controller
* @of_node: DT that has MHI configuration information
* @regs: Points to base of MHI MMIO register space
* @bhi: Points to base of MHI BHI register space
* @bhie: Points to base of MHI BHIe register space
* @wake_db: MHI WAKE doorbell register address
* @dev_id: PCIe device id of the external device
* @domain: PCIe domain the device connected to
* @bus: PCIe bus the device assigned to
* @slot: PCIe slot for the modem
* @iova_start: IOMMU starting address for data
* @iova_stop: IOMMU stop address for data
* @fw_image: Firmware image name for normal booting
* @edl_image: Firmware image name for emergency download mode
* @fbc_download: MHI host needs to do complete image transfer
* @rddm_size: RAM dump size that host should allocate for debugging purpose
* @sbl_size: SBL image size
* @seg_len: BHIe vector size
* @fbc_image: Points to firmware image buffer
* @rddm_image: Points to RAM dump buffer
* @max_chan: Maximum number of channels controller support
* @mhi_chan: Points to channel configuration table
* @lpm_chans: List of channels that require LPM notifications
* @total_ev_rings: Total # of event rings allocated
* @hw_ev_rings: Number of hardware event rings
* @sw_ev_rings: Number of software event rings
* @msi_required: Number of msi required to operate
* @msi_allocated: Number of msi allocated by bus master
* @irq: base irq # to request
* @mhi_event: MHI event ring configurations table
* @mhi_cmd: MHI command ring configurations table
* @mhi_ctxt: MHI device context, shared memory between host and device
* @timeout_ms: Timeout in ms for state transitions
* @pm_state: Power management state
* @ee: MHI device execution environment
* @dev_state: MHI STATE
* @status_cb: CB function to notify various power states to bus master
* @link_status: Query link status in case of abnormal value read from device
* @runtime_get: Async runtime resume function
* @runtime_put: Release votes
* @time_get: Return host time in us
* @lpm_disable: Request controller to disable link level low power modes
* @lpm_enable: Controller may enable link level low power modes again
* @priv_data: Points to bus master's private data
*/
struct mhi_controller {
struct list_head node;
struct mhi_device *mhi_dev;
/* device node for iommu ops */
struct device *dev;
struct device_node *of_node;
/* mmio base */
phys_addr_t base_addr;
void __iomem *regs;
void __iomem *bhi;
void __iomem *bhie;
void __iomem *wake_db;
/* device topology */
u32 vendor;
u32 dev_id;
u32 domain;
u32 bus;
u32 slot;
u32 cntrl_idx;
struct device *cntrl_dev;
/* addressing window */
dma_addr_t iova_start;
dma_addr_t iova_stop;
/* fw images */
const char *fw_image;
const char *edl_image;
/* mhi host manages downloading entire fbc images */
bool fbc_download;
size_t rddm_size;
size_t sbl_size;
size_t seg_len;
u32 session_id;
u32 sequence_id;
struct image_info *fbc_image;
struct image_info *rddm_image;
/* physical channel config data */
u32 max_chan;
struct mhi_chan *mhi_chan;
struct list_head lpm_chans; /* these chan require lpm notification */
/* physical event config data */
u32 total_ev_rings;
u32 hw_ev_rings;
u32 sw_ev_rings;
u32 msi_required;
u32 msi_allocated;
u32 msi_irq_base;
int *irq; /* interrupt table */
struct mhi_event *mhi_event;
/* cmd rings */
struct mhi_cmd *mhi_cmd;
/* mhi context (shared with device) */
struct mhi_ctxt *mhi_ctxt;
u32 timeout_ms;
/* caller should grab pm_mutex for suspend/resume operations */
struct mutex pm_mutex;
bool pre_init;
rwlock_t pm_lock;
u32 pm_state;
enum mhi_ee ee;
enum mhi_dev_state dev_state;
bool wake_set;
atomic_t dev_wake;
atomic_t alloc_size;
atomic_t pending_pkts;
struct list_head transition_list;
spinlock_t transition_lock;
spinlock_t wlock;
/* debug counters */
u32 M0, M2, M3;
/* worker for different state transitions */
struct work_struct st_worker;
struct work_struct fw_worker;
struct work_struct syserr_worker;
struct delayed_work ready_worker;
wait_queue_head_t state_event;
/* shadow functions */
void (*status_cb)(struct mhi_controller *mhi_cntrl, void *priv,
enum MHI_CB reason);
int (*link_status)(struct mhi_controller *mhi_cntrl, void *priv);
void (*wake_get)(struct mhi_controller *mhi_cntrl, bool override);
void (*wake_put)(struct mhi_controller *mhi_cntrl, bool override);
int (*runtime_get)(struct mhi_controller *mhi_cntrl, void *priv);
void (*runtime_put)(struct mhi_controller *mhi_cntrl, void *priv);
void (*runtime_mark_last_busy)(struct mhi_controller *mhi_cntrl, void *priv);
u64 (*time_get)(struct mhi_controller *mhi_cntrl, void *priv);
int (*lpm_disable)(struct mhi_controller *mhi_cntrl, void *priv);
int (*lpm_enable)(struct mhi_controller *mhi_cntrl, void *priv);
int (*map_single)(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf);
void (*unmap_single)(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf);
/* channel to control DTR messaging */
struct mhi_device *dtr_dev;
/* bounce buffer settings */
bool bounce_buf;
size_t buffer_len;
/* supports time sync feature */
struct mhi_timesync *mhi_tsync;
struct mhi_device *tsync_dev;
/* kernel log level */
enum MHI_DEBUG_LEVEL klog_lvl;
int klog_slient;
/* private log level controller driver to set */
enum MHI_DEBUG_LEVEL log_lvl;
/* controller specific data */
void *priv_data;
void *log_buf;
struct dentry *dentry;
struct dentry *parent;
struct miscdevice miscdev;
#ifdef ENABLE_MHI_MON
spinlock_t lock;
/* Ref */
int nreaders; /* Under mon_lock AND mbus->lock */
struct list_head r_list; /* Chain of readers (usually one) */
struct kref ref; /* Under mon_lock */
/* Stats */
unsigned int cnt_events;
unsigned int cnt_text_lost;
#endif
};
#ifdef ENABLE_MHI_MON
struct mhi_tre;
struct mon_reader {
struct list_head r_link;
struct mhi_controller *m_bus;
void *r_data; /* Use container_of instead? */
void (*rnf_submit)(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len);
void (*rnf_receive)(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len);
void (*rnf_complete)(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre);
};
#endif
/**
* struct mhi_device - mhi device structure associated bind to channel
* @dev: Device associated with the channels
* @mtu: Maximum # of bytes controller support
* @ul_chan_id: MHI channel id for UL transfer
* @dl_chan_id: MHI channel id for DL transfer
* @tiocm: Device current terminal settings
* @priv: Driver private data
*/
struct mhi_device {
struct device dev;
u32 vendor;
u32 dev_id;
u32 domain;
u32 bus;
u32 slot;
size_t mtu;
int ul_chan_id;
int dl_chan_id;
int ul_event_id;
int dl_event_id;
u32 tiocm;
const struct mhi_device_id *id;
const char *chan_name;
struct mhi_controller *mhi_cntrl;
struct mhi_chan *ul_chan;
struct mhi_chan *dl_chan;
atomic_t dev_wake;
enum mhi_device_type dev_type;
void *priv_data;
int (*ul_xfer)(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan,
void *buf, size_t len, enum MHI_FLAGS flags);
int (*dl_xfer)(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan,
void *buf, size_t size, enum MHI_FLAGS flags);
void (*status_cb)(struct mhi_device *mhi_dev, enum MHI_CB reason);
};
/**
* struct mhi_result - Completed buffer information
* @buf_addr: Address of data buffer
* @dir: Channel direction
* @bytes_xferd: # of bytes transferred
* @transaction_status: Status of the last transfer
*/
struct mhi_result {
void *buf_addr;
enum dma_data_direction dir;
size_t bytes_xferd;
int transaction_status;
};
/**
* struct mhi_buf - Describes the buffer
* @page: buffer as a page
* @buf: cpu address for the buffer
* @phys_addr: physical address of the buffer
* @dma_addr: iommu address for the buffer
* @skb: skb of ip packet
* @len: # of bytes
* @name: Buffer label, for offload channel configurations name must be:
* ECA - Event context array data
* CCA - Channel context array data
*/
struct mhi_buf {
struct list_head node;
struct page *page;
void *buf;
phys_addr_t phys_addr;
dma_addr_t dma_addr;
struct sk_buff *skb;
size_t len;
const char *name; /* ECA, CCA */
};
/**
* struct mhi_driver - mhi driver information
* @id_table: NULL terminated channel ID names
* @ul_xfer_cb: UL data transfer callback
* @dl_xfer_cb: DL data transfer callback
* @status_cb: Asynchronous status callback
*/
struct mhi_driver {
const struct mhi_device_id *id_table;
int (*probe)(struct mhi_device *mhi_dev,
const struct mhi_device_id *id);
void (*remove)(struct mhi_device *mhi_dev);
void (*ul_xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *res);
void (*dl_xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *res);
void (*status_cb)(struct mhi_device *mhi_dev, enum MHI_CB mhi_cb);
struct device_driver driver;
};
#define to_mhi_driver(drv) container_of(drv, struct mhi_driver, driver)
#define to_mhi_device(dev) container_of(dev, struct mhi_device, dev)
static inline void mhi_device_set_devdata(struct mhi_device *mhi_dev,
void *priv)
{
mhi_dev->priv_data = priv;
}
static inline void *mhi_device_get_devdata(struct mhi_device *mhi_dev)
{
return mhi_dev->priv_data;
}
/**
* mhi_queue_transfer - Queue a buffer to hardware
* All transfers are asynchronous
* @mhi_dev: Device associated with the channels
* @dir: Data direction
* @buf: Data buffer (skb for hardware channels)
* @len: Size in bytes
* @mflags: Interrupt flags for the device
*/
static inline int mhi_queue_transfer(struct mhi_device *mhi_dev,
enum dma_data_direction dir,
void *buf,
size_t len,
enum MHI_FLAGS mflags)
{
if (dir == DMA_TO_DEVICE)
return mhi_dev->ul_xfer(mhi_dev, mhi_dev->ul_chan, buf, len,
mflags);
else
return mhi_dev->dl_xfer(mhi_dev, mhi_dev->dl_chan, buf, len,
mflags);
}
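/*
 * Illustrative sketch (added, not part of the original header; RX_BUF_LEN
 * and process_data() are hypothetical client-side names): a client's
 * dl_xfer_cb commonly consumes the completed buffer and re-queues it for
 * the next downlink transfer, roughly like this:
 *
 *	static void example_dl_cb(struct mhi_device *mhi_dev,
 *				  struct mhi_result *res)
 *	{
 *		process_data(res->buf_addr, res->bytes_xferd);
 *		mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, res->buf_addr,
 *				   RX_BUF_LEN, MHI_EOT);
 *	}
 */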
static inline void *mhi_controller_get_devdata(struct mhi_controller *mhi_cntrl)
{
return mhi_cntrl->priv_data;
}
static inline void mhi_free_controller(struct mhi_controller *mhi_cntrl)
{
kfree(mhi_cntrl);
}
/**
* mhi_driver_register - Register driver with MHI framework
* @mhi_drv: mhi_driver structure
*/
int mhi_driver_register(struct mhi_driver *mhi_drv);
/**
* mhi_driver_unregister - Unregister a driver for mhi_devices
* @mhi_drv: mhi_driver structure
*/
void mhi_driver_unregister(struct mhi_driver *mhi_drv);
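/*
 * Minimal registration sketch (added, illustrative only; the "EXAMPLE"
 * channel name and all example_* callbacks are hypothetical):
 *
 *	static const struct mhi_device_id example_id_table[] = {
 *		{ .chan = "EXAMPLE" },
 *		{},
 *	};
 *
 *	static struct mhi_driver example_driver = {
 *		.id_table = example_id_table,
 *		.probe = example_probe,
 *		.remove = example_remove,
 *		.ul_xfer_cb = example_ul_cb,
 *		.dl_xfer_cb = example_dl_cb,
 *		.driver = {
 *			.name = "example_mhi_client",
 *			.owner = THIS_MODULE,
 *		},
 *	};
 *
 *	module init calls mhi_driver_register(&example_driver);
 *	module exit calls mhi_driver_unregister(&example_driver).
 */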
/**
* mhi_device_configure - configure ECA or CCA context
* For offload channels that client manage, call this
* function to configure channel context or event context
* array associated with the channel
* @mhi_div: Device associated with the channels
* @dir: Direction of the channel
* @mhi_buf: Configuration data
* @elements: # of configuration elements
*/
int mhi_device_configure(struct mhi_device *mhi_div,
enum dma_data_direction dir,
struct mhi_buf *mhi_buf,
int elements);
/**
* mhi_device_get - disable all low power modes
* Only disables lpm, does not immediately exit low power mode
* if controller already in a low power mode
* @mhi_dev: Device associated with the channels
*/
void mhi_device_get(struct mhi_device *mhi_dev);
/**
* mhi_device_get_sync - disable all low power modes
* Synchronously disable all low power modes; exit low power mode if the
* controller is already in a low power state
* @mhi_dev: Device associated with the channels
*/
int mhi_device_get_sync(struct mhi_device *mhi_dev);
/**
* mhi_device_put - re-enable low power modes
* @mhi_dev: Device associated with the channels
*/
void mhi_device_put(struct mhi_device *mhi_dev);
/**
* mhi_prepare_for_transfer - setup channel for data transfer
* Moves both UL and DL channel from RESET to START state
* @mhi_dev: Device associated with the channels
*/
int mhi_prepare_for_transfer(struct mhi_device *mhi_dev);
/**
* mhi_unprepare_from_transfer - unprepare the channels
* Moves both UL and DL channels to RESET state
* @mhi_dev: Device associated with the channels
*/
void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev);
/**
* mhi_get_no_free_descriptors - Get transfer ring length
* Get # of TD available to queue buffers
* @mhi_dev: Device associated with the channels
* @dir: Direction of the channel
*/
int mhi_get_no_free_descriptors(struct mhi_device *mhi_dev,
enum dma_data_direction dir);
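/*
 * Probe-time sketch (added, illustrative; RX_BUF_LEN and example_probe are
 * hypothetical): a client typically starts the channels and then pre-queues
 * downlink buffers until no free transfer descriptors remain:
 *
 *	static int example_probe(struct mhi_device *mhi_dev,
 *				 const struct mhi_device_id *id)
 *	{
 *		int ret = mhi_prepare_for_transfer(mhi_dev);
 *
 *		if (ret)
 *			return ret;
 *		while (mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE) > 0) {
 *			void *buf = kmalloc(RX_BUF_LEN, GFP_KERNEL);
 *
 *			if (!buf || mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE,
 *						       buf, RX_BUF_LEN, MHI_EOT))
 *				break;
 *		}
 *		return 0;
 *	}
 */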
/**
* mhi_poll - poll for any available data to consume
* This is only applicable for DL direction
* @mhi_dev: Device associated with the channels
* @budget: Number of descriptors to service before returning
*/
int mhi_poll(struct mhi_device *mhi_dev, u32 budget);
/**
* mhi_ioctl - user space IOCTL support for MHI channels
* Native support for setting TIOCM
* @mhi_dev: Device associated with the channels
* @cmd: IOCTL cmd
* @arg: Optional parameter, ioctl cmd specific
*/
long mhi_ioctl(struct mhi_device *mhi_dev, unsigned int cmd, unsigned long arg);
/**
* mhi_alloc_controller - Allocate mhi_controller structure
* Allocate controller structure and additional data for controller
* private data. You may get the private data pointer by calling
* mhi_controller_get_devdata
* @size: # of additional bytes to allocate
*/
struct mhi_controller *mhi_alloc_controller(size_t size);
/**
* of_register_mhi_controller - Register MHI controller
* Registers MHI controller with MHI bus framework. DT must be supported
* @mhi_cntrl: MHI controller to register
*/
int of_register_mhi_controller(struct mhi_controller *mhi_cntrl);
void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl);
/**
* mhi_bdf_to_controller - Look up a registered controller
* Search for controller based on device identification
* @domain: RC domain of the device
* @bus: Bus device connected to
* @slot: Slot device assigned to
* @dev_id: Device Identification
*/
struct mhi_controller *mhi_bdf_to_controller(u32 domain, u32 bus, u32 slot,
u32 dev_id);
/**
* mhi_prepare_for_power_up - Do pre-initialization before power up
* This is optional; call it before power up if the controller does not
* want the bus framework to automatically free any allocated memory during
* the shutdown process.
* @mhi_cntrl: MHI controller
*/
int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl);
/**
* mhi_async_power_up - Starts MHI power up sequence
* @mhi_cntrl: MHI controller
*/
int mhi_async_power_up(struct mhi_controller *mhi_cntrl);
int mhi_sync_power_up(struct mhi_controller *mhi_cntrl);
/**
* mhi_power_down - Start MHI power down sequence
* @mhi_cntrl: MHI controller
* @graceful: link is still accessible, so do a graceful shutdown; otherwise
* the host is shut down without putting the device into RESET state
*/
void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful);
/**
* mhi_unprepare_after_power_down - free any memory allocated for power up
* @mhi_cntrl: MHI controller
*/
void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl);
/**
* mhi_pm_suspend - Move MHI into a suspended state
* Transition to MHI state M3 state from M0||M1||M2 state
* @mhi_cntrl: MHI controller
*/
int mhi_pm_suspend(struct mhi_controller *mhi_cntrl);
/**
* mhi_pm_resume - Resume MHI from suspended state
* Transition to MHI state M0 state from M3 state
* @mhi_cntrl: MHI controller
*/
int mhi_pm_resume(struct mhi_controller *mhi_cntrl);
/**
* mhi_download_rddm_img - Download ramdump image from device for
* debugging purpose.
* @mhi_cntrl: MHI controller
* @in_panic: true if we are trying to capture the image while in a kernel panic
*/
int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic);
/**
* mhi_force_rddm_mode - Force external device into rddm mode
* to collect the device ramdump. This is useful if the host driver asserts
* and we need to see the device state as well.
* @mhi_cntrl: MHI controller
*/
int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl);
/**
* mhi_get_remote_time_sync - Get external soc time relative to local soc time
* using MMIO method.
* @mhi_dev: Device associated with the channels
* @t_host: Pointer to output local soc time
* @t_dev: Pointer to output remote soc time
*/
int mhi_get_remote_time_sync(struct mhi_device *mhi_dev,
u64 *t_host,
u64 *t_dev);
/**
* mhi_get_mhi_state - Return MHI state of device
* @mhi_cntrl: MHI controller
*/
enum mhi_dev_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl);
/**
* mhi_set_mhi_state - Set device state
* @mhi_cntrl: MHI controller
* @state: state to set
*/
void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl,
enum mhi_dev_state state);
/**
* mhi_is_active - helper function to determine if MHI in active state
* @mhi_dev: client device
*/
static inline bool mhi_is_active(struct mhi_device *mhi_dev)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
return (mhi_cntrl->dev_state >= MHI_STATE_M0 &&
mhi_cntrl->dev_state <= MHI_STATE_M3);
}
/**
* mhi_debug_reg_dump - dump MHI registers for debug purpose
* @mhi_cntrl: MHI controller
*/
void mhi_debug_reg_dump(struct mhi_controller *mhi_cntrl);
#ifdef CONFIG_MHI_DEBUG
#define MHI_VERB(fmt, ...) do { \
if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_VERBOSE) \
pr_debug("[D][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__);\
} while (0)
#else
#define MHI_VERB(fmt, ...)
#endif
#define MHI_LOG(fmt, ...) do { \
if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_INFO) \
pr_info("[I][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__);\
else if (!mhi_cntrl->klog_slient) \
printk(KERN_DEBUG "[I][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__);\
} while (0)
#define MHI_ERR(fmt, ...) do { \
if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_ERROR) \
pr_err("[E][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__); \
} while (0)
#define MHI_CRITICAL(fmt, ...) do { \
if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_CRITICAL) \
pr_alert("[C][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__); \
} while (0)
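/*
 * Usage note (added, illustrative): the logging macros above reference a
 * local variable named mhi_cntrl, so it must be in scope at every call site,
 * e.g.
 *
 *	MHI_LOG("Registered controller, dev_id:0x%x\n", mhi_cntrl->dev_id);
 */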
int mhi_register_mhi_controller(struct mhi_controller *mhi_cntrl);
void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl);
#ifndef MHI_NAME_SIZE
#define MHI_NAME_SIZE 32
/**
* struct mhi_device_id - MHI device identification
* @chan: MHI channel name
* @driver_data: driver data
*/
struct mhi_device_id {
const char chan[MHI_NAME_SIZE];
unsigned long driver_data;
};
#endif
#endif /* _MHI_H_ */

View File

@@ -0,0 +1,860 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include "mhi.h"
#include "mhi_internal.h"
/* Software defines */
/* BHI Version */
#define BHI_MAJOR_VERSION 0x1
#define BHI_MINOR_VERSION 0x1
#define MSMHWID_NUMDWORDS 6 /* Number of dwords that make the MSMHWID */
#define OEMPKHASH_NUMDWORDS 48 /* Number of dwords that make the OEM PK HASH */
#define IsPBLExecEnv(ExecEnv) ((ExecEnv == MHI_EE_PBL) || (ExecEnv == MHI_EE_EDL) )
typedef u32 ULONG;
typedef struct _bhi_info_type
{
ULONG bhi_ver_minor;
ULONG bhi_ver_major;
ULONG bhi_image_address_low;
ULONG bhi_image_address_high;
ULONG bhi_image_size;
ULONG bhi_rsvd1;
ULONG bhi_imgtxdb;
ULONG bhi_rsvd2;
ULONG bhi_msivec;
ULONG bhi_rsvd3;
ULONG bhi_ee;
ULONG bhi_status;
ULONG bhi_errorcode;
ULONG bhi_errdbg1;
ULONG bhi_errdbg2;
ULONG bhi_errdbg3;
ULONG bhi_sernum;
ULONG bhi_sblantirollbackver;
ULONG bhi_numsegs;
ULONG bhi_msmhwid[6];
ULONG bhi_oempkhash[48];
ULONG bhi_rsvd5;
}BHI_INFO_TYPE, *PBHI_INFO_TYPE;
static void PrintBhiInfo(struct mhi_controller *mhi_cntrl, BHI_INFO_TYPE *bhi_info)
{
ULONG index;
char str[128];
MHI_LOG("BHI Device Info...\n");
MHI_LOG("BHI Version = { Major = 0x%X Minor = 0x%X}\n", bhi_info->bhi_ver_major, bhi_info->bhi_ver_minor);
MHI_LOG("BHI Execution Environment = 0x%X\n", bhi_info->bhi_ee);
MHI_LOG("BHI Status = 0x%X\n", bhi_info->bhi_status);
MHI_LOG("BHI Error code = 0x%X { Dbg1 = 0x%X Dbg2 = 0x%X Dbg3 = 0x%X }\n", bhi_info->bhi_errorcode, bhi_info->bhi_errdbg1, bhi_info->bhi_errdbg2, bhi_info->bhi_errdbg3);
MHI_LOG("BHI Serial Number = 0x%X\n", bhi_info->bhi_sernum);
MHI_LOG("BHI SBL Anti-Rollback Ver = 0x%X\n", bhi_info->bhi_sblantirollbackver);
MHI_LOG("BHI Number of Segments = 0x%X\n", bhi_info->bhi_numsegs);
for (index = 0; index < 6; index++)
{
snprintf(str+3*index, sizeof(str)-3*index, "%02x ", bhi_info->bhi_msmhwid[index]);
}
MHI_LOG("BHI MSM HW-Id = %s\n", str);
for (index = 0; index < 24; index++)
{
snprintf(str+3*index, sizeof(str)-3*index, "%02x ", bhi_info->bhi_oempkhash[index]);
}
MHI_LOG("BHI OEM PK Hash = %s\n", str);
}
static u32 bhi_read_reg(struct mhi_controller *mhi_cntrl, u32 offset)
{
u32 out = 0;
int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, offset, &out);
return (ret) ? 0 : out;
}
static int BhiRead(struct mhi_controller *mhi_cntrl, BHI_INFO_TYPE *bhi_info)
{
ULONG index;
memset(bhi_info, 0x00, sizeof(BHI_INFO_TYPE));
/* bhi_ver */
bhi_info->bhi_ver_minor = bhi_read_reg(mhi_cntrl, BHI_BHIVERSION_MINOR);
bhi_info->bhi_ver_major = bhi_read_reg(mhi_cntrl, BHI_BHIVERSION_MAJOR);
bhi_info->bhi_image_address_low = bhi_read_reg(mhi_cntrl, BHI_IMGADDR_LOW);
bhi_info->bhi_image_address_high = bhi_read_reg(mhi_cntrl, BHI_IMGADDR_HIGH);
bhi_info->bhi_image_size = bhi_read_reg(mhi_cntrl, BHI_IMGSIZE);
bhi_info->bhi_rsvd1 = bhi_read_reg(mhi_cntrl, BHI_RSVD1);
bhi_info->bhi_imgtxdb = bhi_read_reg(mhi_cntrl, BHI_IMGTXDB);
bhi_info->bhi_rsvd2 = bhi_read_reg(mhi_cntrl, BHI_RSVD2);
bhi_info->bhi_msivec = bhi_read_reg(mhi_cntrl, BHI_INTVEC);
bhi_info->bhi_rsvd3 = bhi_read_reg(mhi_cntrl, BHI_RSVD3);
bhi_info->bhi_ee = bhi_read_reg(mhi_cntrl, BHI_EXECENV);
bhi_info->bhi_status = bhi_read_reg(mhi_cntrl, BHI_STATUS);
bhi_info->bhi_errorcode = bhi_read_reg(mhi_cntrl, BHI_ERRCODE);
bhi_info->bhi_errdbg1 = bhi_read_reg(mhi_cntrl, BHI_ERRDBG1);
bhi_info->bhi_errdbg2 = bhi_read_reg(mhi_cntrl, BHI_ERRDBG2);
bhi_info->bhi_errdbg3 = bhi_read_reg(mhi_cntrl, BHI_ERRDBG3);
bhi_info->bhi_sernum = bhi_read_reg(mhi_cntrl, BHI_SERIALNU);
bhi_info->bhi_sblantirollbackver = bhi_read_reg(mhi_cntrl, BHI_SBLANTIROLLVER);
bhi_info->bhi_numsegs = bhi_read_reg(mhi_cntrl, BHI_NUMSEG);
for (index = 0; index < MSMHWID_NUMDWORDS; index++)
{
bhi_info->bhi_msmhwid[index] = bhi_read_reg(mhi_cntrl, BHI_MSMHWID(index));
}
for (index = 0; index < OEMPKHASH_NUMDWORDS; index++)
{
bhi_info->bhi_oempkhash[index] = bhi_read_reg(mhi_cntrl, BHI_OEMPKHASH(index));
}
bhi_info->bhi_rsvd5 = bhi_read_reg(mhi_cntrl, BHI_RSVD5);
PrintBhiInfo(mhi_cntrl, bhi_info);
/* Check the Execution Environment */
if (!IsPBLExecEnv(bhi_info->bhi_ee))
{
MHI_LOG("E - EE: 0x%X Expected PBL/EDL\n", bhi_info->bhi_ee);
}
/* success */
return 0;
}
/* setup rddm vector table for rddm transfer */
static void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
struct image_info *img_info)
{
struct mhi_buf *mhi_buf = img_info->mhi_buf;
struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
int i = 0;
for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
MHI_VERB("Setting vector:%pad size:%zu\n",
&mhi_buf->dma_addr, mhi_buf->len);
bhi_vec->dma_addr = mhi_buf->dma_addr;
bhi_vec->size = mhi_buf->len;
}
}
/* collect rddm during kernel panic */
static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
{
int ret;
struct mhi_buf *mhi_buf;
u32 sequence_id;
u32 rx_status;
enum mhi_ee ee;
struct image_info *rddm_image = mhi_cntrl->rddm_image;
const u32 delayus = 2000;
u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
const u32 rddm_timeout_us = 200000;
int rddm_retry = rddm_timeout_us / delayus; /* time to enter rddm */
void __iomem *base = mhi_cntrl->bhie;
MHI_LOG("Entered with pm_state:%s dev_state:%s ee:%s\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state),
TO_MHI_STATE_STR(mhi_cntrl->dev_state),
TO_MHI_EXEC_STR(mhi_cntrl->ee));
/*
* This should only execute during a kernel panic; we expect all
* other cores to shut down while we're collecting the rddm buffer. After
* returning from this function, we expect the device to reset.
*
* Normally, we would read/write pm_state only after grabbing
* pm_lock; since we're in a panic, skip it. Also, there is no
* guarantee this state change will take effect since
* we're setting it w/o grabbing pm_lock; it's best effort.
*/
mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
/* update should take the effect immediately */
smp_wmb();
/* setup the RX vector table */
mhi_rddm_prepare(mhi_cntrl, rddm_image);
mhi_buf = &rddm_image->mhi_buf[rddm_image->entries - 1];
MHI_LOG("Starting BHIe programming for RDDM\n");
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
upper_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
lower_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0))
sequence_id = get_random_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
#else
sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
#endif
if (unlikely(!sequence_id))
sequence_id = 1;
mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
sequence_id);
MHI_LOG("Trigger device into RDDM mode\n");
mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
MHI_LOG("Waiting for device to enter RDDM\n");
while (rddm_retry--) {
ee = mhi_get_exec_env(mhi_cntrl);
if (ee == MHI_EE_RDDM)
break;
udelay(delayus);
}
if (rddm_retry <= 0) {
/* This is a hardware reset, will force device to enter rddm */
MHI_LOG(
"Did not enter RDDM triggering host req. reset to force rddm\n");
mhi_write_reg(mhi_cntrl, mhi_cntrl->regs,
MHI_SOC_RESET_REQ_OFFSET, MHI_SOC_RESET_REQ);
udelay(delayus);
}
ee = mhi_get_exec_env(mhi_cntrl);
MHI_LOG("Waiting for image download completion, current EE:%s\n",
TO_MHI_EXEC_STR(ee));
while (retry--) {
ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS,
BHIE_RXVECSTATUS_STATUS_BMSK,
BHIE_RXVECSTATUS_STATUS_SHFT,
&rx_status);
if (ret)
return -EIO;
if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) {
MHI_LOG("RDDM successfully collected\n");
return 0;
}
udelay(delayus);
}
ee = mhi_get_exec_env(mhi_cntrl);
ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status);
MHI_ERR("Did not complete RDDM transfer\n");
MHI_ERR("Current EE:%s\n", TO_MHI_EXEC_STR(ee));
MHI_ERR("RXVEC_STATUS:0x%x, ret:%d\n", rx_status, ret);
return -EIO;
}
/* download ramdump image from device */
int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
{
void __iomem *base = mhi_cntrl->bhie;
rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
struct image_info *rddm_image = mhi_cntrl->rddm_image;
struct mhi_buf *mhi_buf;
int ret;
u32 rx_status;
u32 sequence_id;
if (!rddm_image)
return -ENOMEM;
if (in_panic)
return __mhi_download_rddm_in_panic(mhi_cntrl);
MHI_LOG("Waiting for device to enter RDDM state from EE:%s\n",
TO_MHI_EXEC_STR(mhi_cntrl->ee));
ret = wait_event_timeout(mhi_cntrl->state_event,
mhi_cntrl->ee == MHI_EE_RDDM ||
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
MHI_ERR("MHI is not in valid state, pm_state:%s ee:%s\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state),
TO_MHI_EXEC_STR(mhi_cntrl->ee));
return -EIO;
}
mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
/* vector table is the last entry */
mhi_buf = &rddm_image->mhi_buf[rddm_image->entries - 1];
read_lock_bh(pm_lock);
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
read_unlock_bh(pm_lock);
return -EIO;
}
MHI_LOG("Starting BHIe Programming for RDDM\n");
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
upper_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
lower_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0))
sequence_id = get_random_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
#else
sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
#endif
mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
sequence_id);
read_unlock_bh(pm_lock);
MHI_LOG("Upper:0x%x Lower:0x%x len:0x%zx sequence:%u\n",
upper_32_bits(mhi_buf->dma_addr),
lower_32_bits(mhi_buf->dma_addr),
mhi_buf->len, sequence_id);
MHI_LOG("Waiting for image download completion\n");
/* waiting for image download completion */
wait_event_timeout(mhi_cntrl->state_event,
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
mhi_read_reg_field(mhi_cntrl, base,
BHIE_RXVECSTATUS_OFFS,
BHIE_RXVECSTATUS_STATUS_BMSK,
BHIE_RXVECSTATUS_STATUS_SHFT,
&rx_status) || rx_status,
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
return -EIO;
return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
}
EXPORT_SYMBOL(mhi_download_rddm_img);
static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
const struct mhi_buf *mhi_buf)
{
void __iomem *base = mhi_cntrl->bhie;
rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
u32 tx_status;
read_lock_bh(pm_lock);
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
read_unlock_bh(pm_lock);
return -EIO;
}
MHI_LOG("Starting BHIe Programming\n");
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
upper_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS,
lower_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0))
mhi_cntrl->sequence_id = get_random_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK;
#else
mhi_cntrl->sequence_id = prandom_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK;
#endif
mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT,
mhi_cntrl->sequence_id);
read_unlock_bh(pm_lock);
MHI_LOG("Upper:0x%x Lower:0x%x len:0x%zx sequence:%u\n",
upper_32_bits(mhi_buf->dma_addr),
lower_32_bits(mhi_buf->dma_addr),
mhi_buf->len, mhi_cntrl->sequence_id);
MHI_LOG("Waiting for image transfer completion\n");
/* waiting for image download completion */
wait_event_timeout(mhi_cntrl->state_event,
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
mhi_read_reg_field(mhi_cntrl, base,
BHIE_TXVECSTATUS_OFFS,
BHIE_TXVECSTATUS_STATUS_BMSK,
BHIE_TXVECSTATUS_STATUS_SHFT,
&tx_status) || tx_status,
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
return -EIO;
return (tx_status == BHIE_TXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
}
static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
dma_addr_t dma_addr,
size_t size)
{
u32 tx_status, val;
u32 ImgTxDb = 0x1;
int i, ret;
void __iomem *base = mhi_cntrl->bhi;
rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
struct {
char *name;
u32 offset;
} error_reg[] = {
{ "ERROR_CODE", BHI_ERRCODE },
{ "ERROR_DBG1", BHI_ERRDBG1 },
{ "ERROR_DBG2", BHI_ERRDBG2 },
{ "ERROR_DBG3", BHI_ERRDBG3 },
{ NULL },
};
MHI_LOG("Starting BHI programming\n");
/* program start sbl download via bhi protocol */
read_lock_bh(pm_lock);
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
read_unlock_bh(pm_lock);
goto invalid_pm_state;
}
mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
upper_32_bits(dma_addr));
mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
lower_32_bits(dma_addr));
mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
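/* program the number of event rings and HW event rings before ringing the image doorbell */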
mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT, NUM_MHI_EVT_RINGS);
mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT, NUM_MHI_HW_EVT_RINGS);
mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, mhi_cntrl->msi_irq_base);
mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, ImgTxDb);
read_unlock_bh(pm_lock);
MHI_LOG("Waiting for image transfer completion\n");
/* waiting for image download completion */
ret = wait_event_timeout(mhi_cntrl->state_event,
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS,
BHI_STATUS_MASK, BHI_STATUS_SHIFT,
&tx_status) || tx_status,
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
goto invalid_pm_state;
if (tx_status == BHI_STATUS_ERROR) {
MHI_ERR("Image transfer failed\n");
read_lock_bh(pm_lock);
if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
for (i = 0; error_reg[i].name; i++) {
ret = mhi_read_reg(mhi_cntrl, base,
error_reg[i].offset, &val);
if (ret)
break;
MHI_ERR("reg:%s value:0x%x\n",
error_reg[i].name, val);
}
}
read_unlock_bh(pm_lock);
goto invalid_pm_state;
}
return (tx_status == BHI_STATUS_SUCCESS) ? 0 : -ETIMEDOUT;
invalid_pm_state:
return -EIO;
}
void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
struct image_info *image_info)
{
int i;
struct mhi_buf *mhi_buf = image_info->mhi_buf;
for (i = 0; i < image_info->entries; i++, mhi_buf++)
mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
mhi_buf->dma_addr);
kfree(image_info->mhi_buf);
kfree(image_info);
}
int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
struct image_info **image_info,
size_t alloc_size)
{
size_t seg_size = mhi_cntrl->seg_len;
/* require an additional entry for the vector table */
int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1;
int i;
struct image_info *img_info;
struct mhi_buf *mhi_buf;
MHI_LOG("Allocating bytes:%zu seg_size:%zu total_seg:%u\n",
alloc_size, seg_size, segments);
img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
if (!img_info)
return -ENOMEM;
/* allocate memory for entries */
img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf),
GFP_KERNEL);
if (!img_info->mhi_buf)
goto error_alloc_mhi_buf;
/* allocate and populate vector table */
mhi_buf = img_info->mhi_buf;
for (i = 0; i < segments; i++, mhi_buf++) {
size_t vec_size = seg_size;
/* last entry is for vector table */
if (i == segments - 1)
vec_size = sizeof(struct bhi_vec_entry) * i;
mhi_buf->len = vec_size;
mhi_buf->buf = mhi_alloc_coherent(mhi_cntrl, vec_size,
&mhi_buf->dma_addr, GFP_KERNEL);
if (!mhi_buf->buf)
goto error_alloc_segment;
MHI_LOG("Entry:%d Address:0x%llx size:%zu\n", i,
(unsigned long long)mhi_buf->dma_addr,
mhi_buf->len);
}
img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf;
img_info->entries = segments;
*image_info = img_info;
MHI_LOG("Successfully allocated bhi vec table\n");
return 0;
error_alloc_segment:
for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
mhi_buf->dma_addr);
error_alloc_mhi_buf:
kfree(img_info);
return -ENOMEM;
}
static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
const struct firmware *firmware,
struct image_info *img_info)
{
size_t remainder = firmware->size;
size_t to_cpy;
const u8 *buf = firmware->data;
int i = 0;
struct mhi_buf *mhi_buf = img_info->mhi_buf;
struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
while (remainder) {
MHI_ASSERT(i >= img_info->entries, "malformed vector table");
to_cpy = min(remainder, mhi_buf->len);
memcpy(mhi_buf->buf, buf, to_cpy);
bhi_vec->dma_addr = mhi_buf->dma_addr;
bhi_vec->size = to_cpy;
MHI_VERB("Setting Vector:0x%llx size: %llu\n",
bhi_vec->dma_addr, bhi_vec->size);
buf += to_cpy;
remainder -= to_cpy;
i++;
bhi_vec++;
mhi_buf++;
}
}
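/*
 * Firmware load sequence implemented below: wait for the device to reach PBL,
 * push the SBL image over BHI, and for FBC targets build the BHIE vector
 * table and transfer the full AMSS image once the device reports SBL.
 */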
void mhi_fw_load_worker(struct work_struct *work)
{
int ret;
struct mhi_controller *mhi_cntrl;
const char *fw_name;
const struct firmware *firmware;
struct image_info *image_info;
void *buf;
dma_addr_t dma_addr;
size_t size;
mhi_cntrl = container_of(work, struct mhi_controller, fw_worker);
MHI_LOG("Waiting for device to enter PBL from EE:%s\n",
TO_MHI_EXEC_STR(mhi_cntrl->ee));
ret = wait_event_timeout(mhi_cntrl->state_event,
MHI_IN_PBL(mhi_cntrl->ee) ||
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
MHI_ERR("MHI is not in valid state\n");
return;
}
MHI_LOG("Device current EE:%s\n", TO_MHI_EXEC_STR(mhi_cntrl->ee));
/* if the device is in pass-through (PTHRU), we do not have to load firmware */
if (mhi_cntrl->ee == MHI_EE_PTHRU)
return;
fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ?
mhi_cntrl->edl_image : mhi_cntrl->fw_image;
if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size ||
!mhi_cntrl->seg_len))) {
MHI_ERR("No firmware image defined or !sbl_size || !seg_len\n");
return;
}
ret = request_firmware(&firmware, fw_name, mhi_cntrl->dev);
if (ret) {
MHI_ERR("Error loading firmware, ret:%d\n", ret);
return;
}
size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size;
/* the sbl size provided is maximum size, not necessarily image size */
if (size > firmware->size)
size = firmware->size;
buf = mhi_alloc_coherent(mhi_cntrl, size, &dma_addr, GFP_KERNEL);
if (!buf) {
MHI_ERR("Could not allocate memory for image\n");
release_firmware(firmware);
return;
}
/* load sbl image */
memcpy(buf, firmware->data, size);
ret = mhi_fw_load_sbl(mhi_cntrl, dma_addr, size);
mhi_free_coherent(mhi_cntrl, size, buf, dma_addr);
/* error or in edl, we're done */
if (ret || mhi_cntrl->ee == MHI_EE_EDL) {
release_firmware(firmware);
return;
}
write_lock_irq(&mhi_cntrl->pm_lock);
mhi_cntrl->dev_state = MHI_STATE_RESET;
write_unlock_irq(&mhi_cntrl->pm_lock);
/*
* if we're doing fbc, populate vector tables while
* device transitioning into MHI READY state
*/
if (mhi_cntrl->fbc_download) {
ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image,
firmware->size);
if (ret) {
MHI_ERR("Error alloc size of %zu\n", firmware->size);
goto error_alloc_fw_table;
}
MHI_LOG("Copying firmware image into vector table\n");
/* load the firmware into BHIE vec table */
mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image);
}
/* transitioning into MHI RESET->READY state */
ret = mhi_ready_state_transition(mhi_cntrl);
MHI_LOG("To Reset->Ready PM_STATE:%s MHI_STATE:%s EE:%s, ret:%d\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state),
TO_MHI_STATE_STR(mhi_cntrl->dev_state),
TO_MHI_EXEC_STR(mhi_cntrl->ee), ret);
if (!mhi_cntrl->fbc_download) {
release_firmware(firmware);
return;
}
if (ret) {
MHI_ERR("Did not transition to READY state\n");
goto error_read;
}
/* wait for SBL event */
ret = wait_event_timeout(mhi_cntrl->state_event,
mhi_cntrl->ee == MHI_EE_SBL ||
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
MHI_ERR("MHI did not enter BHIE\n");
goto error_read;
}
/* start full firmware image download */
image_info = mhi_cntrl->fbc_image;
ret = mhi_fw_load_amss(mhi_cntrl,
/* last entry is vec table */
&image_info->mhi_buf[image_info->entries - 1]);
MHI_LOG("amss fw_load, ret:%d\n", ret);
release_firmware(firmware);
return;
error_read:
mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
mhi_cntrl->fbc_image = NULL;
error_alloc_fw_table:
release_firmware(firmware);
}
int BhiWrite(struct mhi_controller *mhi_cntrl, void __user *ubuf, size_t size)
{
int ret;
dma_addr_t dma_addr;
void *dma_buf;
MHI_LOG("Device current EE:%s, M:%s, PM:%s\n",
TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl)),
TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)),
to_mhi_pm_state_str(mhi_cntrl->pm_state));
#if 0
if (mhi_get_exec_env(mhi_cntrl) == MHI_EE_EDL && mhi_cntrl->ee != MHI_EE_EDL) {
mhi_cntrl->ee = MHI_EE_EDL;
wait_event_timeout(mhi_cntrl->state_event,
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms + 500));
}
#endif
#if 0
if (!MHI_IN_PBL(mhi_cntrl->ee) || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
MHI_ERR("MHI is not in valid BHI state\n");
return -EINVAL;
}
#endif
if (mhi_cntrl->ee != MHI_EE_EDL) {
MHI_ERR("MHI is not in EDL state\n");
return -EINVAL;
}
dma_buf = mhi_alloc_coherent(mhi_cntrl, size, &dma_addr, GFP_KERNEL);
if (!dma_buf) {
MHI_ERR("Could not allocate memory for image\n");
return -ENOMEM;
}
ret = copy_from_user(dma_buf, ubuf, size);
if (ret) {
MHI_ERR("IOCTL_BHI_WRITEIMAGE copy buf error, ret = %d\n", ret);
mhi_free_coherent(mhi_cntrl, size, dma_buf, dma_addr);
return ret;
}
ret = mhi_fw_load_sbl(mhi_cntrl, dma_addr, size);
mhi_free_coherent(mhi_cntrl, size, dma_buf, dma_addr);
if (ret) {
MHI_ERR("ret = %d, ee=%d\n", ret, mhi_cntrl->ee);
goto error_state;
}
write_lock_irq(&mhi_cntrl->pm_lock);
mhi_cntrl->dev_state = MHI_STATE_RESET;
write_unlock_irq(&mhi_cntrl->pm_lock);
/* transitioning into MHI RESET->READY state */
ret = mhi_ready_state_transition(mhi_cntrl);
if (ret) {
MHI_ERR("Did not transition to READY state\n");
goto error_state;
}
MHI_LOG("To Reset->Ready PM_STATE:%s MHI_STATE:%s EE:%s, ret:%d\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state),
TO_MHI_STATE_STR(mhi_cntrl->dev_state),
TO_MHI_EXEC_STR(mhi_cntrl->ee), ret);
/* wait for BHIE event */
ret = wait_event_timeout(mhi_cntrl->state_event,
mhi_cntrl->ee == MHI_EE_FP ||
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
MHI_ERR("MHI did not enter Flash Programmer Environment\n");
goto error_state;
}
MHI_LOG("MHI enter Flash Programmer Environment\n");
return 0;
error_state:
MHI_LOG("Device current EE:%s, M:%s\n",
TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl)),
TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)));
return ret;
}
long bhi_get_dev_info(struct mhi_controller *mhi_cntrl, void __user *ubuf)
{
long ret = -EINVAL;
BHI_INFO_TYPE bhi_info;
ret = BhiRead(mhi_cntrl, &bhi_info);
if (ret) {
MHI_ERR("IOCTL_BHI_GETDEVINFO BhiRead error, ret = %ld\n", ret);
return ret;
}
ret = copy_to_user(ubuf, &bhi_info, sizeof(bhi_info));
if (ret) {
MHI_ERR("IOCTL_BHI_GETDEVINFO copy error, ret = %ld\n", ret);
}
return ret;
}
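/*
 * IOCTL_BHI_WRITEIMAGE user buffer layout, as consumed below: a size_t length
 * field followed immediately by that many bytes of image data.
 */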
long bhi_write_image(struct mhi_controller *mhi_cntrl, void __user *ubuf)
{
long ret = -EINVAL;
size_t size;
ret = copy_from_user(&size, ubuf, sizeof(size));
if (ret) {
MHI_ERR("IOCTL_BHI_WRITEIMAGE copy size error, ret = %ld\n", ret);
return ret;
}
if (size == 0) {
MHI_ERR("IOCTL_BHI_WRITEIMAGE invalid size\n");
return -EINVAL;
}
ret = BhiWrite(mhi_cntrl, ubuf+sizeof(size), size);
if (ret) {
MHI_ERR("IOCTL_BHI_WRITEIMAGE BhiWrite error, ret = %ld\n", ret);
}
return ret;
}

View File

@@ -0,0 +1,274 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.*/
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/termios.h>
#include <linux/wait.h>
#include "mhi.h"
#include "mhi_internal.h"
struct __packed dtr_ctrl_msg {
u32 preamble;
u32 msg_id;
u32 dest_id;
u32 size;
u32 msg;
};
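/*
 * Control message exchanged over the IP_CTRL channel: the host reports DTR/RTS
 * with msg_id CTRL_HOST_STATE, the device reports DCD/DSR/RI with
 * CTRL_DEVICE_STATE; 'msg' carries the line-state bits defined below.
 */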
#define CTRL_MAGIC (0x4C525443)
#define CTRL_MSG_DTR BIT(0)
#define CTRL_MSG_RTS BIT(1)
#define CTRL_MSG_DCD BIT(0)
#define CTRL_MSG_DSR BIT(1)
#define CTRL_MSG_RI BIT(3)
#define CTRL_HOST_STATE (0x10)
#define CTRL_DEVICE_STATE (0x11)
#define CTRL_GET_CHID(dtr) (dtr->dest_id & 0xFF)
static int mhi_dtr_tiocmset(struct mhi_controller *mhi_cntrl,
struct mhi_device *mhi_dev,
u32 tiocm)
{
struct dtr_ctrl_msg *dtr_msg = NULL;
struct mhi_chan *dtr_chan = mhi_cntrl->dtr_dev->ul_chan;
spinlock_t *res_lock = &mhi_dev->dev.devres_lock;
u32 cur_tiocm;
int ret = 0;
cur_tiocm = mhi_dev->tiocm & ~(TIOCM_CD | TIOCM_DSR | TIOCM_RI);
tiocm &= (TIOCM_DTR | TIOCM_RTS);
/* state did not change */
if (cur_tiocm == tiocm)
return 0;
mutex_lock(&dtr_chan->mutex);
dtr_msg = kzalloc(sizeof(*dtr_msg), GFP_KERNEL);
if (!dtr_msg) {
ret = -ENOMEM;
goto tiocm_exit;
}
dtr_msg->preamble = CTRL_MAGIC;
dtr_msg->msg_id = CTRL_HOST_STATE;
dtr_msg->dest_id = mhi_dev->ul_chan_id;
dtr_msg->size = sizeof(u32);
if (tiocm & TIOCM_DTR)
dtr_msg->msg |= CTRL_MSG_DTR;
if (tiocm & TIOCM_RTS)
dtr_msg->msg |= CTRL_MSG_RTS;
/*
* 'minicom -D /dev/mhi_DUN' sends RTS:1 on open and RTS:0 on exit.
* RTS:0 prevents the modem from outputting AT responses.
* 'busybox microcom', however, does not send any RTS to the modem.
* [75094.969783] mhi_uci_q 0306_00.03.00_DUN: mhi_dtr_tiocmset DTR:0 RTS:1
* [75100.210994] mhi_uci_q 0306_00.03.00_DUN: mhi_dtr_tiocmset DTR:0 RTS:0
*/
dev_dbg(&mhi_dev->dev, "%s DTR:%d RTS:%d\n", __func__,
!!(tiocm & TIOCM_DTR), !!(tiocm & TIOCM_RTS));
reinit_completion(&dtr_chan->completion);
ret = mhi_queue_transfer(mhi_cntrl->dtr_dev, DMA_TO_DEVICE, dtr_msg,
sizeof(*dtr_msg), MHI_EOT);
if (ret)
goto tiocm_exit;
ret = wait_for_completion_timeout(&dtr_chan->completion,
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret) {
MHI_ERR("Failed to receive transfer callback\n");
ret = -EIO;
goto tiocm_exit;
}
ret = 0;
spin_lock_irq(res_lock);
mhi_dev->tiocm &= ~(TIOCM_DTR | TIOCM_RTS);
mhi_dev->tiocm |= tiocm;
spin_unlock_irq(res_lock);
tiocm_exit:
kfree(dtr_msg);
mutex_unlock(&dtr_chan->mutex);
return ret;
}
long mhi_ioctl(struct mhi_device *mhi_dev, unsigned int cmd, unsigned long arg)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
int ret;
/* ioctl not supported by this controller */
if (!mhi_cntrl->dtr_dev)
return -EIO;
switch (cmd) {
case TIOCMGET:
return mhi_dev->tiocm;
case TIOCMSET:
{
u32 tiocm;
ret = get_user(tiocm, (u32 *)arg);
if (ret)
return ret;
return mhi_dtr_tiocmset(mhi_cntrl, mhi_dev, tiocm);
}
default:
break;
}
return -EINVAL;
}
EXPORT_SYMBOL(mhi_ioctl);
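/*
 * Hypothetical user-space sketch (illustration only, not part of this driver):
 * the TIOCM ioctls handled above are reachable through a UCI character node
 * such as /dev/mhi_DUN:
 *
 *   int fd = open("/dev/mhi_DUN", O_RDWR);
 *   int bits = TIOCM_DTR | TIOCM_RTS;
 *   ioctl(fd, TIOCMSET, &bits);     forwarded to mhi_dtr_tiocmset()
 *   bits = ioctl(fd, TIOCMGET, 0);  current state is the ioctl return value
 */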
static int mhi_dtr_queue_inbound(struct mhi_controller *mhi_cntrl)
{
struct mhi_device *mhi_dev = mhi_cntrl->dtr_dev;
int nr_trbs = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
size_t mtu = mhi_dev->mtu;
void *buf;
int ret = -EIO, i;
for (i = 0; i < nr_trbs; i++) {
buf = kmalloc(mtu, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, buf, mtu,
MHI_EOT);
if (ret) {
kfree(buf);
return ret;
}
}
return ret;
}
static void mhi_dtr_dl_xfer_cb(struct mhi_device *mhi_dev,
struct mhi_result *mhi_result)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct dtr_ctrl_msg *dtr_msg = mhi_result->buf_addr;
u32 chan;
spinlock_t *res_lock;
if (mhi_result->transaction_status == -ENOTCONN) {
kfree(mhi_result->buf_addr);
return;
}
if (mhi_result->bytes_xferd != sizeof(*dtr_msg)) {
MHI_ERR("Unexpected length %zu received\n",
mhi_result->bytes_xferd);
return;
}
MHI_LOG("preamble:0x%x msg_id:%u dest_id:%u msg:0x%x\n",
dtr_msg->preamble, dtr_msg->msg_id, dtr_msg->dest_id,
dtr_msg->msg);
chan = CTRL_GET_CHID(dtr_msg);
if (chan >= mhi_cntrl->max_chan)
goto auto_queue;
mhi_dev = mhi_cntrl->mhi_chan[chan].mhi_dev;
if (!mhi_dev)
goto auto_queue;
res_lock = &mhi_dev->dev.devres_lock;
spin_lock_irq(res_lock);
mhi_dev->tiocm &= ~(TIOCM_CD | TIOCM_DSR | TIOCM_RI);
if (dtr_msg->msg & CTRL_MSG_DCD)
mhi_dev->tiocm |= TIOCM_CD;
if (dtr_msg->msg & CTRL_MSG_DSR)
mhi_dev->tiocm |= TIOCM_DSR;
if (dtr_msg->msg & CTRL_MSG_RI)
mhi_dev->tiocm |= TIOCM_RI;
spin_unlock_irq(res_lock);
auto_queue:
mhi_queue_transfer(mhi_cntrl->dtr_dev, DMA_FROM_DEVICE, mhi_result->buf_addr,
mhi_cntrl->dtr_dev->mtu, MHI_EOT);
}
static void mhi_dtr_ul_xfer_cb(struct mhi_device *mhi_dev,
struct mhi_result *mhi_result)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_chan *dtr_chan = mhi_cntrl->dtr_dev->ul_chan;
MHI_VERB("Received with status:%d\n", mhi_result->transaction_status);
if (!mhi_result->transaction_status)
complete(&dtr_chan->completion);
}
static void mhi_dtr_remove(struct mhi_device *mhi_dev)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
mhi_cntrl->dtr_dev = NULL;
}
static int mhi_dtr_probe(struct mhi_device *mhi_dev,
const struct mhi_device_id *id)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
int ret;
MHI_LOG("Enter for DTR control channel\n");
mhi_dev->mtu = min_t(size_t, id->driver_data, mhi_dev->mtu);
ret = mhi_prepare_for_transfer(mhi_dev);
if (!ret)
mhi_cntrl->dtr_dev = mhi_dev;
if (!ret)
ret = mhi_dtr_queue_inbound(mhi_cntrl);
MHI_LOG("Exit with ret:%d\n", ret);
return ret;
}
static const struct mhi_device_id mhi_dtr_table[] = {
{ .chan = "IP_CTRL", .driver_data = sizeof(struct dtr_ctrl_msg) },
{},
};
static struct mhi_driver mhi_dtr_driver = {
.id_table = mhi_dtr_table,
.remove = mhi_dtr_remove,
.probe = mhi_dtr_probe,
.ul_xfer_cb = mhi_dtr_ul_xfer_cb,
.dl_xfer_cb = mhi_dtr_dl_xfer_cb,
.driver = {
.name = "MHI_DTR",
.owner = THIS_MODULE,
}
};
int __init mhi_dtr_init(void)
{
return mhi_driver_register(&mhi_dtr_driver);
}
void mhi_dtr_exit(void) {
mhi_driver_unregister(&mhi_dtr_driver);
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,362 @@
#ifndef __SDX20_MHI_H
#define __SDX20_MHI_H
#include <linux/types.h>
/* MHI control data structures allotted by the host, including
* channel context array, event context array, command context and rings */
/* Channel context state */
enum mhi_dev_ch_ctx_state {
MHI_DEV_CH_STATE_DISABLED,
MHI_DEV_CH_STATE_ENABLED,
MHI_DEV_CH_STATE_RUNNING,
MHI_DEV_CH_STATE_SUSPENDED,
MHI_DEV_CH_STATE_STOP,
MHI_DEV_CH_STATE_ERROR,
MHI_DEV_CH_STATE_RESERVED,
MHI_DEV_CH_STATE_32BIT = 0x7FFFFFFF
};
/* Channel type */
enum mhi_dev_ch_ctx_type {
MHI_DEV_CH_TYPE_NONE,
MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL,
MHI_DEV_CH_TYPE_INBOUND_CHANNEL,
MHI_DEV_CH_RESERVED
};
/* Channel context type */
struct mhi_dev_ch_ctx {
enum mhi_dev_ch_ctx_state ch_state;
enum mhi_dev_ch_ctx_type ch_type;
uint32_t err_indx;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
enum mhi_dev_ring_element_type_id {
MHI_DEV_RING_EL_INVALID = 0,
MHI_DEV_RING_EL_NOOP = 1,
MHI_DEV_RING_EL_TRANSFER = 2,
MHI_DEV_RING_EL_RESET = 16,
MHI_DEV_RING_EL_STOP = 17,
MHI_DEV_RING_EL_START = 18,
MHI_DEV_RING_EL_MHI_STATE_CHG = 32,
MHI_DEV_RING_EL_CMD_COMPLETION_EVT = 33,
MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT = 34,
MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY = 64,
MHI_DEV_RING_EL_UNDEF
};
enum mhi_dev_ring_state {
RING_STATE_UINT = 0,
RING_STATE_IDLE,
RING_STATE_PENDING,
};
enum mhi_dev_ring_type {
RING_TYPE_CMD = 0,
RING_TYPE_ER,
RING_TYPE_CH,
RING_TYPE_INVAL
};
/* Event context interrupt moderation */
enum mhi_dev_evt_ctx_int_mod_timer {
MHI_DEV_EVT_INT_MODERATION_DISABLED
};
/* Event ring type */
enum mhi_dev_evt_ctx_event_ring_type {
MHI_DEV_EVT_TYPE_DEFAULT,
MHI_DEV_EVT_TYPE_VALID,
MHI_DEV_EVT_RESERVED
};
/* Event ring context type */
struct mhi_dev_ev_ctx {
uint32_t res1:16;
enum mhi_dev_evt_ctx_int_mod_timer intmodt:16;
enum mhi_dev_evt_ctx_event_ring_type ertype;
uint32_t msivec;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
/* Command context */
struct mhi_dev_cmd_ctx {
uint32_t res1;
uint32_t res2;
uint32_t res3;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
/* generic context */
struct mhi_dev_gen_ctx {
uint32_t res1;
uint32_t res2;
uint32_t res3;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
/* Transfer ring element */
struct mhi_dev_transfer_ring_element {
uint64_t data_buf_ptr;
uint32_t len:16;
uint32_t res1:16;
uint32_t chain:1;
uint32_t res2:7;
uint32_t ieob:1;
uint32_t ieot:1;
uint32_t bei:1;
uint32_t res3:5;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res4:8;
} __packed;
/* Command ring element */
/* Command ring No op command */
struct mhi_dev_cmd_ring_op {
uint64_t res1;
uint32_t res2;
uint32_t res3:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command ring reset channel command */
struct mhi_dev_cmd_ring_reset_channel_cmd {
uint64_t res1;
uint32_t res2;
uint32_t res3:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command ring stop channel command */
struct mhi_dev_cmd_ring_stop_channel_cmd {
uint64_t res1;
uint32_t res2;
uint32_t res3:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command ring start channel command */
struct mhi_dev_cmd_ring_start_channel_cmd {
uint64_t res1;
uint32_t seqnum;
uint32_t reliable:1;
uint32_t res2:15;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
enum mhi_dev_cmd_completion_code {
MHI_CMD_COMPL_CODE_INVALID = 0,
MHI_CMD_COMPL_CODE_SUCCESS = 1,
MHI_CMD_COMPL_CODE_EOT = 2,
MHI_CMD_COMPL_CODE_OVERFLOW = 3,
MHI_CMD_COMPL_CODE_EOB = 4,
MHI_CMD_COMPL_CODE_UNDEFINED = 16,
MHI_CMD_COMPL_CODE_RING_EL = 17,
MHI_CMD_COMPL_CODE_RES
};
/* Event ring elements */
/* Transfer completion event */
struct mhi_dev_event_ring_transfer_completion {
uint64_t ptr;
uint32_t len:16;
uint32_t res1:8;
enum mhi_dev_cmd_completion_code code:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command completion event */
struct mhi_dev_event_ring_cmd_completion {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_cmd_completion_code code:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res3:8;
} __packed;
enum mhi_dev_state {
MHI_DEV_RESET_STATE = 0,
MHI_DEV_READY_STATE,
MHI_DEV_M0_STATE,
MHI_DEV_M1_STATE,
MHI_DEV_M2_STATE,
MHI_DEV_M3_STATE,
MHI_DEV_MAX_STATE,
MHI_DEV_SYSERR_STATE = 0xff
};
/* MHI state change event */
struct mhi_dev_event_ring_state_change {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_state mhistate:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res3:8;
} __packed;
enum mhi_dev_execenv {
MHI_DEV_SBL_EE = 1,
MHI_DEV_AMSS_EE = 2,
MHI_DEV_UNRESERVED
};
/* EE state change event */
struct mhi_dev_event_ring_ee_state_change {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_execenv execenv:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res3:8;
} __packed;
/* Generic cmd to parse common details like type and channel id */
struct mhi_dev_ring_generic {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_state mhistate:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
struct mhi_config {
uint32_t mhi_reg_len;
uint32_t version;
uint32_t event_rings;
uint32_t channels;
uint32_t chdb_offset;
uint32_t erdb_offset;
};
#define NUM_CHANNELS 128
#define HW_CHANNEL_BASE 100
#define HW_CHANNEL_END 107
#define MHI_ENV_VALUE 2
#define MHI_MASK_ROWS_CH_EV_DB 4
#define TRB_MAX_DATA_SIZE 8192
#define MHI_CTRL_STATE 25
#define IPA_DMA_SYNC 1
#define IPA_DMA_ASYNC 0
/* maximum transfer completion events buffer */
#define MAX_TR_EVENTS 50
/*maximum event requests */
#define MHI_MAX_EVT_REQ 50
/* Possible ring element types */
union mhi_dev_ring_element_type {
struct mhi_dev_cmd_ring_op cmd_no_op;
struct mhi_dev_cmd_ring_reset_channel_cmd cmd_reset;
struct mhi_dev_cmd_ring_stop_channel_cmd cmd_stop;
struct mhi_dev_cmd_ring_start_channel_cmd cmd_start;
struct mhi_dev_transfer_ring_element cmd_transfer;
struct mhi_dev_event_ring_transfer_completion evt_tr_comp;
struct mhi_dev_event_ring_cmd_completion evt_cmd_comp;
struct mhi_dev_event_ring_state_change evt_state_change;
struct mhi_dev_event_ring_ee_state_change evt_ee_state;
struct mhi_dev_ring_generic generic;
};
/* Transfer ring element type */
union mhi_dev_ring_ctx {
struct mhi_dev_cmd_ctx cmd;
struct mhi_dev_ev_ctx ev;
struct mhi_dev_ch_ctx ch;
struct mhi_dev_gen_ctx generic;
};
/* MHI host Control and data address region */
struct mhi_host_addr {
uint32_t ctrl_base_lsb;
uint32_t ctrl_base_msb;
uint32_t ctrl_limit_lsb;
uint32_t ctrl_limit_msb;
uint32_t data_base_lsb;
uint32_t data_base_msb;
uint32_t data_limit_lsb;
uint32_t data_limit_msb;
};
/* MHI physical and virtual address region */
struct mhi_meminfo {
struct device *dev;
uintptr_t pa_aligned;
uintptr_t pa_unaligned;
uintptr_t va_aligned;
uintptr_t va_unaligned;
uintptr_t size;
};
struct mhi_addr {
uint64_t host_pa;
uintptr_t device_pa;
uintptr_t device_va;
size_t size;
dma_addr_t phy_addr;
void *virt_addr;
bool use_ipa_dma;
};
struct mhi_interrupt_state {
uint32_t mask;
uint32_t status;
};
enum mhi_dev_channel_state {
MHI_DEV_CH_UNINT,
MHI_DEV_CH_STARTED,
MHI_DEV_CH_PENDING_START,
MHI_DEV_CH_PENDING_STOP,
MHI_DEV_CH_STOPPED,
MHI_DEV_CH_CLOSED,
};
enum mhi_dev_ch_operation {
MHI_DEV_OPEN_CH,
MHI_DEV_CLOSE_CH,
MHI_DEV_READ_CH,
MHI_DEV_READ_WR,
MHI_DEV_POLL,
};
enum mhi_ctrl_info {
MHI_STATE_CONFIGURED = 0,
MHI_STATE_CONNECTED = 1,
MHI_STATE_DISCONNECTED = 2,
MHI_STATE_INVAL,
};
enum mhi_dev_tr_compl_evt_type {
SEND_EVENT_BUFFER,
SEND_EVENT_RD_OFFSET,
};
enum mhi_dev_transfer_type {
MHI_DEV_DMA_SYNC,
MHI_DEV_DMA_ASYNC,
};
#endif /* _SDX20_MHI_H_ */

View File

@@ -0,0 +1,426 @@
#ifndef __SDX20_MHI_H
#define __SDX20_MHI_H
#include <linux/types.h>
/* MHI control data structures allotted by the host, including
* channel context array, event context array, command context and rings */
/* Channel context state */
enum mhi_dev_ch_ctx_state {
MHI_DEV_CH_STATE_DISABLED,
MHI_DEV_CH_STATE_ENABLED,
MHI_DEV_CH_STATE_RUNNING,
MHI_DEV_CH_STATE_SUSPENDED,
MHI_DEV_CH_STATE_STOP,
MHI_DEV_CH_STATE_ERROR,
MHI_DEV_CH_STATE_RESERVED,
MHI_DEV_CH_STATE_32BIT = 0x7FFFFFFF
};
/* Channel type */
enum mhi_dev_ch_ctx_type {
MHI_DEV_CH_TYPE_NONE,
MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL,
MHI_DEV_CH_TYPE_INBOUND_CHANNEL,
MHI_DEV_CH_RESERVED
};
/* Channel context type */
struct mhi_dev_ch_ctx {
enum mhi_dev_ch_ctx_state ch_state;
enum mhi_dev_ch_ctx_type ch_type;
uint32_t err_indx;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
enum mhi_dev_ring_element_type_id {
MHI_DEV_RING_EL_INVALID = 0,
MHI_DEV_RING_EL_NOOP = 1,
MHI_DEV_RING_EL_TRANSFER = 2,
MHI_DEV_RING_EL_RESET = 16,
MHI_DEV_RING_EL_STOP = 17,
MHI_DEV_RING_EL_START = 18,
MHI_DEV_RING_EL_MHI_STATE_CHG = 32,
MHI_DEV_RING_EL_CMD_COMPLETION_EVT = 33,
MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT = 34,
MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY = 64,
MHI_DEV_RING_EL_UNDEF
};
enum mhi_dev_ring_state {
RING_STATE_UINT = 0,
RING_STATE_IDLE,
RING_STATE_PENDING,
};
enum mhi_dev_ring_type {
RING_TYPE_CMD = 0,
RING_TYPE_ER,
RING_TYPE_CH,
RING_TYPE_INVAL
};
/* Event context interrupt moderation */
enum mhi_dev_evt_ctx_int_mod_timer {
MHI_DEV_EVT_INT_MODERATION_DISABLED
};
/* Event ring type */
enum mhi_dev_evt_ctx_event_ring_type {
MHI_DEV_EVT_TYPE_DEFAULT,
MHI_DEV_EVT_TYPE_VALID,
MHI_DEV_EVT_RESERVED
};
/* Event ring context type */
struct mhi_dev_ev_ctx {
uint32_t res1:16;
enum mhi_dev_evt_ctx_int_mod_timer intmodt:16;
enum mhi_dev_evt_ctx_event_ring_type ertype;
uint32_t msivec;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
/* Command context */
struct mhi_dev_cmd_ctx {
uint32_t res1;
uint32_t res2;
uint32_t res3;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
/* generic context */
struct mhi_dev_gen_ctx {
uint32_t res1;
uint32_t res2;
uint32_t res3;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
/* Transfer ring element */
struct mhi_dev_transfer_ring_element {
uint64_t data_buf_ptr;
uint32_t len:16;
uint32_t res1:16;
uint32_t chain:1;
uint32_t res2:7;
uint32_t ieob:1;
uint32_t ieot:1;
uint32_t bei:1;
uint32_t res3:5;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res4:8;
} __packed;
/* Command ring element */
/* Command ring No op command */
struct mhi_dev_cmd_ring_op {
uint64_t res1;
uint32_t res2;
uint32_t res3:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command ring reset channel command */
struct mhi_dev_cmd_ring_reset_channel_cmd {
uint64_t res1;
uint32_t res2;
uint32_t res3:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command ring stop channel command */
struct mhi_dev_cmd_ring_stop_channel_cmd {
uint64_t res1;
uint32_t res2;
uint32_t res3:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command ring start channel command */
struct mhi_dev_cmd_ring_start_channel_cmd {
uint64_t res1;
uint32_t seqnum;
uint32_t reliable:1;
uint32_t res2:15;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
enum mhi_dev_cmd_completion_code {
MHI_CMD_COMPL_CODE_INVALID = 0,
MHI_CMD_COMPL_CODE_SUCCESS = 1,
MHI_CMD_COMPL_CODE_EOT = 2,
MHI_CMD_COMPL_CODE_OVERFLOW = 3,
MHI_CMD_COMPL_CODE_EOB = 4,
MHI_CMD_COMPL_CODE_UNDEFINED = 16,
MHI_CMD_COMPL_CODE_RING_EL = 17,
MHI_CMD_COMPL_CODE_RES
};
/* Event ring elements */
/* Transfer completion event */
struct mhi_dev_event_ring_transfer_completion {
uint64_t ptr;
uint32_t len:16;
uint32_t res1:8;
enum mhi_dev_cmd_completion_code code:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command completion event */
struct mhi_dev_event_ring_cmd_completion {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_cmd_completion_code code:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res3:8;
} __packed;
enum mhi_dev_state {
MHI_DEV_RESET_STATE = 0,
MHI_DEV_READY_STATE,
MHI_DEV_M0_STATE,
MHI_DEV_M1_STATE,
MHI_DEV_M2_STATE,
MHI_DEV_M3_STATE,
MHI_DEV_MAX_STATE,
MHI_DEV_SYSERR_STATE = 0xff
};
/* MHI state change event */
struct mhi_dev_event_ring_state_change {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_state mhistate:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res3:8;
} __packed;
enum mhi_dev_execenv {
MHI_DEV_SBL_EE = 1,
MHI_DEV_AMSS_EE = 2,
MHI_DEV_UNRESERVED
};
/* EE state change event */
struct mhi_dev_event_ring_ee_state_change {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_execenv execenv:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res3:8;
} __packed;
/* Generic cmd to parse common details like type and channel id */
struct mhi_dev_ring_generic {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_state mhistate:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
struct mhi_config {
uint32_t mhi_reg_len;
uint32_t version;
uint32_t event_rings;
uint32_t channels;
uint32_t chdb_offset;
uint32_t erdb_offset;
};
#define NUM_CHANNELS 128
#define HW_CHANNEL_BASE 100
#define HW_CHANNEL_END 107
#define MHI_ENV_VALUE 2
#define MHI_MASK_ROWS_CH_EV_DB 4
#define TRB_MAX_DATA_SIZE 8192
#define MHI_CTRL_STATE 25
#define IPA_DMA_SYNC 1
#define IPA_DMA_ASYNC 0
/* maximum transfer completion events buffer */
#define MAX_TR_EVENTS 50
/*maximum event requests */
#define MHI_MAX_EVT_REQ 50
/* Possible ring element types */
union mhi_dev_ring_element_type {
struct mhi_dev_cmd_ring_op cmd_no_op;
struct mhi_dev_cmd_ring_reset_channel_cmd cmd_reset;
struct mhi_dev_cmd_ring_stop_channel_cmd cmd_stop;
struct mhi_dev_cmd_ring_start_channel_cmd cmd_start;
struct mhi_dev_transfer_ring_element tre;
struct mhi_dev_event_ring_transfer_completion evt_tr_comp;
struct mhi_dev_event_ring_cmd_completion evt_cmd_comp;
struct mhi_dev_event_ring_state_change evt_state_change;
struct mhi_dev_event_ring_ee_state_change evt_ee_state;
struct mhi_dev_ring_generic generic;
};
/* Transfer ring element type */
union mhi_dev_ring_ctx {
struct mhi_dev_cmd_ctx cmd;
struct mhi_dev_ev_ctx ev;
struct mhi_dev_ch_ctx ch;
struct mhi_dev_gen_ctx generic;
};
/* MHI host Control and data address region */
struct mhi_host_addr {
uint32_t ctrl_base_lsb;
uint32_t ctrl_base_msb;
uint32_t ctrl_limit_lsb;
uint32_t ctrl_limit_msb;
uint32_t data_base_lsb;
uint32_t data_base_msb;
uint32_t data_limit_lsb;
uint32_t data_limit_msb;
};
/* MHI physical and virtual address region */
struct mhi_meminfo {
struct device *dev;
uintptr_t pa_aligned;
uintptr_t pa_unaligned;
uintptr_t va_aligned;
uintptr_t va_unaligned;
uintptr_t size;
};
struct mhi_addr {
uint64_t host_pa;
uintptr_t device_pa;
uintptr_t device_va;
size_t size;
dma_addr_t phy_addr;
void *virt_addr;
bool use_ipa_dma;
};
struct mhi_interrupt_state {
uint32_t mask;
uint32_t status;
};
enum mhi_dev_channel_state {
MHI_DEV_CH_UNINT,
MHI_DEV_CH_STARTED,
MHI_DEV_CH_PENDING_START,
MHI_DEV_CH_PENDING_STOP,
MHI_DEV_CH_STOPPED,
MHI_DEV_CH_CLOSED,
};
enum mhi_dev_ch_operation {
MHI_DEV_OPEN_CH,
MHI_DEV_CLOSE_CH,
MHI_DEV_READ_CH,
MHI_DEV_READ_WR,
MHI_DEV_POLL,
};
enum mhi_ctrl_info {
MHI_STATE_CONFIGURED = 0,
MHI_STATE_CONNECTED = 1,
MHI_STATE_DISCONNECTED = 2,
MHI_STATE_INVAL,
};
enum mhi_dev_tr_compl_evt_type {
SEND_EVENT_BUFFER,
SEND_EVENT_RD_OFFSET,
};
enum mhi_dev_transfer_type {
MHI_DEV_DMA_SYNC,
MHI_DEV_DMA_ASYNC,
};
#if 0
/* SW channel client list */
enum mhi_client_channel {
MHI_CLIENT_LOOPBACK_OUT = 0,
MHI_CLIENT_LOOPBACK_IN = 1,
MHI_CLIENT_SAHARA_OUT = 2,
MHI_CLIENT_SAHARA_IN = 3,
MHI_CLIENT_DIAG_OUT = 4,
MHI_CLIENT_DIAG_IN = 5,
MHI_CLIENT_SSR_OUT = 6,
MHI_CLIENT_SSR_IN = 7,
MHI_CLIENT_QDSS_OUT = 8,
MHI_CLIENT_QDSS_IN = 9,
MHI_CLIENT_EFS_OUT = 10,
MHI_CLIENT_EFS_IN = 11,
MHI_CLIENT_MBIM_OUT = 12,
MHI_CLIENT_MBIM_IN = 13,
MHI_CLIENT_QMI_OUT = 14,
MHI_CLIENT_QMI_IN = 15,
MHI_CLIENT_IP_CTRL_0_OUT = 16,
MHI_CLIENT_IP_CTRL_0_IN = 17,
MHI_CLIENT_IP_CTRL_1_OUT = 18,
MHI_CLIENT_IP_CTRL_1_IN = 19,
MHI_CLIENT_DCI_OUT = 20,
MHI_CLIENT_DCI_IN = 21,
MHI_CLIENT_IP_CTRL_3_OUT = 22,
MHI_CLIENT_IP_CTRL_3_IN = 23,
MHI_CLIENT_IP_CTRL_4_OUT = 24,
MHI_CLIENT_IP_CTRL_4_IN = 25,
MHI_CLIENT_IP_CTRL_5_OUT = 26,
MHI_CLIENT_IP_CTRL_5_IN = 27,
MHI_CLIENT_IP_CTRL_6_OUT = 28,
MHI_CLIENT_IP_CTRL_6_IN = 29,
MHI_CLIENT_IP_CTRL_7_OUT = 30,
MHI_CLIENT_IP_CTRL_7_IN = 31,
MHI_CLIENT_DUN_OUT = 32,
MHI_CLIENT_DUN_IN = 33,
MHI_CLIENT_IP_SW_0_OUT = 34,
MHI_CLIENT_IP_SW_0_IN = 35,
MHI_CLIENT_IP_SW_1_OUT = 36,
MHI_CLIENT_IP_SW_1_IN = 37,
MHI_CLIENT_IP_SW_2_OUT = 38,
MHI_CLIENT_IP_SW_2_IN = 39,
MHI_CLIENT_IP_SW_3_OUT = 40,
MHI_CLIENT_IP_SW_3_IN = 41,
MHI_CLIENT_CSVT_OUT = 42,
MHI_CLIENT_CSVT_IN = 43,
MHI_CLIENT_SMCT_OUT = 44,
MHI_CLIENT_SMCT_IN = 45,
MHI_CLIENT_IP_SW_4_OUT = 46,
MHI_CLIENT_IP_SW_4_IN = 47,
MHI_MAX_SOFTWARE_CHANNELS = 48,
MHI_CLIENT_TEST_OUT = 60,
MHI_CLIENT_TEST_IN = 61,
MHI_CLIENT_RESERVED_1_LOWER = 62,
MHI_CLIENT_RESERVED_1_UPPER = 99,
MHI_CLIENT_IP_HW_0_OUT = 100,
MHI_CLIENT_IP_HW_0_IN = 101,
MHI_CLIENT_RESERVED_2_LOWER = 102,
MHI_CLIENT_RESERVED_2_UPPER = 127,
MHI_MAX_CHANNELS = 102,
};
#endif
#endif /* _SDX20_MHI_H_ */

View File

@@ -0,0 +1,33 @@
menu "MHI device support"
config MHI_NETDEV
tristate "MHI NETDEV"
depends on MHI_BUS
help
MHI based net device driver for transferring IP traffic
between the host and the modem. Enabling this driver lets
clients transfer data through a standard network interface;
over-the-air traffic goes through the mhi netdev interface.
config MHI_UCI
tristate "MHI UCI"
depends on MHI_BUS
help
MHI based UCI driver for transferring data between the host and the
modem using standard file operations from user space. Open, read,
write, ioctl, and close operations are supported by this driver.
Please check mhi_uci_match_table for all supported channels that
are exposed to user space.
config MHI_SATELLITE
tristate "MHI SATELLITE"
depends on MHI_BUS
help
The MHI proxy satellite device driver enables NON-HLOS MHI satellite
drivers to communicate with the device over the PCIe link without host
involvement. The host propagates events from the device to the
NON-HLOS MHI satellite drivers, along with channel states and power
management, over IPC communication. This helps with HLOS power
savings.
endmenu
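# Illustrative defconfig fragment (not part of the import), assuming the MHI
# bus core is available as MHI_BUS:
#   CONFIG_MHI_BUS=y
#   CONFIG_MHI_NETDEV=m
#   CONFIG_MHI_UCI=m
#   CONFIG_MHI_SATELLITE=m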

View File

@@ -0,0 +1,3 @@
obj-$(CONFIG_MHI_NETDEV) +=mhi_netdev.o
obj-$(CONFIG_MHI_UCI) +=mhi_uci.o
obj-$(CONFIG_MHI_SATELLITE) +=mhi_satellite.o

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,981 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.*/
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/version.h>
#if 1
static inline void *ipc_log_context_create(int max_num_pages,
const char *modname, uint16_t user_version)
{ return NULL; }
static inline int ipc_log_string(void *ilctxt, const char *fmt, ...)
{ return -EINVAL; }
#endif
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/uaccess.h>
#include <linux/tty.h>
#include "../core/mhi.h"
#define DEVICE_NAME "mhi"
#define MHI_UCI_DRIVER_NAME "mhi_uci_q"
struct uci_chan {
wait_queue_head_t wq;
spinlock_t lock;
struct list_head pending; /* user space waiting to read */
struct uci_buf *cur_buf; /* current buffer user space reading */
size_t rx_size;
};
struct uci_buf {
struct page *page;
void *data;
size_t len;
unsigned nr_trb;
struct list_head node;
};
struct uci_dev {
struct list_head node;
dev_t devt;
struct device *dev;
struct mhi_device *mhi_dev;
const char *chan;
struct mutex mutex; /* sync open and close */
struct mutex r_mutex;
struct mutex w_mutex;
struct uci_chan ul_chan;
struct uci_chan dl_chan;
size_t mtu;
int ref_count;
bool enabled;
unsigned rx_error;
unsigned nr_trb;
unsigned nr_trbs;
struct uci_buf *uci_buf;
struct ktermios termios;
size_t bytes_xferd;
};
struct mhi_uci_drv {
struct list_head head;
struct mutex lock;
struct class *class;
int major;
dev_t dev_t;
};
static int uci_msg_lvl = MHI_MSG_LVL_ERROR;
module_param( uci_msg_lvl, uint, S_IRUGO | S_IWUSR);
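/* writable at runtime; with standard module_param behaviour this is expected
 * to appear under /sys/module/<module>/parameters/uci_msg_lvl */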
#define MSG_VERB(fmt, ...) do { \
if (uci_msg_lvl <= MHI_MSG_LVL_VERBOSE) \
pr_err("[D][%s] " fmt, __func__, ##__VA_ARGS__); \
} while (0)
#define MSG_LOG(fmt, ...) do { \
if (uci_msg_lvl <= MHI_MSG_LVL_INFO) \
pr_err("[I][%s] " fmt, __func__, ##__VA_ARGS__); \
} while (0)
#define MSG_ERR(fmt, ...) do { \
if (uci_msg_lvl <= MHI_MSG_LVL_ERROR) \
pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \
} while (0)
#define MAX_UCI_DEVICES (64)
#define QUEC_MHI_UCI_ALWAYS_OPEN /* for now, sdx20 cannot handle a "start-reset-start" operation, so the simple solution is to keep the channel in the start state */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0))
#ifdef TCGETS2
__weak int user_termios_to_kernel_termios(struct ktermios *k,
struct termios2 __user *u)
{
return copy_from_user(k, u, sizeof(struct termios2));
}
__weak int kernel_termios_to_user_termios(struct termios2 __user *u,
struct ktermios *k)
{
return copy_to_user(u, k, sizeof(struct termios2));
}
__weak int user_termios_to_kernel_termios_1(struct ktermios *k,
struct termios __user *u)
{
return copy_from_user(k, u, sizeof(struct termios));
}
__weak int kernel_termios_to_user_termios_1(struct termios __user *u,
struct ktermios *k)
{
return copy_to_user(u, k, sizeof(struct termios));
}
#else
__weak int user_termios_to_kernel_termios(struct ktermios *k,
struct termios __user *u)
{
return copy_from_user(k, u, sizeof(struct termios));
}
__weak int kernel_termios_to_user_termios(struct termios __user *u,
struct ktermios *k)
{
return copy_to_user(u, k, sizeof(struct termios));
}
#endif /* TCGETS2 */
#endif
static DECLARE_BITMAP(uci_minors, MAX_UCI_DEVICES);
static struct mhi_uci_drv mhi_uci_drv;
static int mhi_queue_inbound(struct uci_dev *uci_dev)
{
struct mhi_device *mhi_dev = uci_dev->mhi_dev;
int nr_trbs = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
size_t mtu = uci_dev->mtu;
void *buf;
struct uci_buf *uci_buf;
int ret = -EIO, i;
if (uci_dev->uci_buf == NULL) {
uci_dev->nr_trb = 0;
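/* allocate one buffer more than the free descriptor count; entries are then
 * reused in ring order by the read and completion paths */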
uci_dev->nr_trbs = (nr_trbs + 1);
uci_dev->uci_buf = kmalloc_array(uci_dev->nr_trbs, sizeof(*uci_buf), GFP_KERNEL);
if (!uci_dev->uci_buf)
return -ENOMEM;
uci_buf = uci_dev->uci_buf;
for (i = 0; i < uci_dev->nr_trbs; i++, uci_buf++) {
uci_buf->page = alloc_pages(GFP_KERNEL, get_order(mtu));
if (!uci_buf->page)
return -ENOMEM;
uci_buf->data = page_address(uci_buf->page);
uci_buf->len = 0;
uci_buf->nr_trb = i;
if (mhi_dev->dl_chan_id == MHI_CLIENT_DUN_IN) {
//MSG_ERR("[%d] = %p\n", i, uci_buf->data);
}
}
}
for (i = 0; i < nr_trbs; i++) {
#if 0
buf = kmalloc(mtu + sizeof(*uci_buf), GFP_KERNEL);
if (!buf)
return -ENOMEM;
uci_buf = buf + mtu;
uci_buf->data = buf;
#else
uci_buf = &uci_dev->uci_buf[i];
buf = uci_buf->data;
#endif
MSG_VERB("Allocated buf %d of %d size %zu\n", i, nr_trbs, mtu);
ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, buf, mtu,
MHI_EOT);
if (ret) {
#if 0
kfree(buf);
#endif
MSG_ERR("Failed to queue buffer %d\n", i);
return ret;
}
}
return ret;
}
static long mhi_uci_ioctl(struct file *file,
unsigned int cmd,
unsigned long arg)
{
struct uci_dev *uci_dev = file->private_data;
struct mhi_device *mhi_dev = uci_dev->mhi_dev;
long ret = -ERESTARTSYS;
mutex_lock(&uci_dev->mutex);
if (uci_dev->enabled)
ret = mhi_ioctl(mhi_dev, cmd, arg);
if (uci_dev->enabled) {
switch (cmd) {
case TCGETS:
#ifndef TCGETS2
ret = kernel_termios_to_user_termios((struct termios __user *)arg, &uci_dev->termios);
#else
ret = kernel_termios_to_user_termios_1((struct termios __user *)arg, &uci_dev->termios);
#endif
break;
case TCSETSF:
case TCSETS:
#ifndef TCGETS2
ret = user_termios_to_kernel_termios(&uci_dev->termios, (struct termios __user *)arg);
#else
ret = user_termios_to_kernel_termios_1(&uci_dev->termios, (struct termios __user *)arg);
#endif
break;
case TCFLSH:
ret = 0;
break;
default:
break;
}
}
mutex_unlock(&uci_dev->mutex);
return ret;
}
static int mhi_uci_release(struct inode *inode, struct file *file)
{
struct uci_dev *uci_dev = file->private_data;
mutex_lock(&uci_dev->mutex);
uci_dev->ref_count--;
if (!uci_dev->ref_count) {
struct uci_chan *uci_chan;
MSG_LOG("Last client left, closing node\n");
if (uci_dev->enabled)
mhi_unprepare_from_transfer(uci_dev->mhi_dev);
/* clean inbound channel */
uci_chan = &uci_dev->dl_chan;
if (uci_dev->uci_buf) {
unsigned nr_trb = 0;
for (nr_trb = 0; nr_trb < uci_dev->nr_trbs; nr_trb++) {
if (uci_dev->uci_buf[nr_trb].page)
__free_pages(uci_dev->uci_buf[nr_trb].page, get_order(uci_dev->mtu));
}
kfree(uci_dev->uci_buf);
}
uci_chan->cur_buf = NULL;
if (!uci_dev->enabled) {
MSG_LOG("Node is deleted, freeing dev node\n");
mutex_unlock(&uci_dev->mutex);
mutex_destroy(&uci_dev->mutex);
clear_bit(MINOR(uci_dev->devt), uci_minors);
kfree(uci_dev);
return 0;
}
}
MSG_LOG("exit: ref_count:%d\n", uci_dev->ref_count);
mutex_unlock(&uci_dev->mutex);
return 0;
}
static unsigned int mhi_uci_poll(struct file *file, poll_table *wait)
{
struct uci_dev *uci_dev = file->private_data;
struct mhi_device *mhi_dev = uci_dev->mhi_dev;
struct uci_chan *uci_chan;
unsigned int mask = 0;
poll_wait(file, &uci_dev->dl_chan.wq, wait);
// ADPL and QDSS do not need poll write. xingduo.du 2023-02-16
// poll_wait(file, &uci_dev->ul_chan.wq, wait);
uci_chan = &uci_dev->dl_chan;
spin_lock_bh(&uci_chan->lock);
if (!uci_dev->enabled) {
mask = POLLERR;
} else if (!list_empty(&uci_chan->pending) || uci_chan->cur_buf) {
MSG_VERB("Client can read from node\n");
mask |= POLLIN | POLLRDNORM;
}
spin_unlock_bh(&uci_chan->lock);
// ADPL and QDSS are single-channel; ul_chan is not initialized. xingduo.du 2023-02-27
if (mhi_dev->ul_chan) {
poll_wait(file, &uci_dev->ul_chan.wq, wait);
uci_chan = &uci_dev->ul_chan;
spin_lock_bh(&uci_chan->lock);
if (!uci_dev->enabled) {
mask |= POLLERR;
} else if (mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE) > 0) {
MSG_VERB("Client can write to node\n");
mask |= POLLOUT | POLLWRNORM;
}
if (!uci_dev->enabled)
mask |= POLLHUP;
if (uci_dev->rx_error)
mask |= POLLERR;
spin_unlock_bh(&uci_chan->lock);
}
MSG_LOG("Client attempted to poll, returning mask 0x%x\n", mask);
return mask;
}
static ssize_t mhi_uci_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *offp)
{
struct uci_dev *uci_dev = file->private_data;
struct mhi_device *mhi_dev = uci_dev->mhi_dev;
struct uci_chan *uci_chan = &uci_dev->ul_chan;
size_t bytes_xfered = 0;
int ret, nr_avail;
if (!buf || !count || uci_dev->rx_error)
return -EINVAL;
/* confirm channel is active */
spin_lock_bh(&uci_chan->lock);
if (!uci_dev->enabled) {
spin_unlock_bh(&uci_chan->lock);
return -ERESTARTSYS;
}
MSG_VERB("Enter: to xfer:%zu bytes\n", count);
while (count) {
size_t xfer_size;
void *kbuf;
enum MHI_FLAGS flags;
spin_unlock_bh(&uci_chan->lock);
nr_avail = mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE);
if ((nr_avail == 0) && (file->f_flags & O_NONBLOCK))
return -EAGAIN;
/* wait for free descriptors */
ret = wait_event_interruptible(uci_chan->wq,
(!uci_dev->enabled) ||
(nr_avail = mhi_get_no_free_descriptors(mhi_dev,
DMA_TO_DEVICE)) > 0);
if (ret == -ERESTARTSYS || !uci_dev->enabled) {
MSG_LOG("Exit signal caught for node or not enabled\n");
return -ERESTARTSYS;
}
xfer_size = min_t(size_t, count, uci_dev->mtu);
kbuf = kmalloc(xfer_size, GFP_KERNEL);
if (!kbuf) {
MSG_ERR("Failed to allocate memory %zu\n", xfer_size);
return -ENOMEM;
}
ret = copy_from_user(kbuf, buf, xfer_size);
if (unlikely(ret)) {
kfree(kbuf);
return ret;
}
spin_lock_bh(&uci_chan->lock);
/* if ring is full after this force EOT */
if (nr_avail > 1 && (count - xfer_size))
flags = MHI_CHAIN;
else
flags = MHI_EOT;
if (uci_dev->enabled)
ret = mhi_queue_transfer(mhi_dev, DMA_TO_DEVICE, kbuf,
xfer_size, flags);
else
ret = -ERESTARTSYS;
if (ret) {
kfree(kbuf);
goto sys_interrupt;
}
bytes_xfered += xfer_size;
count -= xfer_size;
buf += xfer_size;
}
spin_unlock_bh(&uci_chan->lock);
MSG_VERB("Exit: Number of bytes xferred:%zu\n", bytes_xfered);
return bytes_xfered;
sys_interrupt:
spin_unlock_bh(&uci_chan->lock);
return ret;
}
static ssize_t mhi_uci_read(struct file *file,
char __user *buf,
size_t count,
loff_t *ppos)
{
struct uci_dev *uci_dev = file->private_data;
struct mhi_device *mhi_dev = uci_dev->mhi_dev;
struct uci_chan *uci_chan = &uci_dev->dl_chan;
struct uci_buf *uci_buf;
char *ptr;
size_t to_copy;
int ret = 0;
if (!buf || uci_dev->rx_error)
return -EINVAL;
MSG_VERB("Client provided buf len:%zu\n", count);
/* confirm channel is active */
spin_lock_bh(&uci_chan->lock);
if (!uci_dev->enabled) {
spin_unlock_bh(&uci_chan->lock);
return -ERESTARTSYS;
}
/* No data available to read, wait */
if (!uci_chan->cur_buf && list_empty(&uci_chan->pending)) {
MSG_VERB("No data available to read waiting\n");
spin_unlock_bh(&uci_chan->lock);
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
ret = wait_event_interruptible(uci_chan->wq,
(!uci_dev->enabled ||
!list_empty(&uci_chan->pending)));
if (ret == -ERESTARTSYS) {
MSG_LOG("Exit signal caught for node\n");
return -ERESTARTSYS;
}
spin_lock_bh(&uci_chan->lock);
if (!uci_dev->enabled) {
MSG_LOG("node is disabled\n");
ret = -ERESTARTSYS;
goto read_error;
}
}
/* new read, get the next descriptor from the list */
if (!uci_chan->cur_buf) {
uci_buf = list_first_entry_or_null(&uci_chan->pending,
struct uci_buf, node);
if (unlikely(!uci_buf)) {
ret = -EIO;
goto read_error;
}
if (uci_buf->node.next == LIST_POISON1 || uci_buf->node.prev == LIST_POISON1) {
dump_stack();
ret = -EIO;
MSG_ERR("chan[%d] data=%p, len=%zd, nr_trb=%d\n",
mhi_dev->dl_chan_id, uci_buf->data, uci_buf->len, uci_buf->nr_trb);
goto read_error;
}
list_del(&uci_buf->node);
uci_chan->cur_buf = uci_buf;
uci_chan->rx_size = uci_buf->len;
MSG_VERB("Got pkt of size:%zu\n", uci_chan->rx_size);
}
uci_buf = uci_chan->cur_buf;
spin_unlock_bh(&uci_chan->lock);
/* Copy the buffer to user space */
to_copy = min_t(size_t, count, uci_chan->rx_size);
ptr = uci_buf->data + (uci_buf->len - uci_chan->rx_size);
ret = copy_to_user(buf, ptr, to_copy);
if (ret)
return ret;
MSG_VERB("Copied %zu of %zu bytes\n", to_copy, uci_chan->rx_size);
uci_chan->rx_size -= to_copy;
/* we finished with this buffer, queue it back to hardware */
if (!uci_chan->rx_size) {
spin_lock_bh(&uci_chan->lock);
uci_chan->cur_buf = NULL;
if (uci_dev->enabled)
#if 1 /* keep the buffer addresses in the ring unchanged */
{
if (uci_buf->page) {
unsigned nr_trb = uci_buf->nr_trb ? (uci_buf->nr_trb - 1) : (uci_dev->nr_trbs - 1);
uci_buf = &uci_dev->uci_buf[nr_trb];
ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE,
uci_buf->data, uci_dev->mtu,
MHI_EOT);
} else {
kfree(uci_buf);
ret = 0;
}
}
#endif
else
ret = -ERESTARTSYS;
if (ret) {
MSG_ERR("Failed to recycle element for chan:%d , ret=%d\n", mhi_dev->ul_chan_id, ret);
#if 0
kfree(uci_buf->data);
#endif
goto read_error;
}
spin_unlock_bh(&uci_chan->lock);
}
MSG_VERB("Returning %zu bytes\n", to_copy);
return to_copy;
read_error:
spin_unlock_bh(&uci_chan->lock);
return ret;
}
static ssize_t mhi_uci_write_mutex(struct file *file,
const char __user *buf,
size_t count,
loff_t *offp)
{
struct uci_dev *uci_dev = file->private_data;
int ret;
ret = mutex_lock_interruptible(&uci_dev->w_mutex); /*concurrent writes */
if (ret < 0)
return -ERESTARTSYS;
ret = mhi_uci_write(file, buf, count, offp);
mutex_unlock(&uci_dev->w_mutex);
return ret;
}
static ssize_t mhi_uci_read_mutex(struct file *file,
char __user *buf,
size_t count,
loff_t *ppos)
{
struct uci_dev *uci_dev = file->private_data;
int ret;
ret = mutex_lock_interruptible(&uci_dev->r_mutex); /*concurrent reads */
if (ret < 0)
return -ERESTARTSYS;
ret = mhi_uci_read(file, buf, count, ppos);
mutex_unlock(&uci_dev->r_mutex);
return ret;
}
static int mhi_uci_open(struct inode *inode, struct file *filp)
{
struct uci_dev *uci_dev = NULL, *tmp_dev;
int ret = -EIO;
struct uci_chan *dl_chan;
mutex_lock(&mhi_uci_drv.lock);
list_for_each_entry(tmp_dev, &mhi_uci_drv.head, node) {
if (tmp_dev->devt == inode->i_rdev) {
uci_dev = tmp_dev;
break;
}
}
/* could not find a minor node */
if (!uci_dev)
goto error_exit;
mutex_lock(&uci_dev->mutex);
if (!uci_dev->enabled) {
MSG_ERR("Node exist, but not in active state!\n");
goto error_open_chan;
}
uci_dev->ref_count++;
MSG_LOG("Node open, ref counts %u\n", uci_dev->ref_count);
if (uci_dev->ref_count == 1) {
MSG_LOG("Starting channel\n");
ret = mhi_prepare_for_transfer(uci_dev->mhi_dev);
if (ret) {
MSG_ERR("Error starting transfer channels\n");
uci_dev->ref_count--;
goto error_open_chan;
}
ret = mhi_queue_inbound(uci_dev);
if (ret)
goto error_rx_queue;
#ifdef QUEC_MHI_UCI_ALWAYS_OPEN
uci_dev->ref_count++;
#endif
}
filp->private_data = uci_dev;
mutex_unlock(&uci_dev->mutex);
mutex_unlock(&mhi_uci_drv.lock);
return 0;
error_rx_queue:
dl_chan = &uci_dev->dl_chan;
mhi_unprepare_from_transfer(uci_dev->mhi_dev);
if (uci_dev->uci_buf) {
unsigned nr_trb = 0;
for (nr_trb = 0; nr_trb < uci_dev->nr_trbs; nr_trb++) {
if (uci_dev->uci_buf[nr_trb].page)
__free_pages(uci_dev->uci_buf[nr_trb].page, get_order(uci_dev->mtu));
}
kfree(uci_dev->uci_buf);
}
error_open_chan:
mutex_unlock(&uci_dev->mutex);
error_exit:
mutex_unlock(&mhi_uci_drv.lock);
return ret;
}
static const struct file_operations mhidev_fops = {
.open = mhi_uci_open,
.release = mhi_uci_release,
.read = mhi_uci_read_mutex,
.write = mhi_uci_write_mutex,
.poll = mhi_uci_poll,
.unlocked_ioctl = mhi_uci_ioctl,
};
static void mhi_uci_remove(struct mhi_device *mhi_dev)
{
struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev);
MSG_LOG("Enter\n");
mutex_lock(&mhi_uci_drv.lock);
mutex_lock(&uci_dev->mutex);
/* disable the node */
spin_lock_irq(&uci_dev->dl_chan.lock);
spin_lock_irq(&uci_dev->ul_chan.lock);
uci_dev->enabled = false;
spin_unlock_irq(&uci_dev->ul_chan.lock);
spin_unlock_irq(&uci_dev->dl_chan.lock);
wake_up(&uci_dev->dl_chan.wq);
wake_up(&uci_dev->ul_chan.wq);
/* delete the node to prevent new opens */
device_destroy(mhi_uci_drv.class, uci_dev->devt);
uci_dev->dev = NULL;
list_del(&uci_dev->node);
#ifdef QUEC_MHI_UCI_ALWAYS_OPEN
if (uci_dev->ref_count > 0)
uci_dev->ref_count--;
#endif
/* safe to free memory only if all file nodes are closed */
if (!uci_dev->ref_count) {
mutex_unlock(&uci_dev->mutex);
mutex_destroy(&uci_dev->mutex);
clear_bit(MINOR(uci_dev->devt), uci_minors);
kfree(uci_dev);
mutex_unlock(&mhi_uci_drv.lock);
return;
}
MSG_LOG("Exit\n");
mutex_unlock(&uci_dev->mutex);
mutex_unlock(&mhi_uci_drv.lock);
}
static int mhi_uci_probe(struct mhi_device *mhi_dev,
const struct mhi_device_id *id)
{
struct uci_dev *uci_dev;
int minor;
char node_name[32];
int dir;
uci_dev = kzalloc(sizeof(*uci_dev), GFP_KERNEL);
if (!uci_dev)
return -ENOMEM;
mutex_init(&uci_dev->mutex);
mutex_init(&uci_dev->r_mutex);
mutex_init(&uci_dev->w_mutex);
uci_dev->mhi_dev = mhi_dev;
minor = find_first_zero_bit(uci_minors, MAX_UCI_DEVICES);
if (minor >= MAX_UCI_DEVICES) {
kfree(uci_dev);
return -ENOSPC;
}
mutex_lock(&uci_dev->mutex);
mutex_lock(&mhi_uci_drv.lock);
uci_dev->devt = MKDEV(mhi_uci_drv.major, minor);
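/* device node is named mhi_<chan>, with the controller index appended when
 * cntrl_idx is non-zero */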
#if 1
if (mhi_dev->mhi_cntrl->cntrl_idx)
uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev,
uci_dev->devt, uci_dev,
DEVICE_NAME "_%s%d",
mhi_dev->chan_name, mhi_dev->mhi_cntrl->cntrl_idx);
else
uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev,
uci_dev->devt, uci_dev,
DEVICE_NAME "_%s",
mhi_dev->chan_name);
#else
uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev,
uci_dev->devt, uci_dev,
DEVICE_NAME "_%04x_%02u.%02u.%02u%s%d",
mhi_dev->dev_id, mhi_dev->domain,
mhi_dev->bus, mhi_dev->slot, "_pipe_",
mhi_dev->ul_chan_id);
#endif
set_bit(minor, uci_minors);
/* create debugging buffer */
snprintf(node_name, sizeof(node_name), "mhi_uci_%04x_%02u.%02u.%02u_%d",
mhi_dev->dev_id, mhi_dev->domain, mhi_dev->bus, mhi_dev->slot,
mhi_dev->ul_chan_id);
for (dir = 0; dir < 2; dir++) {
struct uci_chan *uci_chan = (dir) ?
&uci_dev->ul_chan : &uci_dev->dl_chan;
spin_lock_init(&uci_chan->lock);
init_waitqueue_head(&uci_chan->wq);
INIT_LIST_HEAD(&uci_chan->pending);
}
uci_dev->termios = tty_std_termios;
uci_dev->mtu = min_t(size_t, id->driver_data, mhi_dev->mtu);
mhi_device_set_devdata(mhi_dev, uci_dev);
uci_dev->enabled = true;
list_add(&uci_dev->node, &mhi_uci_drv.head);
mutex_unlock(&mhi_uci_drv.lock);
mutex_unlock(&uci_dev->mutex);
MSG_LOG("channel:%s successfully probed\n", mhi_dev->chan_name);
return 0;
};
static void mhi_ul_xfer_cb(struct mhi_device *mhi_dev,
struct mhi_result *mhi_result)
{
struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev);
struct uci_chan *uci_chan = &uci_dev->ul_chan;
MSG_VERB("status:%d xfer_len:%zu\n", mhi_result->transaction_status,
mhi_result->bytes_xferd);
kfree(mhi_result->buf_addr);
if (!mhi_result->transaction_status)
wake_up(&uci_chan->wq);
}
static void mhi_dl_xfer_cb(struct mhi_device *mhi_dev,
struct mhi_result *mhi_result)
{
struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev);
struct uci_chan *uci_chan = &uci_dev->dl_chan;
unsigned long flags;
struct uci_buf *buf;
unsigned nr_trb = uci_dev->nr_trb;
buf = &uci_dev->uci_buf[nr_trb];
if (buf == NULL) {
MSG_ERR("buf = NULL");
return;
}
if (buf->nr_trb != nr_trb || buf->data != mhi_result->buf_addr)
{
uci_dev->rx_error++;
MSG_ERR("chan[%d]: uci_buf[%u] = %p , mhi_result[%u] = %p\n",
mhi_dev->dl_chan_id, buf->nr_trb, buf->data, nr_trb, mhi_result->buf_addr);
return;
}
uci_dev->nr_trb++;
if (uci_dev->nr_trb == uci_dev->nr_trbs)
uci_dev->nr_trb = 0;
if (mhi_result->transaction_status == -ENOTCONN) {
return;
}
if (mhi_result->bytes_xferd > uci_dev->mtu || mhi_result->bytes_xferd <= 0)
{
MSG_ERR("chan[%d]: bytes_xferd = %zd , mtu = %zd\n",
mhi_dev->dl_chan_id, mhi_result->bytes_xferd, uci_dev->mtu);
return;
}
if (mhi_result->bytes_xferd > uci_dev->bytes_xferd)
{
uci_dev->bytes_xferd = mhi_result->bytes_xferd;
//MSG_ERR("chan[%d]: bytes_xferd = %zd , mtu = %zd\n",
// mhi_dev->dl_chan_id, mhi_result->bytes_xferd, uci_dev->mtu);
}
MSG_VERB("status:%d receive_len:%zu\n", mhi_result->transaction_status,
mhi_result->bytes_xferd);
spin_lock_irqsave(&uci_chan->lock, flags);
#if 0
buf = mhi_result->buf_addr + uci_dev->mtu;
buf->data = mhi_result->buf_addr;
#endif
buf->len = mhi_result->bytes_xferd;
if (mhi_dev->dl_chan_id == MHI_CLIENT_DUN_IN
|| mhi_dev->dl_chan_id == MHI_CLIENT_QMI_IN
|| mhi_dev->dl_chan_id == MHI_CLIENT_MBIM_IN)
{
struct uci_buf *tmp_buf = NULL;
int skip_buf = 0;
#ifdef QUEC_MHI_UCI_ALWAYS_OPEN
if (uci_dev->ref_count == 1)
skip_buf++;
#endif
if (!skip_buf)
tmp_buf = (struct uci_buf *)kmalloc(buf->len + sizeof(struct uci_buf), GFP_ATOMIC);
if (tmp_buf) {
tmp_buf->page = NULL;
tmp_buf->data = ((void *)tmp_buf) + sizeof(struct uci_buf);
tmp_buf->len = buf->len;
memcpy(tmp_buf->data, buf->data, buf->len);
}
if (buf) {
struct uci_buf *uci_buf = buf;
unsigned nr_trb = uci_buf->nr_trb ? (uci_buf->nr_trb - 1) : (uci_dev->nr_trbs - 1);
uci_buf = &uci_dev->uci_buf[nr_trb];
mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, uci_buf->data, uci_dev->mtu, MHI_EOT);
}
buf = tmp_buf;
}
if (buf)
list_add_tail(&buf->node, &uci_chan->pending);
spin_unlock_irqrestore(&uci_chan->lock, flags);
#ifdef CONFIG_PM_SLEEP
if (mhi_dev->dev.power.wakeup)
__pm_wakeup_event(mhi_dev->dev.power.wakeup, 0);
#endif
wake_up(&uci_chan->wq);
}
/* Repair: allow the SDX6x module to read the qdb file. xingduo.du 2023-01-18 */
#define DIAG_MAX_PCIE_PKT_SZ 8192 /* defined by the module */
/* .driver_data stores max mtu */
static const struct mhi_device_id mhi_uci_match_table[] = {
{ .chan = "LOOPBACK", .driver_data = 0x1000 },
{ .chan = "SAHARA", .driver_data = 0x4000 },
{ .chan = "EDL", .driver_data = 0x4000 },
{ .chan = "DIAG", .driver_data = DIAG_MAX_PCIE_PKT_SZ },
{ .chan = "MBIM", .driver_data = 0x1000 },
{ .chan = "QMI0", .driver_data = 0x1000 },
{ .chan = "QMI1", .driver_data = 0x1000 },
{ .chan = "DUN", .driver_data = 0x1000 },
#ifdef ENABLE_ADPL
{ .chan = "ADPL", .driver_data = 0x1000 },
#endif
#ifdef ENABLE_QDSS
{ .chan = "QDSS", .driver_data = 0x1000 },
#endif
{},
};
static struct mhi_driver mhi_uci_driver = {
.id_table = mhi_uci_match_table,
.remove = mhi_uci_remove,
.probe = mhi_uci_probe,
.ul_xfer_cb = mhi_ul_xfer_cb,
.dl_xfer_cb = mhi_dl_xfer_cb,
.driver = {
.name = MHI_UCI_DRIVER_NAME,
.owner = THIS_MODULE,
},
};
int mhi_device_uci_init(void)
{
int ret;
ret = register_chrdev(0, MHI_UCI_DRIVER_NAME, &mhidev_fops);
if (ret < 0)
return ret;
mhi_uci_drv.major = ret;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 4, 0))
mhi_uci_drv.class = class_create(MHI_UCI_DRIVER_NAME);
#else
mhi_uci_drv.class = class_create(THIS_MODULE, MHI_UCI_DRIVER_NAME);
#endif
if (IS_ERR(mhi_uci_drv.class)) {
unregister_chrdev(mhi_uci_drv.major, MHI_UCI_DRIVER_NAME);
return -ENODEV;
}
mutex_init(&mhi_uci_drv.lock);
INIT_LIST_HEAD(&mhi_uci_drv.head);
ret = mhi_driver_register(&mhi_uci_driver);
if (ret) {
class_destroy(mhi_uci_drv.class);
unregister_chrdev(mhi_uci_drv.major, MHI_UCI_DRIVER_NAME);
}
return ret;
}
void mhi_device_uci_exit(void)
{
mhi_driver_unregister(&mhi_uci_driver);
class_destroy(mhi_uci_drv.class);
unregister_chrdev(mhi_uci_drv.major, MHI_UCI_DRIVER_NAME);
}
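/* Usage sketch (illustrative only, not part of the driver): a minimal
* user-space reader for one of the character devices registered above. It
* assumes DEVICE_NAME expands to "mhi" and that the DIAG channel probed, so
* the node is /dev/mhi_DIAG; adjust the path for other channels or a non-zero
* controller index. DIAG_MAX_PCIE_PKT_SZ (8192) bounds a single read.
*/
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[8192];
	ssize_t n;
	int fd = open("/dev/mhi_DIAG", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);	/* forward raw DIAG frames */
	close(fd);
	return 0;
}
#endif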

View File

@@ -0,0 +1,13 @@
#
# RMNET MAP driver
#
menuconfig RMNET
tristate "RmNet MAP driver"
default n
select GRO_CELLS
---help---
If you select this, you will enable the RMNET module which is used
for handling data in the multiplexing and aggregation protocol (MAP)
format in the embedded data path. RMNET devices can be attached to
any IP mode physical device.

View File

@@ -0,0 +1,11 @@
#
# Makefile for the RMNET module
#
rmnet-y := rmnet_config.o
rmnet-y += rmnet_vnd.o
rmnet-y += rmnet_handlers.o
rmnet-y += rmnet_map_data.o
rmnet-y += rmnet_map_command.o
rmnet-y += rmnet_descriptor.o
obj-$(CONFIG_RMNET) += rmnet.o

View File

@@ -0,0 +1,141 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* RMNET configuration engine
*
*/
#include <net/sock.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netdevice.h>
#include <linux/hashtable.h>
#include "rmnet_config.h"
#include "rmnet_handlers.h"
#include "rmnet_vnd.h"
#include "rmnet_private.h"
#include "rmnet_map.h"
#include "rmnet_descriptor.h"
/* Locking scheme -
* The shared resource which needs to be protected is realdev->rx_handler_data.
* For the writer path, this is using rtnl_lock(). The writer paths are
* rmnet_newlink(), rmnet_dellink() and rmnet_force_unassociate_device(). These
* paths are already called with rtnl_lock() acquired. There is also an
* ASSERT_RTNL() to ensure that we are calling with rtnl_lock() acquired. For
* dereferences here, we need to use rtnl_dereference(). Dev list writing
* needs to happen with rtnl_lock() acquired for netdev_master_upper_dev_link().
* For the reader path, real_dev->rx_handler_data is dereferenced in the TX / RX
* paths. We only need rcu_read_lock() for these scenarios. In these cases,
* the rcu_read_lock() is held in __dev_queue_xmit() and
* netif_receive_skb_internal(), so readers need to use rcu_dereference_rtnl()
* to get the relevant information. For dev list reading, we again acquire
* rcu_read_lock() in rmnet_dellink() for netdev_master_upper_dev_get_rcu().
* We also use unregister_netdevice_many() to free all rmnet devices in
* rmnet_force_unassociate_device() so we don't lose the rtnl_lock() and free
* in the same context.
*/
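/* Condensed sketch of the two access patterns described above (illustrative
* only; the real helpers are rmnet_get_port_rtnl() and rmnet_get_port()
* defined below).
*/
#if 0
/* writer side: rtnl_lock() is already held by the rtnetlink core */
port = rtnl_dereference(real_dev->rx_handler_data);

/* reader side: called from TX/RX paths where rcu_read_lock() is held */
port = rcu_dereference_rtnl(real_dev->rx_handler_data);
#endif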
/* Local Definitions and Declarations */
static int rmnet_is_real_dev_registered(const struct net_device *real_dev)
{
return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler;
}
/* Needs rtnl lock */
static struct rmnet_port*
rmnet_get_port_rtnl(const struct net_device *real_dev)
{
return rtnl_dereference(real_dev->rx_handler_data);
}
static int rmnet_unregister_real_device(struct net_device *real_dev,
struct rmnet_port *port)
{
if (port->nr_rmnet_devs)
return -EINVAL;
rmnet_map_cmd_exit(port);
rmnet_map_tx_aggregate_exit(port);
rmnet_descriptor_deinit(port);
netdev_rx_handler_unregister(real_dev);
/* free the handler's private data only after it can no longer be called */
kfree(port);
/* release reference on real_dev */
dev_put(real_dev);
netdev_dbg(real_dev, "Removed from rmnet\n");
return 0;
}
static int rmnet_register_real_device(struct net_device *real_dev)
{
struct rmnet_port *port;
int rc, entry;
ASSERT_RTNL();
if (rmnet_is_real_dev_registered(real_dev))
return 0;
port = kzalloc(sizeof(*port), GFP_ATOMIC);
if (!port)
return -ENOMEM;
port->dev = real_dev;
rc = netdev_rx_handler_register(real_dev, rmnet_rx_handler, port);
if (rc) {
kfree(port);
return -EBUSY;
}
/* hold on to real dev for MAP data */
dev_hold(real_dev);
for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++)
INIT_HLIST_HEAD(&port->muxed_ep[entry]);
rc = rmnet_descriptor_init(port);
if (rc) {
rmnet_descriptor_deinit(port);
return rc;
}
rmnet_map_tx_aggregate_init(port);
rmnet_map_cmd_init(port);
netdev_dbg(real_dev, "registered with rmnet\n");
return 0;
}
/* Needs either rcu_read_lock() or rtnl lock */
static struct rmnet_port *rmnet_get_port(struct net_device *real_dev)
{
if (rmnet_is_real_dev_registered(real_dev))
return rcu_dereference_rtnl(real_dev->rx_handler_data);
else
return NULL;
}
static struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id)
{
struct rmnet_endpoint *ep;
hlist_for_each_entry_rcu(ep, &port->muxed_ep[mux_id], hlnode) {
if (ep->mux_id == mux_id)
return ep;
}
return NULL;
}

View File

@@ -0,0 +1,174 @@
/* Copyright (c) 2013-2017, 2019 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* RMNET Data configuration engine
*
*/
#include <linux/skbuff.h>
#include <net/gro_cells.h>
#ifndef _RMNET_CONFIG_H_
#define _RMNET_CONFIG_H_
#define RMNET_MAX_LOGICAL_EP 255
#define RMNET_MAX_VEID 4
struct rmnet_endpoint {
u8 mux_id;
struct net_device *egress_dev;
struct hlist_node hlnode;
};
struct rmnet_port_priv_stats {
u64 dl_hdr_last_qmap_vers;
u64 dl_hdr_last_ep_id;
u64 dl_hdr_last_trans_id;
u64 dl_hdr_last_seq;
u64 dl_hdr_last_bytes;
u64 dl_hdr_last_pkts;
u64 dl_hdr_last_flows;
u64 dl_hdr_count;
u64 dl_hdr_total_bytes;
u64 dl_hdr_total_pkts;
u64 dl_trl_last_seq;
u64 dl_trl_count;
};
struct rmnet_egress_agg_params {
u16 agg_size;
u16 agg_count;
u32 agg_time;
};
/* One instance of this structure is instantiated for each real_dev associated
* with rmnet.
*/
struct rmnet_port {
struct net_device *dev;
u32 data_format;
u8 nr_rmnet_devs;
u8 rmnet_mode;
struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP];
struct net_device *bridge_ep;
void *rmnet_perf;
struct rmnet_egress_agg_params egress_agg_params;
/* Protect aggregation related elements */
spinlock_t agg_lock;
struct sk_buff *agg_skb;
int agg_state;
u8 agg_count;
struct timespec agg_time;
struct timespec agg_last;
struct hrtimer hrtimer;
struct work_struct agg_wq;
/* dl marker elements */
struct list_head dl_list;
struct rmnet_port_priv_stats stats;
int dl_marker_flush;
/* Descriptor pool */
spinlock_t desc_pool_lock;
struct rmnet_frag_descriptor_pool *frag_desc_pool;
struct sk_buff *chain_head;
struct sk_buff *chain_tail;
};
extern struct rtnl_link_ops rmnet_link_ops;
struct rmnet_vnd_stats {
u64 rx_pkts;
u64 rx_bytes;
u64 tx_pkts;
u64 tx_bytes;
u32 tx_drops;
};
struct rmnet_pcpu_stats {
struct rmnet_vnd_stats stats;
struct u64_stats_sync syncp;
};
struct rmnet_coal_close_stats {
u64 non_coal;
u64 ip_miss;
u64 trans_miss;
u64 hw_nl;
u64 hw_pkt;
u64 hw_byte;
u64 hw_time;
u64 hw_evict;
u64 coal;
};
struct rmnet_coal_stats {
u64 coal_rx;
u64 coal_pkts;
u64 coal_hdr_nlo_err;
u64 coal_hdr_pkt_err;
u64 coal_csum_err;
u64 coal_reconstruct;
u64 coal_ip_invalid;
u64 coal_trans_invalid;
struct rmnet_coal_close_stats close;
u64 coal_veid[RMNET_MAX_VEID];
};
struct rmnet_priv_stats {
u64 csum_ok;
u64 csum_valid_unset;
u64 csum_validation_failed;
u64 csum_err_bad_buffer;
u64 csum_err_invalid_ip_version;
u64 csum_err_invalid_transport;
u64 csum_fragmented_pkt;
u64 csum_skipped;
u64 csum_sw;
u64 csum_hw;
struct rmnet_coal_stats coal;
};
struct rmnet_priv {
u8 mux_id;
struct net_device *real_dev;
struct rmnet_pcpu_stats __percpu *pcpu_stats;
struct gro_cells gro_cells;
struct rmnet_priv_stats stats;
};
enum rmnet_dl_marker_prio {
RMNET_PERF,
RMNET_SHS,
};
enum rmnet_trace_func {
RMNET_MODULE,
NW_STACK_MODULE,
};
enum rmnet_trace_evt {
RMNET_DLVR_SKB,
RMNET_RCV_FROM_PND,
RMNET_TX_UL_PKT,
NW_STACK_DEV_Q_XMIT,
NW_STACK_NAPI_GRO_FLUSH,
NW_STACK_RX,
NW_STACK_TX,
};
static int rmnet_is_real_dev_registered(const struct net_device *real_dev);
static struct rmnet_port *rmnet_get_port(struct net_device *real_dev);
static struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id);
#endif /* _RMNET_CONFIG_H_ */

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,661 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* RMNET Packet Descriptor Framework
*
*/
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include "rmnet_config.h"
#include "rmnet_descriptor.h"
#include "rmnet_handlers.h"
#include "rmnet_private.h"
#include "rmnet_vnd.h"
#define RMNET_FRAG_DESCRIPTOR_POOL_SIZE 64
#define RMNET_DL_IND_HDR_SIZE (sizeof(struct rmnet_map_dl_ind_hdr) + \
sizeof(struct rmnet_map_header) + \
sizeof(struct rmnet_map_control_command_header))
#define RMNET_DL_IND_TRL_SIZE (sizeof(struct rmnet_map_dl_ind_trl) + \
sizeof(struct rmnet_map_header) + \
sizeof(struct rmnet_map_control_command_header))
typedef void (*rmnet_perf_desc_hook_t)(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port);
typedef void (*rmnet_perf_chain_hook_t)(void);
static struct rmnet_frag_descriptor *
rmnet_get_frag_descriptor(struct rmnet_port *port)
{
struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
struct rmnet_frag_descriptor *frag_desc;
spin_lock(&port->desc_pool_lock);
if (!list_empty(&pool->free_list)) {
frag_desc = list_first_entry(&pool->free_list,
struct rmnet_frag_descriptor,
list);
list_del_init(&frag_desc->list);
} else {
frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC);
if (!frag_desc)
goto out;
INIT_LIST_HEAD(&frag_desc->list);
INIT_LIST_HEAD(&frag_desc->sub_frags);
pool->pool_size++;
}
out:
spin_unlock(&port->desc_pool_lock);
return frag_desc;
}
static void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port)
{
struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
struct page *page = skb_frag_page(&frag_desc->frag);
list_del(&frag_desc->list);
if (page)
put_page(page);
memset(frag_desc, 0, sizeof(*frag_desc));
INIT_LIST_HEAD(&frag_desc->list);
INIT_LIST_HEAD(&frag_desc->sub_frags);
spin_lock(&port->desc_pool_lock);
list_add_tail(&frag_desc->list, &pool->free_list);
spin_unlock(&port->desc_pool_lock);
}
static void rmnet_descriptor_add_frag(struct rmnet_port *port, struct list_head *list,
struct page *p, u32 page_offset, u32 len)
{
struct rmnet_frag_descriptor *frag_desc;
frag_desc = rmnet_get_frag_descriptor(port);
if (!frag_desc)
return;
rmnet_frag_fill(frag_desc, p, page_offset, len);
list_add_tail(&frag_desc->list, list);
}
static u8 rmnet_frag_do_flow_control(struct rmnet_map_header *qmap,
struct rmnet_port *port,
int enable)
{
struct rmnet_map_control_command *cmd;
struct rmnet_endpoint *ep;
struct net_device *vnd;
u16 ip_family;
u16 fc_seq;
u32 qos_id;
u8 mux_id;
int r;
mux_id = qmap->mux_id;
cmd = (struct rmnet_map_control_command *)
((char *)qmap + sizeof(*qmap));
if (mux_id >= RMNET_MAX_LOGICAL_EP)
return RX_HANDLER_CONSUMED;
ep = rmnet_get_endpoint(port, mux_id);
if (!ep)
return RX_HANDLER_CONSUMED;
vnd = ep->egress_dev;
ip_family = cmd->flow_control.ip_family;
fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
qos_id = ntohl(cmd->flow_control.qos_id);
/* Ignore the ip family and pass the sequence number for both v4 and v6
* sequence. User space does not support creating dedicated flows for
* the 2 protocols
*/
r = rmnet_vnd_do_flow_control(vnd, enable);
if (r)
return RMNET_MAP_COMMAND_UNSUPPORTED;
else
return RMNET_MAP_COMMAND_ACK;
}
static void rmnet_frag_send_ack(struct rmnet_map_header *qmap,
unsigned char type,
struct rmnet_port *port)
{
struct rmnet_map_control_command *cmd;
struct net_device *dev = port->dev;
struct sk_buff *skb;
u16 alloc_len = ntohs(qmap->pkt_len) + sizeof(*qmap);
skb = alloc_skb(alloc_len, GFP_ATOMIC);
if (!skb)
return;
skb->protocol = htons(ETH_P_MAP);
skb->dev = dev;
cmd = rmnet_map_get_cmd_start(skb);
cmd->cmd_type = type & 0x03;
netif_tx_lock(dev);
dev->netdev_ops->ndo_start_xmit(skb, dev);
netif_tx_unlock(dev);
}
/* Process MAP command frame and send N/ACK message as appropriate. Message cmd
* name is decoded here and appropriate handler is called.
*/
static void rmnet_frag_command(struct rmnet_map_header *qmap, struct rmnet_port *port)
{
struct rmnet_map_control_command *cmd;
unsigned char command_name;
unsigned char rc = 0;
cmd = (struct rmnet_map_control_command *)
((char *)qmap + sizeof(*qmap));
command_name = cmd->command_name;
switch (command_name) {
case RMNET_MAP_COMMAND_FLOW_ENABLE:
rc = rmnet_frag_do_flow_control(qmap, port, 1);
break;
case RMNET_MAP_COMMAND_FLOW_DISABLE:
rc = rmnet_frag_do_flow_control(qmap, port, 0);
break;
default:
rc = RMNET_MAP_COMMAND_UNSUPPORTED;
break;
}
if (rc == RMNET_MAP_COMMAND_ACK)
rmnet_frag_send_ack(qmap, rc, port);
}
static void rmnet_frag_deaggregate(skb_frag_t *frag, struct rmnet_port *port,
struct list_head *list)
{
struct rmnet_map_header *maph;
u8 *data = skb_frag_address(frag);
u32 offset = 0;
u32 packet_len;
while (offset < skb_frag_size(frag)) {
maph = (struct rmnet_map_header *)data;
packet_len = ntohs(maph->pkt_len);
/* Some hardware can send us empty frames. Catch them */
if (packet_len == 0)
return;
packet_len += sizeof(*maph);
if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
WARN_ON(1);
} else if (port->data_format &
(RMNET_FLAGS_INGRESS_MAP_CKSUMV5 |
RMNET_FLAGS_INGRESS_COALESCE) && !maph->cd_bit) {
u32 hsize = 0;
u8 type;
type = ((struct rmnet_map_v5_coal_header *)
(data + sizeof(*maph)))->header_type;
switch (type) {
case RMNET_MAP_HEADER_TYPE_COALESCING:
hsize = sizeof(struct rmnet_map_v5_coal_header);
break;
case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
hsize = sizeof(struct rmnet_map_v5_csum_header);
break;
}
packet_len += hsize;
} else {
//qmap_hex_dump(__func__, data, 64);
WARN_ON(1);
}
if ((int)skb_frag_size(frag) - (int)packet_len < 0)
return;
rmnet_descriptor_add_frag(port, list, skb_frag_page(frag),
frag->page_offset + offset,
packet_len);
offset += packet_len;
data += packet_len;
}
}
/* Allocate and populate an skb to contain the packet represented by the
* frag descriptor.
*/
static struct sk_buff *rmnet_alloc_skb(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port)
{
struct sk_buff *head_skb, *current_skb, *skb;
struct skb_shared_info *shinfo;
struct rmnet_frag_descriptor *sub_frag, *tmp;
/* Use the exact sizes if we know them (i.e. RSB/RSC, rmnet_perf) */
if (frag_desc->hdrs_valid) {
u16 hdr_len = frag_desc->ip_len + frag_desc->trans_len;
head_skb = alloc_skb(hdr_len + RMNET_MAP_DESC_HEADROOM,
GFP_ATOMIC);
if (!head_skb)
return NULL;
skb_reserve(head_skb, RMNET_MAP_DESC_HEADROOM);
skb_put_data(head_skb, frag_desc->hdr_ptr, hdr_len);
skb_reset_network_header(head_skb);
if (frag_desc->trans_len)
skb_set_transport_header(head_skb, frag_desc->ip_len);
/* Packets that have no data portion don't need any frags */
if (hdr_len == skb_frag_size(&frag_desc->frag))
goto skip_frags;
/* If the headers we added are the start of the page,
* we don't want to add them twice
*/
if (frag_desc->hdr_ptr == rmnet_frag_data_ptr(frag_desc)) {
if (!rmnet_frag_pull(frag_desc, port, hdr_len)) {
kfree_skb(head_skb);
return NULL;
}
}
} else {
/* Allocate enough space to avoid penalties in the stack
* from __pskb_pull_tail()
*/
head_skb = alloc_skb(256 + RMNET_MAP_DESC_HEADROOM,
GFP_ATOMIC);
if (!head_skb)
return NULL;
skb_reserve(head_skb, RMNET_MAP_DESC_HEADROOM);
}
/* Add main fragment */
get_page(skb_frag_page(&frag_desc->frag));
skb_add_rx_frag(head_skb, 0, skb_frag_page(&frag_desc->frag),
frag_desc->frag.page_offset,
skb_frag_size(&frag_desc->frag),
skb_frag_size(&frag_desc->frag));
shinfo = skb_shinfo(head_skb);
current_skb = head_skb;
/* Add in any frags from rmnet_perf */
list_for_each_entry_safe(sub_frag, tmp, &frag_desc->sub_frags, list) {
skb_frag_t *frag;
u32 frag_size;
frag = &sub_frag->frag;
frag_size = skb_frag_size(frag);
add_frag:
if (shinfo->nr_frags < MAX_SKB_FRAGS) {
get_page(skb_frag_page(frag));
skb_add_rx_frag(current_skb, shinfo->nr_frags,
skb_frag_page(frag), frag->page_offset,
frag_size, frag_size);
if (current_skb != head_skb) {
head_skb->len += frag_size;
head_skb->data_len += frag_size;
}
} else {
/* Alloc a new skb and try again */
skb = alloc_skb(0, GFP_ATOMIC);
if (!skb)
break;
if (current_skb == head_skb)
shinfo->frag_list = skb;
else
current_skb->next = skb;
current_skb = skb;
shinfo = skb_shinfo(current_skb);
goto add_frag;
}
rmnet_recycle_frag_descriptor(sub_frag, port);
}
skip_frags:
head_skb->dev = frag_desc->dev;
rmnet_set_skb_proto(head_skb);
/* Handle any header metadata that needs to be updated after RSB/RSC
* segmentation
*/
if (frag_desc->ip_id_set) {
struct iphdr *iph;
iph = (struct iphdr *)rmnet_map_data_ptr(head_skb);
csum_replace2(&iph->check, iph->id, frag_desc->ip_id);
iph->id = frag_desc->ip_id;
}
if (frag_desc->tcp_seq_set) {
struct tcphdr *th;
th = (struct tcphdr *)
(rmnet_map_data_ptr(head_skb) + frag_desc->ip_len);
th->seq = frag_desc->tcp_seq;
}
/* Handle csum offloading */
if (frag_desc->csum_valid && frag_desc->hdrs_valid) {
/* Set the partial checksum information */
//rmnet_frag_partial_csum(head_skb, frag_desc);
WARN_ON(1);
} else if (frag_desc->csum_valid) {
/* Non-RSB/RSC/perf packet. The current checksum is fine */
head_skb->ip_summed = CHECKSUM_UNNECESSARY;
} else if (frag_desc->hdrs_valid &&
(frag_desc->trans_proto == IPPROTO_TCP ||
frag_desc->trans_proto == IPPROTO_UDP)) {
/* Unfortunately, we have to fake a bad checksum here, since
* the original bad value is lost by the hardware. The only
* reliable way to do it is to calculate the actual checksum
* and corrupt it.
*/
__sum16 *check;
__wsum csum;
unsigned int offset = skb_transport_offset(head_skb);
__sum16 pseudo;
WARN_ON(1);
/* Calculate pseudo header and update header fields */
if (frag_desc->ip_proto == 4) {
struct iphdr *iph = ip_hdr(head_skb);
__be16 tot_len = htons(head_skb->len);
csum_replace2(&iph->check, iph->tot_len, tot_len);
iph->tot_len = tot_len;
pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
head_skb->len -
frag_desc->ip_len,
frag_desc->trans_proto, 0);
} else {
struct ipv6hdr *ip6h = ipv6_hdr(head_skb);
ip6h->payload_len = htons(head_skb->len -
sizeof(*ip6h));
pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
head_skb->len -
frag_desc->ip_len,
frag_desc->trans_proto, 0);
}
if (frag_desc->trans_proto == IPPROTO_TCP) {
check = &tcp_hdr(head_skb)->check;
} else {
udp_hdr(head_skb)->len = htons(head_skb->len -
frag_desc->ip_len);
check = &udp_hdr(head_skb)->check;
}
*check = pseudo;
csum = skb_checksum(head_skb, offset, head_skb->len - offset,
0);
/* Add 1 to corrupt. This cannot produce a final value of 0
* since csum_fold() can't return a value of 0xFFFF
*/
*check = csum16_add(csum_fold(csum), htons(1));
head_skb->ip_summed = CHECKSUM_NONE;
}
/* Handle any rmnet_perf metadata */
if (frag_desc->hash) {
head_skb->hash = frag_desc->hash;
head_skb->sw_hash = 1;
}
if (frag_desc->flush_shs)
head_skb->cb[0] = 1;
/* Handle coalesced packets */
//if (frag_desc->gso_segs > 1)
// rmnet_frag_gso_stamp(head_skb, frag_desc);
return head_skb;
}
/* Deliver the packets contained within a frag descriptor */
static void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port)
{
struct sk_buff *skb;
skb = rmnet_alloc_skb(frag_desc, port);
if (skb)
rmnet_deliver_skb(skb, port);
rmnet_recycle_frag_descriptor(frag_desc, port);
}
/* Process a QMAPv5 packet header */
static int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port,
struct list_head *list,
u16 len)
{
int rc = 0;
switch (rmnet_frag_get_next_hdr_type(frag_desc)) {
case RMNET_MAP_HEADER_TYPE_COALESCING:
rc = -1;
WARN_ON(1);
break;
case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
if (rmnet_frag_get_csum_valid(frag_desc)) {
frag_desc->csum_valid = true;
} else {
}
if (!rmnet_frag_pull(frag_desc, port,
sizeof(struct rmnet_map_header) +
sizeof(struct rmnet_map_v5_csum_header))) {
rc = -EINVAL;
break;
}
frag_desc->hdr_ptr = rmnet_frag_data_ptr(frag_desc);
/* Remove padding only for csum offload packets.
* Coalesced packets should never have padding.
*/
if (!rmnet_frag_trim(frag_desc, port, len)) {
rc = -EINVAL;
break;
}
list_del_init(&frag_desc->list);
list_add_tail(&frag_desc->list, list);
break;
default:
//qmap_hex_dump(__func__, rmnet_frag_data_ptr(frag_desc), 64);
rc = -EINVAL;
break;
}
return rc;
}
static void
__rmnet_frag_ingress_handler(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port)
{
struct rmnet_map_header *qmap;
struct rmnet_endpoint *ep;
struct rmnet_frag_descriptor *frag, *tmp;
LIST_HEAD(segs);
u16 len, pad;
u8 mux_id;
qmap = (struct rmnet_map_header *)skb_frag_address(&frag_desc->frag);
mux_id = qmap->mux_id;
pad = qmap->pad_len;
len = ntohs(qmap->pkt_len) - pad;
if (qmap->cd_bit) {
if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
//rmnet_frag_flow_command(qmap, port, len);
goto recycle;
}
if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
rmnet_frag_command(qmap, port);
goto recycle;
}
if (mux_id >= RMNET_MAX_LOGICAL_EP)
goto recycle;
ep = rmnet_get_endpoint(port, mux_id);
if (!ep)
goto recycle;
frag_desc->dev = ep->egress_dev;
/* Handle QMAPv5 packet */
if (qmap->next_hdr &&
(port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
RMNET_FLAGS_INGRESS_MAP_CKSUMV5))) {
if (rmnet_frag_process_next_hdr_packet(frag_desc, port, &segs,
len))
goto recycle;
} else {
/* We only have the main QMAP header to worry about */
if (!rmnet_frag_pull(frag_desc, port, sizeof(*qmap)))
return;
frag_desc->hdr_ptr = rmnet_frag_data_ptr(frag_desc);
if (!rmnet_frag_trim(frag_desc, port, len))
return;
list_add_tail(&frag_desc->list, &segs);
}
list_for_each_entry_safe(frag, tmp, &segs, list) {
list_del_init(&frag->list);
rmnet_frag_deliver(frag, port);
}
return;
recycle:
rmnet_recycle_frag_descriptor(frag_desc, port);
}
static void rmnet_frag_ingress_handler(struct sk_buff *skb,
struct rmnet_port *port)
{
LIST_HEAD(desc_list);
int i = 0;
struct rmnet_nss_cb *nss_cb;
/* Deaggregation and freeing of HW originating
* buffers is done within here
*/
while (skb) {
struct sk_buff *skb_frag;
port->chain_head = NULL;
port->chain_tail = NULL;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
rmnet_frag_deaggregate(&skb_shinfo(skb)->frags[i], port,
&desc_list);
if (!list_empty(&desc_list)) {
struct rmnet_frag_descriptor *frag_desc, *tmp;
list_for_each_entry_safe(frag_desc, tmp,
&desc_list, list) {
list_del_init(&frag_desc->list);
__rmnet_frag_ingress_handler(frag_desc,
port);
}
}
}
nss_cb = rcu_dereference(rmnet_nss_callbacks);
if (nss_cb && port->chain_head) {
port->chain_head->cb[0] = 0;
netif_receive_skb(port->chain_head);
}
skb_frag = skb_shinfo(skb)->frag_list;
skb_shinfo(skb)->frag_list = NULL;
consume_skb(skb);
skb = skb_frag;
}
}
void rmnet_descriptor_deinit(struct rmnet_port *port)
{
struct rmnet_frag_descriptor_pool *pool;
struct rmnet_frag_descriptor *frag_desc, *tmp;
pool = port->frag_desc_pool;
list_for_each_entry_safe(frag_desc, tmp, &pool->free_list, list) {
kfree(frag_desc);
pool->pool_size--;
}
kfree(pool);
}
int rmnet_descriptor_init(struct rmnet_port *port)
{
struct rmnet_frag_descriptor_pool *pool;
int i;
spin_lock_init(&port->desc_pool_lock);
pool = kzalloc(sizeof(*pool), GFP_ATOMIC);
if (!pool)
return -ENOMEM;
INIT_LIST_HEAD(&pool->free_list);
port->frag_desc_pool = pool;
for (i = 0; i < RMNET_FRAG_DESCRIPTOR_POOL_SIZE; i++) {
struct rmnet_frag_descriptor *frag_desc;
frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC);
if (!frag_desc)
return -ENOMEM;
INIT_LIST_HEAD(&frag_desc->list);
INIT_LIST_HEAD(&frag_desc->sub_frags);
list_add_tail(&frag_desc->list, &pool->free_list);
pool->pool_size++;
}
return 0;
}

View File

@@ -0,0 +1,146 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* RMNET Packet Descriptor Framework
*
*/
#ifndef _RMNET_DESCRIPTOR_H_
#define _RMNET_DESCRIPTOR_H_
#include <linux/netdevice.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
struct rmnet_frag_descriptor_pool {
struct list_head free_list;
u32 pool_size;
};
struct rmnet_frag_descriptor {
struct list_head list;
struct list_head sub_frags;
skb_frag_t frag;
u8 *hdr_ptr;
struct net_device *dev;
u32 hash;
__be32 tcp_seq;
__be16 ip_id;
u16 data_offset;
u16 gso_size;
u16 gso_segs;
u16 ip_len;
u16 trans_len;
u8 ip_proto;
u8 trans_proto;
u8 pkt_id;
u8 csum_valid:1,
hdrs_valid:1,
ip_id_set:1,
tcp_seq_set:1,
flush_shs:1,
reserved:3;
};
/* Descriptor management */
static struct rmnet_frag_descriptor *
rmnet_get_frag_descriptor(struct rmnet_port *port);
static void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port);
static void rmnet_descriptor_add_frag(struct rmnet_port *port, struct list_head *list,
struct page *p, u32 page_offset, u32 len);
/* QMAP command packets */
/* Ingress data handlers */
static void rmnet_frag_deaggregate(skb_frag_t *frag, struct rmnet_port *port,
struct list_head *list);
static void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port);
static int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port,
struct list_head *list,
u16 len);
static void rmnet_frag_ingress_handler(struct sk_buff *skb,
struct rmnet_port *port);
static int rmnet_descriptor_init(struct rmnet_port *port);
static void rmnet_descriptor_deinit(struct rmnet_port *port);
static inline void *rmnet_frag_data_ptr(struct rmnet_frag_descriptor *frag_desc)
{
return skb_frag_address(&frag_desc->frag);
}
static inline void *rmnet_frag_pull(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port,
unsigned int size)
{
if (size >= skb_frag_size(&frag_desc->frag)) {
pr_info("%s(): Pulling %u bytes from %u byte pkt. Dropping\n",
__func__, size, skb_frag_size(&frag_desc->frag));
rmnet_recycle_frag_descriptor(frag_desc, port);
return NULL;
}
frag_desc->frag.page_offset += size;
skb_frag_size_sub(&frag_desc->frag, size);
return rmnet_frag_data_ptr(frag_desc);
}
static inline void *rmnet_frag_trim(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port,
unsigned int size)
{
if (!size) {
pr_info("%s(): Trimming %u byte pkt to 0. Dropping\n",
__func__, skb_frag_size(&frag_desc->frag));
rmnet_recycle_frag_descriptor(frag_desc, port);
return NULL;
}
if (size < skb_frag_size(&frag_desc->frag))
skb_frag_size_set(&frag_desc->frag, size);
return rmnet_frag_data_ptr(frag_desc);
}
static inline void rmnet_frag_fill(struct rmnet_frag_descriptor *frag_desc,
struct page *p, u32 page_offset, u32 len)
{
get_page(p);
__skb_frag_set_page(&frag_desc->frag, p);
skb_frag_size_set(&frag_desc->frag, len);
frag_desc->frag.page_offset = page_offset;
}
static inline u8
rmnet_frag_get_next_hdr_type(struct rmnet_frag_descriptor *frag_desc)
{
unsigned char *data = rmnet_frag_data_ptr(frag_desc);
data += sizeof(struct rmnet_map_header);
return ((struct rmnet_map_v5_coal_header *)data)->header_type;
}
static inline bool
rmnet_frag_get_csum_valid(struct rmnet_frag_descriptor *frag_desc)
{
unsigned char *data = rmnet_frag_data_ptr(frag_desc);
data += sizeof(struct rmnet_map_header);
return ((struct rmnet_map_v5_csum_header *)data)->csum_valid_required;
}
#endif /* _RMNET_DESCRIPTOR_H_ */

View File

@@ -0,0 +1,374 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* RMNET Data ingress/egress handler
*
*/
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/sock.h>
#include <linux/tracepoint.h>
#include "rmnet_private.h"
#include "rmnet_config.h"
#include "rmnet_vnd.h"
#include "rmnet_map.h"
#include "rmnet_handlers.h"
#include "rmnet_descriptor.h"
#define RMNET_IP_VERSION_4 0x40
#define RMNET_IP_VERSION_6 0x60
/* Helper Functions */
static void rmnet_set_skb_proto(struct sk_buff *skb)
{
switch (rmnet_map_data_ptr(skb)[0] & 0xF0) {
case RMNET_IP_VERSION_4:
skb->protocol = htons(ETH_P_IP);
break;
case RMNET_IP_VERSION_6:
skb->protocol = htons(ETH_P_IPV6);
break;
default:
skb->protocol = htons(ETH_P_MAP);
break;
}
}
/* Generic handler */
static void
rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port)
{
struct rmnet_nss_cb *nss_cb;
rmnet_vnd_rx_fixup(skb->dev, skb->len);
/* Pass off the packet to NSS driver if we can */
nss_cb = rcu_dereference(rmnet_nss_callbacks);
if (nss_cb) {
if (!port->chain_head)
port->chain_head = skb;
else
skb_shinfo(port->chain_tail)->frag_list = skb;
port->chain_tail = skb;
return;
}
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
skb->pkt_type = PACKET_HOST;
skb_set_mac_header(skb, 0);
//if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
//} else {
//if (!rmnet_check_skb_can_gro(skb))
// gro_cells_receive(&priv->gro_cells, skb);
//else
netif_receive_skb(skb);
//}
}
/* Deliver a list of skbs after undoing coalescing */
static void rmnet_deliver_skb_list(struct sk_buff_head *head,
struct rmnet_port *port)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue(head))) {
rmnet_set_skb_proto(skb);
rmnet_deliver_skb(skb, port);
}
}
/* MAP handler */
static void
_rmnet_map_ingress_handler(struct sk_buff *skb,
struct rmnet_port *port)
{
struct rmnet_map_header *qmap;
struct rmnet_endpoint *ep;
struct sk_buff_head list;
u16 len, pad;
u8 mux_id;
/* We don't need the spinlock since only we touch this */
__skb_queue_head_init(&list);
qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb);
if (qmap->cd_bit) {
if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
//if (!rmnet_map_flow_command(skb, port, false))
return;
}
if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
return rmnet_map_command(skb, port);
goto free_skb;
}
mux_id = qmap->mux_id;
pad = qmap->pad_len;
len = ntohs(qmap->pkt_len) - pad;
if (mux_id >= RMNET_MAX_LOGICAL_EP)
goto free_skb;
ep = rmnet_get_endpoint(port, mux_id);
if (!ep)
goto free_skb;
skb->dev = ep->egress_dev;
/* Handle QMAPv5 packet */
if (qmap->next_hdr &&
(port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
RMNET_FLAGS_INGRESS_MAP_CKSUMV5))) {
if (rmnet_map_process_next_hdr_packet(skb, &list, len))
goto free_skb;
} else {
/* We only have the main QMAP header to worry about */
pskb_pull(skb, sizeof(*qmap));
rmnet_set_skb_proto(skb);
if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
//if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
// skb->ip_summed = CHECKSUM_UNNECESSARY;
}
pskb_trim(skb, len);
/* Push the single packet onto the list */
__skb_queue_tail(&list, skb);
}
rmnet_deliver_skb_list(&list, port);
return;
free_skb:
kfree_skb(skb);
}
static void
rmnet_map_ingress_handler(struct sk_buff *skb,
struct rmnet_port *port)
{
struct sk_buff *skbn;
if (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
RMNET_FLAGS_INGRESS_MAP_CKSUMV5)) {
if (skb_is_nonlinear(skb)) {
rmnet_frag_ingress_handler(skb, port);
return;
}
}
/* Deaggregation and freeing of HW originating
* buffers is done within here
*/
while (skb) {
struct sk_buff *skb_frag = skb_shinfo(skb)->frag_list;
skb_shinfo(skb)->frag_list = NULL;
while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL) {
_rmnet_map_ingress_handler(skbn, port);
if (skbn == skb)
goto next_skb;
}
consume_skb(skb);
next_skb:
skb = skb_frag;
}
}
static int rmnet_map_egress_handler(struct sk_buff *skb,
struct rmnet_port *port, u8 mux_id,
struct net_device *orig_dev)
{
int required_headroom, additional_header_len, csum_type;
struct rmnet_map_header *map_header;
additional_header_len = 0;
required_headroom = sizeof(struct rmnet_map_header);
csum_type = 0;
if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) {
additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV4;
} else if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5) {
additional_header_len = sizeof(struct rmnet_map_v5_csum_header);
csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV5;
}
required_headroom += additional_header_len;
if (skb_headroom(skb) < required_headroom) {
if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
return -ENOMEM;
}
if (csum_type)
rmnet_map_checksum_uplink_packet(skb, orig_dev, csum_type);
map_header = rmnet_map_add_map_header(skb, additional_header_len, 0,
port);
if (!map_header)
return -ENOMEM;
map_header->mux_id = mux_id;
if (port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION) {
if (rmnet_map_tx_agg_skip(skb, required_headroom))
goto done;
rmnet_map_tx_aggregate(skb, port);
return -EINPROGRESS;
}
done:
skb->protocol = htons(ETH_P_MAP);
return 0;
}
static void
rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
{
if (bridge_dev) {
skb->dev = bridge_dev;
dev_queue_xmit(skb);
}
}
/* Ingress / Egress Entry Points */
/* Processes packet as per ingress data format for receiving device. Logical
* endpoint is determined from packet inspection. Packet is then sent to the
* egress device listed in the logical endpoint configuration.
*/
static rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
struct rmnet_port *port;
struct net_device *dev;
if (!skb)
goto done;
if (skb->pkt_type == PACKET_LOOPBACK)
return RX_HANDLER_PASS;
dev = skb->dev;
port = rmnet_get_port(dev);
port->chain_head = NULL;
port->chain_tail = NULL;
switch (port->rmnet_mode) {
case RMNET_EPMODE_VND:
rmnet_map_ingress_handler(skb, port);
break;
case RMNET_EPMODE_BRIDGE:
rmnet_bridge_handler(skb, port->bridge_ep);
break;
}
done:
return RX_HANDLER_CONSUMED;
}
static rx_handler_result_t rmnet_rx_priv_handler(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
struct rmnet_nss_cb *nss_cb;
if (!skb)
return RX_HANDLER_CONSUMED;
if (nss_debug)
	printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n",
	       __func__, skb, skb->len, skb->protocol, skb->hdr_len);
if (skb->pkt_type == PACKET_LOOPBACK)
return RX_HANDLER_PASS;
/* Check this so that we don't loop around netif_receive_skb */
if (skb->cb[0] == 1) {
skb->cb[0] = 0;
skb->dev->stats.rx_packets++;
return RX_HANDLER_PASS;
}
while (skb) {
struct sk_buff *skb_frag = skb_shinfo(skb)->frag_list;
skb_shinfo(skb)->frag_list = NULL;
nss_cb = rcu_dereference(rmnet_nss_callbacks);
if (nss_cb)
nss_cb->nss_tx(skb);
skb = skb_frag;
}
return RX_HANDLER_CONSUMED;
}
/* Modifies packet as per logical endpoint configuration and egress data format
* for egress device configured in logical endpoint. Packet is then transmitted
* on the egress device.
*/
static void rmnet_egress_handler(struct sk_buff *skb)
{
struct net_device *orig_dev;
struct rmnet_port *port;
struct rmnet_priv *priv;
u8 mux_id;
int err;
u32 skb_len;
skb_orphan(skb);
orig_dev = skb->dev;
priv = netdev_priv(orig_dev);
skb->dev = priv->real_dev;
mux_id = priv->mux_id;
port = rmnet_get_port(skb->dev);
if (!port)
goto drop;
skb_len = skb->len;
err = rmnet_map_egress_handler(skb, port, mux_id, orig_dev);
if (err == -ENOMEM)
goto drop;
else if (err == -EINPROGRESS) {
rmnet_vnd_tx_fixup(orig_dev, skb_len);
return;
}
rmnet_vnd_tx_fixup(orig_dev, skb_len);
dev_queue_xmit(skb);
return;
drop:
this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
kfree_skb(skb);
}

View File

@@ -0,0 +1,32 @@
/* Copyright (c) 2013, 2016-2017, 2019
* The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* RMNET Data ingress/egress handler
*
*/
#ifndef _RMNET_HANDLERS_H_
#define _RMNET_HANDLERS_H_
#include "rmnet_config.h"
enum rmnet_packet_context {
RMNET_NET_RX_CTX,
RMNET_WQ_CTX,
};
static void rmnet_egress_handler(struct sk_buff *skb);
static void rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port);
static void rmnet_set_skb_proto(struct sk_buff *skb);
static rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb);
static rx_handler_result_t rmnet_rx_priv_handler(struct sk_buff **pskb);
#endif /* _RMNET_HANDLERS_H_ */

View File

@@ -0,0 +1,272 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _RMNET_MAP_H_
#define _RMNET_MAP_H_
#include <linux/skbuff.h>
#include "rmnet_config.h"
struct rmnet_map_control_command {
u8 command_name;
u8 cmd_type:2;
u8 reserved:6;
u16 reserved2;
u32 transaction_id;
union {
struct {
u16 ip_family:2;
u16 reserved:14;
__be16 flow_control_seq_num;
__be32 qos_id;
} flow_control;
u8 data[0];
};
} __aligned(1);
enum rmnet_map_commands {
RMNET_MAP_COMMAND_NONE,
RMNET_MAP_COMMAND_FLOW_DISABLE,
RMNET_MAP_COMMAND_FLOW_ENABLE,
RMNET_MAP_COMMAND_FLOW_START = 7,
RMNET_MAP_COMMAND_FLOW_END = 8,
/* These should always be the last 2 elements */
RMNET_MAP_COMMAND_UNKNOWN,
RMNET_MAP_COMMAND_ENUM_LENGTH
};
enum rmnet_map_v5_header_type {
RMNET_MAP_HEADER_TYPE_UNKNOWN,
RMNET_MAP_HEADER_TYPE_COALESCING = 0x1,
RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD = 0x2,
RMNET_MAP_HEADER_TYPE_ENUM_LENGTH
};
enum rmnet_map_v5_close_type {
RMNET_MAP_COAL_CLOSE_NON_COAL,
RMNET_MAP_COAL_CLOSE_IP_MISS,
RMNET_MAP_COAL_CLOSE_TRANS_MISS,
RMNET_MAP_COAL_CLOSE_HW,
RMNET_MAP_COAL_CLOSE_COAL,
};
enum rmnet_map_v5_close_value {
RMNET_MAP_COAL_CLOSE_HW_NL,
RMNET_MAP_COAL_CLOSE_HW_PKT,
RMNET_MAP_COAL_CLOSE_HW_BYTE,
RMNET_MAP_COAL_CLOSE_HW_TIME,
RMNET_MAP_COAL_CLOSE_HW_EVICT,
};
/* Main QMAP header */
struct rmnet_map_header {
u8 pad_len:6;
u8 next_hdr:1;
u8 cd_bit:1;
u8 mux_id;
__be16 pkt_len;
} __aligned(1);
/* QMAP v5 headers */
struct rmnet_map_v5_csum_header {
u8 next_hdr:1;
u8 header_type:7;
u8 hw_reserved:7;
u8 csum_valid_required:1;
__be16 reserved;
} __aligned(1);
struct rmnet_map_v5_nl_pair {
__be16 pkt_len;
u8 csum_error_bitmap;
u8 num_packets;
} __aligned(1);
/* NLO: Number-length object */
#define RMNET_MAP_V5_MAX_NLOS (6)
#define RMNET_MAP_V5_MAX_PACKETS (48)
struct rmnet_map_v5_coal_header {
u8 next_hdr:1;
u8 header_type:7;
u8 reserved1:4;
u8 num_nlos:3;
u8 csum_valid:1;
u8 close_type:4;
u8 close_value:4;
u8 reserved2:4;
u8 virtual_channel_id:4;
struct rmnet_map_v5_nl_pair nl_pairs[RMNET_MAP_V5_MAX_NLOS];
} __aligned(1);
/* QMAP v4 headers */
struct rmnet_map_dl_csum_trailer {
u8 reserved1;
u8 valid:1;
u8 reserved2:7;
u16 csum_start_offset;
u16 csum_length;
__be16 csum_value;
} __aligned(1);
struct rmnet_map_ul_csum_header {
__be16 csum_start_offset;
u16 csum_insert_offset:14;
u16 udp_ind:1;
u16 csum_enabled:1;
} __aligned(1);
struct rmnet_map_control_command_header {
u8 command_name;
u8 cmd_type:2;
u8 reserved:5;
u8 e:1;
u16 source_id:15;
u16 ext:1;
u32 transaction_id;
} __aligned(1);
struct rmnet_map_flow_info_le {
__be32 mux_id;
__be32 flow_id;
__be32 bytes;
__be32 pkts;
} __aligned(1);
struct rmnet_map_flow_info_be {
u32 mux_id;
u32 flow_id;
u32 bytes;
u32 pkts;
} __aligned(1);
struct rmnet_map_dl_ind_hdr {
union {
struct {
u32 seq;
u32 bytes;
u32 pkts;
u32 flows;
struct rmnet_map_flow_info_le flow[0];
} le __aligned(1);
struct {
__be32 seq;
__be32 bytes;
__be32 pkts;
__be32 flows;
struct rmnet_map_flow_info_be flow[0];
} be __aligned(1);
} __aligned(1);
} __aligned(1);
struct rmnet_map_dl_ind_trl {
union {
__be32 seq_be;
u32 seq_le;
} __aligned(1);
} __aligned(1);
struct rmnet_map_dl_ind {
u8 priority;
union {
void (*dl_hdr_handler)(struct rmnet_map_dl_ind_hdr *);
void (*dl_hdr_handler_v2)(struct rmnet_map_dl_ind_hdr *,
struct
rmnet_map_control_command_header *);
} __aligned(1);
union {
void (*dl_trl_handler)(struct rmnet_map_dl_ind_trl *);
void (*dl_trl_handler_v2)(struct rmnet_map_dl_ind_trl *,
struct
rmnet_map_control_command_header *);
} __aligned(1);
struct list_head list;
};
#define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \
(Y)->data)->mux_id)
#define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \
(Y)->data)->cd_bit)
#define RMNET_MAP_GET_PAD(Y) (((struct rmnet_map_header *) \
(Y)->data)->pad_len)
#define RMNET_MAP_GET_CMD_START(Y) ((struct rmnet_map_control_command *) \
((Y)->data + \
sizeof(struct rmnet_map_header)))
#define RMNET_MAP_GET_LENGTH(Y) (ntohs(((struct rmnet_map_header *) \
(Y)->data)->pkt_len))
#define RMNET_MAP_DEAGGR_SPACING 64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
#define RMNET_MAP_DESC_HEADROOM 128
#define RMNET_MAP_COMMAND_REQUEST 0
#define RMNET_MAP_COMMAND_ACK 1
#define RMNET_MAP_COMMAND_UNSUPPORTED 2
#define RMNET_MAP_COMMAND_INVALID 3
#define RMNET_MAP_NO_PAD_BYTES 0
#define RMNET_MAP_ADD_PAD_BYTES 1
static inline unsigned char *rmnet_map_data_ptr(struct sk_buff *skb)
{
/* Nonlinear packets we receive are entirely within frag 0 */
if (skb_is_nonlinear(skb) && skb->len == skb->data_len)
return skb_frag_address(skb_shinfo(skb)->frags);
return skb->data;
}
static inline struct rmnet_map_control_command *
rmnet_map_get_cmd_start(struct sk_buff *skb)
{
unsigned char *data = rmnet_map_data_ptr(skb);
data += sizeof(struct rmnet_map_header);
return (struct rmnet_map_control_command *)data;
}
static inline u8 rmnet_map_get_next_hdr_type(struct sk_buff *skb)
{
unsigned char *data = rmnet_map_data_ptr(skb);
data += sizeof(struct rmnet_map_header);
return ((struct rmnet_map_v5_coal_header *)data)->header_type;
}
static inline bool rmnet_map_get_csum_valid(struct sk_buff *skb)
{
unsigned char *data = rmnet_map_data_ptr(skb);
data += sizeof(struct rmnet_map_header);
return ((struct rmnet_map_v5_csum_header *)data)->csum_valid_required;
}
static struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
struct rmnet_port *port);
static struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
int hdrlen, int pad,
struct rmnet_port *port);
static void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
static void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
struct net_device *orig_dev,
int csum_type);
static int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
struct sk_buff_head *list,
u16 len);
static int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset);
static void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port);
static void rmnet_map_tx_aggregate_init(struct rmnet_port *port);
static void rmnet_map_tx_aggregate_exit(struct rmnet_port *port);
static void rmnet_map_cmd_init(struct rmnet_port *port);
static void rmnet_map_cmd_exit(struct rmnet_port *port);
#endif /* _RMNET_MAP_H_ */

View File

@@ -0,0 +1,143 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/netdevice.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"
#include "rmnet_vnd.h"
#define RMNET_DL_IND_HDR_SIZE (sizeof(struct rmnet_map_dl_ind_hdr) + \
sizeof(struct rmnet_map_header) + \
sizeof(struct rmnet_map_control_command_header))
#define RMNET_MAP_CMD_SIZE (sizeof(struct rmnet_map_header) + \
sizeof(struct rmnet_map_control_command_header))
#define RMNET_DL_IND_TRL_SIZE (sizeof(struct rmnet_map_dl_ind_trl) + \
sizeof(struct rmnet_map_header) + \
sizeof(struct rmnet_map_control_command_header))
static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
struct rmnet_port *port,
int enable)
{
struct rmnet_map_header *qmap;
struct rmnet_map_control_command *cmd;
struct rmnet_endpoint *ep;
struct net_device *vnd;
u16 ip_family;
u16 fc_seq;
u32 qos_id;
u8 mux_id;
int r;
qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb);
mux_id = qmap->mux_id;
cmd = rmnet_map_get_cmd_start(skb);
if (mux_id >= RMNET_MAX_LOGICAL_EP) {
kfree_skb(skb);
return RX_HANDLER_CONSUMED;
}
ep = rmnet_get_endpoint(port, mux_id);
if (!ep) {
kfree_skb(skb);
return RX_HANDLER_CONSUMED;
}
vnd = ep->egress_dev;
ip_family = cmd->flow_control.ip_family;
fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
qos_id = ntohl(cmd->flow_control.qos_id);
/* Ignore the ip family and pass the sequence number for both v4 and v6
* sequence. User space does not support creating dedicated flows for
* the 2 protocols
*/
r = rmnet_vnd_do_flow_control(vnd, enable);
if (r) {
kfree_skb(skb);
return RMNET_MAP_COMMAND_UNSUPPORTED;
} else {
return RMNET_MAP_COMMAND_ACK;
}
}
static void rmnet_map_send_ack(struct sk_buff *skb,
unsigned char type,
struct rmnet_port *port)
{
struct rmnet_map_control_command *cmd;
struct net_device *dev = skb->dev;
if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
pskb_trim(skb,
skb->len - sizeof(struct rmnet_map_dl_csum_trailer));
skb->protocol = htons(ETH_P_MAP);
cmd = rmnet_map_get_cmd_start(skb);
cmd->cmd_type = type & 0x03;
netif_tx_lock(dev);
dev->netdev_ops->ndo_start_xmit(skb, dev);
netif_tx_unlock(dev);
}
/* Process MAP command frame and send N/ACK message as appropriate. Message cmd
* name is decoded here and appropriate handler is called.
*/
static void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port)
{
struct rmnet_map_control_command *cmd;
unsigned char command_name;
unsigned char rc = 0;
cmd = rmnet_map_get_cmd_start(skb);
command_name = cmd->command_name;
switch (command_name) {
case RMNET_MAP_COMMAND_FLOW_ENABLE:
rc = rmnet_map_do_flow_control(skb, port, 1);
break;
case RMNET_MAP_COMMAND_FLOW_DISABLE:
rc = rmnet_map_do_flow_control(skb, port, 0);
break;
default:
rc = RMNET_MAP_COMMAND_UNSUPPORTED;
kfree_skb(skb);
break;
}
if (rc == RMNET_MAP_COMMAND_ACK)
rmnet_map_send_ack(skb, rc, port);
}
static void rmnet_map_cmd_exit(struct rmnet_port *port)
{
struct rmnet_map_dl_ind *tmp, *idx;
list_for_each_entry_safe(tmp, idx, &port->dl_list, list)
list_del_rcu(&tmp->list);
}
static void rmnet_map_cmd_init(struct rmnet_port *port)
{
INIT_LIST_HEAD(&port->dl_list);
port->dl_marker_flush = -1;
}

View File

@@ -0,0 +1,682 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* RMNET Data MAP protocol
*
*/
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"
#include "rmnet_handlers.h"
#define RMNET_MAP_PKT_COPY_THRESHOLD 64
#define RMNET_MAP_DEAGGR_SPACING 64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
struct rmnet_map_coal_metadata {
void *ip_header;
void *trans_header;
u16 ip_len;
u16 trans_len;
u16 data_offset;
u16 data_len;
u8 ip_proto;
u8 trans_proto;
u8 pkt_id;
u8 pkt_count;
};
static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
const void *txporthdr)
{
__sum16 *check = NULL;
switch (protocol) {
case IPPROTO_TCP:
check = &(((struct tcphdr *)txporthdr)->check);
break;
case IPPROTO_UDP:
check = &(((struct udphdr *)txporthdr)->check);
break;
default:
check = NULL;
break;
}
return check;
}
static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
{
struct iphdr *ip4h = (struct iphdr *)iphdr;
void *txphdr;
u16 *csum;
txphdr = iphdr + ip4h->ihl * 4;
if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
*csum = ~(*csum);
}
}
static void
rmnet_map_ipv4_ul_csum_header(void *iphdr,
struct rmnet_map_ul_csum_header *ul_header,
struct sk_buff *skb)
{
struct iphdr *ip4h = (struct iphdr *)iphdr;
__be16 *hdr = (__be16 *)ul_header, offset;
offset = htons((__force u16)(skb_transport_header(skb) -
(unsigned char *)iphdr));
ul_header->csum_start_offset = offset;
ul_header->csum_insert_offset = skb->csum_offset;
ul_header->csum_enabled = 1;
if (ip4h->protocol == IPPROTO_UDP)
ul_header->udp_ind = 1;
else
ul_header->udp_ind = 0;
/* Changing remaining fields to network order */
hdr++;
*hdr = htons((__force u16)*hdr);
skb->ip_summed = CHECKSUM_NONE;
rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
}
#if IS_ENABLED(CONFIG_IPV6)
static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
{
struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
void *txphdr;
u16 *csum;
txphdr = ip6hdr + sizeof(struct ipv6hdr);
if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
*csum = ~(*csum);
}
}
static void
rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
struct rmnet_map_ul_csum_header *ul_header,
struct sk_buff *skb)
{
struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
__be16 *hdr = (__be16 *)ul_header, offset;
offset = htons((__force u16)(skb_transport_header(skb) -
(unsigned char *)ip6hdr));
ul_header->csum_start_offset = offset;
ul_header->csum_insert_offset = skb->csum_offset;
ul_header->csum_enabled = 1;
if (ip6h->nexthdr == IPPROTO_UDP)
ul_header->udp_ind = 1;
else
ul_header->udp_ind = 0;
/* Changing remaining fields to network order */
hdr++;
*hdr = htons((__force u16)*hdr);
skb->ip_summed = CHECKSUM_NONE;
rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr);
}
#endif
/* Adds MAP header to front of skb->data
* Padding is calculated and set appropriately in MAP header. Mux ID is
* initialized to 0.
*/
static struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
int hdrlen, int pad,
struct rmnet_port *port)
{
struct rmnet_map_header *map_header;
u32 padding, map_datalen;
u8 *padbytes;
map_datalen = skb->len - hdrlen;
map_header = (struct rmnet_map_header *)
skb_push(skb, sizeof(struct rmnet_map_header));
memset(map_header, 0, sizeof(struct rmnet_map_header));
/* Set next_hdr bit for csum offload packets */
if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5)
map_header->next_hdr = 1;
if (pad == RMNET_MAP_NO_PAD_BYTES) {
map_header->pkt_len = htons(map_datalen);
return map_header;
}
padding = ALIGN(map_datalen, 4) - map_datalen;
if (padding == 0)
goto done;
if (skb_tailroom(skb) < padding)
return NULL;
padbytes = (u8 *)skb_put(skb, padding);
memset(padbytes, 0, padding);
done:
map_header->pkt_len = htons(map_datalen + padding);
map_header->pad_len = padding & 0x3F;
return map_header;
}
/* Deaggregates a single packet
* A whole new buffer is allocated for each portion of an aggregated frame.
* Caller should keep calling deaggregate() on the source skb until NULL is
* returned, indicating that there are no more packets to deaggregate. Caller
* is responsible for freeing the original skb.
*/
static struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
struct rmnet_port *port)
{
struct rmnet_map_header *maph;
struct sk_buff *skbn;
unsigned char *data = rmnet_map_data_ptr(skb), *next_hdr = NULL;
u32 packet_len;
if (skb->len == 0)
return NULL;
maph = (struct rmnet_map_header *)data;
packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header);
if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
else if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5) {
if (!maph->cd_bit) {
packet_len += sizeof(struct rmnet_map_v5_csum_header);
/* Coalescing headers require MAPv5 */
next_hdr = data + sizeof(*maph);
}
}
if (((int)skb->len - (int)packet_len) < 0)
return NULL;
/* Some hardware can send us empty frames. Catch them */
if (ntohs(maph->pkt_len) == 0)
return NULL;
if (next_hdr &&
((struct rmnet_map_v5_coal_header *)next_hdr)->header_type ==
RMNET_MAP_HEADER_TYPE_COALESCING)
return skb;
if (skb_is_nonlinear(skb)) {
skb_frag_t *frag0 = skb_shinfo(skb)->frags;
struct page *page = skb_frag_page(frag0);
skbn = alloc_skb(RMNET_MAP_DEAGGR_HEADROOM, GFP_ATOMIC);
if (!skbn)
return NULL;
skb_append_pagefrags(skbn, page, frag0->page_offset,
packet_len);
skbn->data_len += packet_len;
skbn->len += packet_len;
} else {
skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING,
GFP_ATOMIC);
if (!skbn)
return NULL;
skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
skb_put(skbn, packet_len);
memcpy(skbn->data, data, packet_len);
}
pskb_pull(skb, packet_len);
return skbn;
}
static void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb,
struct net_device *orig_dev)
{
struct rmnet_priv *priv = netdev_priv(orig_dev);
struct rmnet_map_ul_csum_header *ul_header;
void *iphdr;
ul_header = (struct rmnet_map_ul_csum_header *)
skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));
if (unlikely(!(orig_dev->features &
(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
goto sw_csum;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
iphdr = (char *)ul_header +
sizeof(struct rmnet_map_ul_csum_header);
if (skb->protocol == htons(ETH_P_IP)) {
rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
priv->stats.csum_hw++;
return;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
#if IS_ENABLED(CONFIG_IPV6)
rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
priv->stats.csum_hw++;
return;
#else
priv->stats.csum_err_invalid_ip_version++;
goto sw_csum;
#endif
} else {
priv->stats.csum_err_invalid_ip_version++;
}
}
sw_csum:
ul_header->csum_start_offset = 0;
ul_header->csum_insert_offset = 0;
ul_header->csum_enabled = 0;
ul_header->udp_ind = 0;
priv->stats.csum_sw++;
}
static void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb,
struct net_device *orig_dev)
{
struct rmnet_priv *priv = netdev_priv(orig_dev);
struct rmnet_map_v5_csum_header *ul_header;
ul_header = (struct rmnet_map_v5_csum_header *)
skb_push(skb, sizeof(*ul_header));
memset(ul_header, 0, sizeof(*ul_header));
ul_header->header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
void *iph = (char *)ul_header + sizeof(*ul_header);
void *trans;
__sum16 *check;
u8 proto;
if (skb->protocol == htons(ETH_P_IP)) {
u16 ip_len = ((struct iphdr *)iph)->ihl * 4;
proto = ((struct iphdr *)iph)->protocol;
trans = iph + ip_len;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
u16 ip_len = sizeof(struct ipv6hdr);
proto = ((struct ipv6hdr *)iph)->nexthdr;
trans = iph + ip_len;
} else {
priv->stats.csum_err_invalid_ip_version++;
goto sw_csum;
}
check = rmnet_map_get_csum_field(proto, trans);
if (check) {
*check = 0;
skb->ip_summed = CHECKSUM_NONE;
/* Ask for checksum offloading */
ul_header->csum_valid_required = 1;
priv->stats.csum_hw++;
return;
}
}
sw_csum:
priv->stats.csum_sw++;
}
/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
* packets that are supported for UL checksum offload.
*/
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
struct net_device *orig_dev,
int csum_type)
{
switch (csum_type) {
case RMNET_FLAGS_EGRESS_MAP_CKSUMV4:
rmnet_map_v4_checksum_uplink_packet(skb, orig_dev);
break;
case RMNET_FLAGS_EGRESS_MAP_CKSUMV5:
rmnet_map_v5_checksum_uplink_packet(skb, orig_dev);
break;
default:
break;
}
}
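/* Pull the IP (and, when present, transport) headers of a non-linear skb
* into its linear area and reset the network/transport header offsets.
*/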
static void rmnet_map_move_headers(struct sk_buff *skb)
{
struct iphdr *iph;
int ip_len; /* signed: ipv6_skip_exthdr() can return a negative error */
u16 trans_len = 0;
u8 proto;
/* This only applies to non-linear SKBs */
if (!skb_is_nonlinear(skb))
return;
iph = (struct iphdr *)rmnet_map_data_ptr(skb);
if (iph->version == 4) {
ip_len = iph->ihl * 4;
proto = iph->protocol;
if (iph->frag_off & htons(IP_OFFSET))
/* No transport header information */
goto pull;
} else if (iph->version == 6) {
struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;
__be16 frag_off;
u8 nexthdr = ip6h->nexthdr;
ip_len = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr,
&frag_off);
if (ip_len < 0)
return;
proto = nexthdr;
} else {
return;
}
if (proto == IPPROTO_TCP) {
struct tcphdr *tp = (struct tcphdr *)((u8 *)iph + ip_len);
trans_len = tp->doff * 4;
} else if (proto == IPPROTO_UDP) {
trans_len = sizeof(struct udphdr);
} else if (proto == NEXTHDR_FRAGMENT) {
/* Non-first fragments don't have the fragment length added by
* ipv6_skip_exthdr() and show up as proto NEXTHDR_FRAGMENT, so
* we account for the length here.
*/
ip_len += sizeof(struct frag_hdr);
}
pull:
__pskb_pull_tail(skb, ip_len + trans_len);
skb_reset_network_header(skb);
if (trans_len)
skb_set_transport_header(skb, ip_len);
}
/* Process a QMAPv5 packet header */
static int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
struct sk_buff_head *list,
u16 len)
{
struct rmnet_priv *priv = netdev_priv(skb->dev);
int rc = 0;
switch (rmnet_map_get_next_hdr_type(skb)) {
case RMNET_MAP_HEADER_TYPE_COALESCING:
priv->stats.coal.coal_rx++;
break;
case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
if (rmnet_map_get_csum_valid(skb)) {
priv->stats.csum_ok++;
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else {
priv->stats.csum_valid_unset++;
}
/* Pull unnecessary headers and move the rest to the linear
* section of the skb.
*/
pskb_pull(skb,
(sizeof(struct rmnet_map_header) +
sizeof(struct rmnet_map_v5_csum_header)));
rmnet_map_move_headers(skb);
/* Remove padding only for csum offload packets.
* Coalesced packets should never have padding.
*/
pskb_trim(skb, len);
__skb_queue_tail(list, skb);
break;
default:
rc = -EINVAL;
break;
}
return rc;
}
long rmnet_agg_time_limit __read_mostly = 1000000L;
long rmnet_agg_bypass_time __read_mostly = 10000000L;
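/* Identify ICMP/ICMPv6 packets (including those behind an IPv6 fragment
* header); callers can use this to let such packets bypass TX aggregation.
*/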
static int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset)
{
u8 *packet_start = skb->data + offset;
int is_icmp = 0;
if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *ip4h = (struct iphdr *)(packet_start);
if (ip4h->protocol == IPPROTO_ICMP)
is_icmp = 1;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
struct ipv6hdr *ip6h = (struct ipv6hdr *)(packet_start);
if (ip6h->nexthdr == IPPROTO_ICMPV6) {
is_icmp = 1;
} else if (ip6h->nexthdr == NEXTHDR_FRAGMENT) {
struct frag_hdr *frag;
frag = (struct frag_hdr *)(packet_start
+ sizeof(struct ipv6hdr));
if (frag->nexthdr == IPPROTO_ICMPV6)
is_icmp = 1;
}
}
return is_icmp;
}
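/* Workqueue handler: take any pending aggregation buffer off the port and
* transmit it outside the agg_lock.
*/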
static void rmnet_map_flush_tx_packet_work(struct work_struct *work)
{
struct sk_buff *skb = NULL;
struct rmnet_port *port;
unsigned long flags;
port = container_of(work, struct rmnet_port, agg_wq);
spin_lock_irqsave(&port->agg_lock, flags);
if (likely(port->agg_state == -EINPROGRESS)) {
/* Buffer may have already been shipped out */
if (likely(port->agg_skb)) {
skb = port->agg_skb;
port->agg_skb = NULL;
port->agg_count = 0;
memset(&port->agg_time, 0, sizeof(struct timespec));
}
port->agg_state = 0;
}
spin_unlock_irqrestore(&port->agg_lock, flags);
if (skb)
dev_queue_xmit(skb);
}
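/* hrtimer callback: defer the flush to process context via the workqueue */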
static enum hrtimer_restart rmnet_map_flush_tx_packet_queue(struct hrtimer *t)
{
struct rmnet_port *port;
port = container_of(t, struct rmnet_port, hrtimer);
schedule_work(&port->agg_wq);
return HRTIMER_NORESTART;
}
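/* Copy the linear data and every page fragment (walking frag_list chains
* as well) of @src into the tail of @dst, producing one linear buffer.
*/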
static void rmnet_map_linearize_copy(struct sk_buff *dst, struct sk_buff *src)
{
unsigned int linear = src->len - src->data_len, target = src->len;
unsigned char *src_buf;
struct sk_buff *skb;
src_buf = src->data;
skb_put_data(dst, src_buf, linear);
target -= linear;
skb = src;
while (target) {
unsigned int i = 0, non_linear = 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
non_linear = skb_frag_size(&skb_shinfo(skb)->frags[i]);
src_buf = skb_frag_address(&skb_shinfo(skb)->frags[i]);
skb_put_data(dst, src_buf, non_linear);
target -= non_linear;
}
if (skb_shinfo(skb)->frag_list) {
skb = skb_shinfo(skb)->frag_list;
continue;
}
if (skb->next)
skb = skb->next;
}
}
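/* Egress aggregation: copy the packet into port->agg_skb and arm the flush
* hrtimer. Sparse traffic and oversized packets are transmitted directly;
* a full or stale aggregation buffer is flushed before starting a new one.
*/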
static void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port)
{
struct timespec diff, last;
int size, agg_count = 0;
struct sk_buff *agg_skb;
unsigned long flags;
new_packet:
spin_lock_irqsave(&port->agg_lock, flags);
memcpy(&last, &port->agg_last, sizeof(struct timespec));
getnstimeofday(&port->agg_last);
if (!port->agg_skb) {
/* Check to see if we should aggregate at all. If the traffic is very
* sparse, don't aggregate. We will need to tune this later.
*/
diff = timespec_sub(port->agg_last, last);
size = port->egress_agg_params.agg_size - skb->len;
if (diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_bypass_time ||
size <= 0) {
spin_unlock_irqrestore(&port->agg_lock, flags);
skb->protocol = htons(ETH_P_MAP);
dev_queue_xmit(skb);
return;
}
port->agg_skb = alloc_skb(port->egress_agg_params.agg_size,
GFP_ATOMIC);
if (!port->agg_skb) {
port->agg_skb = 0;
port->agg_count = 0;
memset(&port->agg_time, 0, sizeof(struct timespec));
spin_unlock_irqrestore(&port->agg_lock, flags);
skb->protocol = htons(ETH_P_MAP);
dev_queue_xmit(skb);
return;
}
rmnet_map_linearize_copy(port->agg_skb, skb);
port->agg_skb->dev = skb->dev;
port->agg_skb->protocol = htons(ETH_P_MAP);
port->agg_count = 1;
getnstimeofday(&port->agg_time);
dev_kfree_skb_any(skb);
goto schedule;
}
diff = timespec_sub(port->agg_last, port->agg_time);
size = port->egress_agg_params.agg_size - port->agg_skb->len;
if (skb->len > size ||
port->agg_count >= port->egress_agg_params.agg_count ||
diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_time_limit) {
agg_skb = port->agg_skb;
agg_count = port->agg_count;
port->agg_skb = 0;
port->agg_count = 0;
memset(&port->agg_time, 0, sizeof(struct timespec));
port->agg_state = 0;
spin_unlock_irqrestore(&port->agg_lock, flags);
hrtimer_cancel(&port->hrtimer);
dev_queue_xmit(agg_skb);
goto new_packet;
}
rmnet_map_linearize_copy(port->agg_skb, skb);
port->agg_count++;
dev_kfree_skb_any(skb);
schedule:
if (port->agg_state != -EINPROGRESS) {
port->agg_state = -EINPROGRESS;
hrtimer_start(&port->hrtimer,
ns_to_ktime(port->egress_agg_params.agg_time),
HRTIMER_MODE_REL);
}
spin_unlock_irqrestore(&port->agg_lock, flags);
}
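/* Set default egress aggregation parameters (8 KB / 20 packets / 3 ms) and
* initialize the flush timer, work item and lock.
*/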
static void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
{
hrtimer_init(&port->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
port->hrtimer.function = rmnet_map_flush_tx_packet_queue;
port->egress_agg_params.agg_size = 8192;
port->egress_agg_params.agg_count = 20;
port->egress_agg_params.agg_time = 3000000;
spin_lock_init(&port->agg_lock);
INIT_WORK(&port->agg_wq, rmnet_map_flush_tx_packet_work);
}
static void rmnet_map_tx_aggregate_exit(struct rmnet_port *port)
{
unsigned long flags;
hrtimer_cancel(&port->hrtimer);
cancel_work_sync(&port->agg_wq);
spin_lock_irqsave(&port->agg_lock, flags);
if (port->agg_state == -EINPROGRESS) {
if (port->agg_skb) {
kfree_skb(port->agg_skb);
port->agg_skb = NULL;
port->agg_count = 0;
memset(&port->agg_time, 0, sizeof(struct timespec));
}
port->agg_state = 0;
}
spin_unlock_irqrestore(&port->agg_lock, flags);
}

View File

@@ -0,0 +1,34 @@
/* Copyright (c) 2013-2014, 2016-2019 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _RMNET_PRIVATE_H_
#define _RMNET_PRIVATE_H_
#define RMNET_MAX_PACKET_SIZE 16384
#define RMNET_DFLT_PACKET_SIZE 1500
#define RMNET_NEEDED_HEADROOM 16
#define RMNET_TX_QUEUE_LEN 1000
/* Constants */
#define RMNET_EGRESS_FORMAT_AGGREGATION BIT(31)
#define RMNET_INGRESS_FORMAT_DL_MARKER_V1 BIT(30)
#define RMNET_INGRESS_FORMAT_DL_MARKER_V2 BIT(29)
#define RMNET_INGRESS_FORMAT_DL_MARKER (RMNET_INGRESS_FORMAT_DL_MARKER_V1 |\
RMNET_INGRESS_FORMAT_DL_MARKER_V2)
/* Replace skb->dev with a virtual rmnet device and pass up the stack */
#define RMNET_EPMODE_VND (1)
/* Pass the frame directly to another device with dev_queue_xmit() */
#define RMNET_EPMODE_BRIDGE (2)
#endif /* _RMNET_PRIVATE_H_ */

View File

@@ -0,0 +1,257 @@
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rmnet
#define TRACE_INCLUDE_FILE rmnet_trace
#if !defined(_RMNET_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _RMNET_TRACE_H_
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/tracepoint.h>
/*****************************************************************************/
/* Trace events for rmnet module */
/*****************************************************************************/
DECLARE_EVENT_CLASS
(rmnet_mod_template,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2),
TP_STRUCT__entry(
__field(u8, func)
__field(u8, evt)
__field(u32, uint1)
__field(u32, uint2)
__field(u64, ulong1)
__field(u64, ulong2)
__field(void *, ptr1)
__field(void *, ptr2)
),
TP_fast_assign(
__entry->func = func;
__entry->evt = evt;
__entry->uint1 = uint1;
__entry->uint2 = uint2;
__entry->ulong1 = ulong1;
__entry->ulong2 = ulong2;
__entry->ptr1 = ptr1;
__entry->ptr2 = ptr2;
),
TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%llu ul2:%llu p1:0x%pK p2:0x%pK",
__entry->func, __entry->evt,
__entry->uint1, __entry->uint2,
__entry->ulong1, __entry->ulong2,
__entry->ptr1, __entry->ptr2)
)
DEFINE_EVENT
(rmnet_mod_template, rmnet_low,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
DEFINE_EVENT
(rmnet_mod_template, rmnet_high,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
DEFINE_EVENT
(rmnet_mod_template, rmnet_err,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
/*****************************************************************************/
/* Trace events for rmnet_perf module */
/*****************************************************************************/
DEFINE_EVENT
(rmnet_mod_template, rmnet_perf_low,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
DEFINE_EVENT
(rmnet_mod_template, rmnet_perf_high,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
DEFINE_EVENT
(rmnet_mod_template, rmnet_perf_err,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
/*****************************************************************************/
/* Trace events for rmnet_shs module */
/*****************************************************************************/
DEFINE_EVENT
(rmnet_mod_template, rmnet_shs_low,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
DEFINE_EVENT
(rmnet_mod_template, rmnet_shs_high,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
DEFINE_EVENT
(rmnet_mod_template, rmnet_shs_err,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
DEFINE_EVENT
(rmnet_mod_template, rmnet_shs_wq_low,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
DEFINE_EVENT
(rmnet_mod_template, rmnet_shs_wq_high,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
DEFINE_EVENT
(rmnet_mod_template, rmnet_shs_wq_err,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
DECLARE_EVENT_CLASS
(rmnet_freq_template,
TP_PROTO(u8 core, u32 newfreq),
TP_ARGS(core, newfreq),
TP_STRUCT__entry(
__field(u8, core)
__field(u32, newfreq)
),
TP_fast_assign(
__entry->core = core;
__entry->newfreq = newfreq;
),
TP_printk("freq policy core:%u freq floor :%u",
__entry->core, __entry->newfreq)
);
DEFINE_EVENT
(rmnet_freq_template, rmnet_freq_boost,
TP_PROTO(u8 core, u32 newfreq),
TP_ARGS(core, newfreq)
);
DEFINE_EVENT
(rmnet_freq_template, rmnet_freq_reset,
TP_PROTO(u8 core, u32 newfreq),
TP_ARGS(core, newfreq)
);
TRACE_EVENT
(rmnet_freq_update,
TP_PROTO(u8 core, u32 lowfreq, u32 highfreq),
TP_ARGS(core, lowfreq, highfreq),
TP_STRUCT__entry(
__field(u8, core)
__field(u32, lowfreq)
__field(u32, highfreq)
),
TP_fast_assign(
__entry->core = core;
__entry->lowfreq = lowfreq;
__entry->highfreq = highfreq;
),
TP_printk("freq policy update core:%u policy freq floor :%u freq ceil :%u",
__entry->core, __entry->lowfreq, __entry->highfreq)
);
#endif /* _RMNET_TRACE_H_ */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../drivers/net/ethernet/qualcomm/rmnet
#include <trace/define_trace.h>

View File

@@ -0,0 +1,382 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* RMNET Data virtual network driver
*
*/
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <net/pkt_sched.h>
#include "rmnet_config.h"
#include "rmnet_handlers.h"
#include "rmnet_private.h"
#include "rmnet_map.h"
#include "rmnet_vnd.h"
/* RX/TX Fixup */
static void rmnet_vnd_rx_fixup(struct net_device *dev, u32 skb_len)
{
struct rmnet_priv *priv = netdev_priv(dev);
struct rmnet_pcpu_stats *pcpu_ptr;
pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
u64_stats_update_begin(&pcpu_ptr->syncp);
pcpu_ptr->stats.rx_pkts++;
pcpu_ptr->stats.rx_bytes += skb_len;
u64_stats_update_end(&pcpu_ptr->syncp);
}
static void rmnet_vnd_tx_fixup(struct net_device *dev, u32 skb_len)
{
struct rmnet_priv *priv = netdev_priv(dev);
struct rmnet_pcpu_stats *pcpu_ptr;
pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
u64_stats_update_begin(&pcpu_ptr->syncp);
pcpu_ptr->stats.tx_pkts++;
pcpu_ptr->stats.tx_bytes += skb_len;
u64_stats_update_end(&pcpu_ptr->syncp);
}
/* Network Device Operations */
static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct rmnet_priv *priv;
priv = netdev_priv(dev);
if (priv->real_dev) {
rmnet_egress_handler(skb);
} else {
this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
kfree_skb(skb);
}
return NETDEV_TX_OK;
}
static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
{
if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE)
return -EINVAL;
rmnet_dev->mtu = new_mtu;
return 0;
}
static int rmnet_vnd_get_iflink(const struct net_device *dev)
{
struct rmnet_priv *priv = netdev_priv(dev);
return priv->real_dev->ifindex;
}
static int rmnet_vnd_init(struct net_device *dev)
{
struct rmnet_priv *priv = netdev_priv(dev);
int err;
priv->pcpu_stats = alloc_percpu(struct rmnet_pcpu_stats);
if (!priv->pcpu_stats)
return -ENOMEM;
err = gro_cells_init(&priv->gro_cells, dev);
if (err) {
free_percpu(priv->pcpu_stats);
return err;
}
return 0;
}
static void rmnet_vnd_uninit(struct net_device *dev)
{
struct rmnet_priv *priv = netdev_priv(dev);
gro_cells_destroy(&priv->gro_cells);
free_percpu(priv->pcpu_stats);
}
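/* Sum the per-CPU counters into a single rtnl_link_stats64 snapshot */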
static struct rtnl_link_stats64* rmnet_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *s)
{
struct rmnet_priv *priv = netdev_priv(dev);
struct rmnet_vnd_stats total_stats;
struct rmnet_pcpu_stats *pcpu_ptr;
unsigned int cpu, start;
memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));
for_each_possible_cpu(cpu) {
pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
do {
start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
} while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));
total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
}
s->rx_packets = total_stats.rx_pkts;
s->rx_bytes = total_stats.rx_bytes;
s->tx_packets = total_stats.tx_pkts;
s->tx_bytes = total_stats.tx_bytes;
s->tx_dropped = total_stats.tx_drops;
return s;
}
static const struct net_device_ops rmnet_vnd_ops = {
.ndo_start_xmit = rmnet_vnd_start_xmit,
.ndo_change_mtu = rmnet_vnd_change_mtu,
.ndo_get_iflink = rmnet_vnd_get_iflink,
//.ndo_add_slave = rmnet_add_bridge,
//.ndo_del_slave = rmnet_del_bridge,
.ndo_init = rmnet_vnd_init,
.ndo_uninit = rmnet_vnd_uninit,
.ndo_get_stats64 = rmnet_get_stats64,
};
static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
"Checksum ok",
"Checksum valid bit not set",
"Checksum validation failed",
"Checksum error bad buffer",
"Checksum error bad ip version",
"Checksum error bad transport",
"Checksum skipped on ip fragment",
"Checksum skipped",
"Checksum computed in software",
"Checksum computed in hardware",
"Coalescing packets received",
"Coalesced packets",
"Coalescing header NLO errors",
"Coalescing header pcount errors",
"Coalescing checksum errors",
"Coalescing packet reconstructs",
"Coalescing IP version invalid",
"Coalescing L4 header invalid",
"Coalescing close Non-coalescable",
"Coalescing close L3 mismatch",
"Coalescing close L4 mismatch",
"Coalescing close HW NLO limit",
"Coalescing close HW packet limit",
"Coalescing close HW byte limit",
"Coalescing close HW time limit",
"Coalescing close HW eviction",
"Coalescing close Coalescable",
"Coalescing packets over VEID0",
"Coalescing packets over VEID1",
"Coalescing packets over VEID2",
"Coalescing packets over VEID3",
};
static const char rmnet_port_gstrings_stats[][ETH_GSTRING_LEN] = {
"MAP Cmd last version",
"MAP Cmd last ep id",
"MAP Cmd last transaction id",
"DL header last seen sequence",
"DL header last seen bytes",
"DL header last seen packets",
"DL header last seen flows",
"DL header pkts received",
"DL header total bytes received",
"DL header total pkts received",
"DL trailer last seen sequence",
"DL trailer pkts received",
};
static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
switch (stringset) {
case ETH_SS_STATS:
memcpy(buf, &rmnet_gstrings_stats,
sizeof(rmnet_gstrings_stats));
memcpy(buf + sizeof(rmnet_gstrings_stats),
&rmnet_port_gstrings_stats,
sizeof(rmnet_port_gstrings_stats));
break;
}
}
static int rmnet_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
return ARRAY_SIZE(rmnet_gstrings_stats) +
ARRAY_SIZE(rmnet_port_gstrings_stats);
default:
return -EOPNOTSUPP;
}
}
static void rmnet_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct rmnet_priv *priv = netdev_priv(dev);
struct rmnet_priv_stats *st = &priv->stats;
struct rmnet_port_priv_stats *stp;
struct rmnet_port *port;
port = rmnet_get_port(priv->real_dev);
if (!data || !port)
return;
stp = &port->stats;
memcpy(data, st, ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64));
memcpy(data + ARRAY_SIZE(rmnet_gstrings_stats), stp,
ARRAY_SIZE(rmnet_port_gstrings_stats) * sizeof(u64));
}
static int rmnet_stats_reset(struct net_device *dev)
{
struct rmnet_priv *priv = netdev_priv(dev);
struct rmnet_port_priv_stats *stp;
struct rmnet_port *port;
port = rmnet_get_port(priv->real_dev);
if (!port)
return -EINVAL;
stp = &port->stats;
memset(stp, 0, sizeof(*stp));
return 0;
}
static const struct ethtool_ops rmnet_ethtool_ops = {
.get_ethtool_stats = rmnet_get_ethtool_stats,
.get_strings = rmnet_get_strings,
.get_sset_count = rmnet_get_sset_count,
.nway_reset = rmnet_stats_reset,
};
/* Called by the kernel whenever a new rmnet<n> device is created. Sets MTU,
* flags, ARP type, needed headroom, etc.
*/
void rmnet_vnd_setup(struct net_device *rmnet_dev)
{
rmnet_dev->netdev_ops = &rmnet_vnd_ops;
rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
random_ether_addr(rmnet_dev->dev_addr);
rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;
/* Raw IP mode */
rmnet_dev->header_ops = NULL; /* No header */
rmnet_dev->type = ARPHRD_RAWIP;
rmnet_dev->hard_header_len = 0;
rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
//rmnet_dev->needs_free_netdev = true;
rmnet_dev->hw_features = NETIF_F_RXCSUM;
rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
//rmnet_dev->hw_features |= NETIF_F_SG;
//rmnet_dev->hw_features |= NETIF_F_GRO_HW;
}
/* Exposed API */
static int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
struct rmnet_port *port,
struct net_device *real_dev,
struct rmnet_endpoint *ep)
{
struct rmnet_priv *priv = netdev_priv(rmnet_dev);
struct rmnet_nss_cb *nss_cb;
int rc;
if (ep->egress_dev)
return -EINVAL;
if (rmnet_get_endpoint(port, id))
return -EBUSY;
rmnet_dev->hw_features = NETIF_F_RXCSUM;
rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
rmnet_dev->hw_features |= NETIF_F_SG;
priv->real_dev = real_dev;
rc = register_netdevice(rmnet_dev);
if (!rc) {
ep->egress_dev = rmnet_dev;
ep->mux_id = id;
port->nr_rmnet_devs++;
//rmnet_dev->rtnl_link_ops = &rmnet_link_ops;
priv->mux_id = id;
netdev_dbg(rmnet_dev, "rmnet dev created\n");
}
nss_cb = rcu_dereference(rmnet_nss_callbacks);
if (nss_cb) {
rc = nss_cb->nss_create(rmnet_dev);
if (rc) {
/* Log, but don't fail the device creation */
netdev_err(rmnet_dev, "Device will not use NSS path: %d\n", rc);
rc = 0;
} else {
netdev_dbg(rmnet_dev, "NSS context created\n");
}
}
return rc;
}
static int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
struct rmnet_endpoint *ep)
{
struct rmnet_nss_cb *nss_cb;
if (id >= RMNET_MAX_LOGICAL_EP || !ep->egress_dev)
return -EINVAL;
if (ep->egress_dev) {
nss_cb = rcu_dereference(rmnet_nss_callbacks);
if (nss_cb)
nss_cb->nss_free(ep->egress_dev);
}
ep->egress_dev = NULL;
port->nr_rmnet_devs--;
return 0;
}
static int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
{
netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable);
/* Although we expect a similar number of enable/disable
* commands, optimize for disable, which is more
* latency-sensitive than enable.
*/
if (unlikely(enable))
netif_wake_queue(rmnet_dev);
else
netif_stop_queue(rmnet_dev);
return 0;
}

View File

@@ -0,0 +1,29 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* RMNET Data Virtual Network Device APIs
*
*/
#ifndef _RMNET_VND_H_
#define _RMNET_VND_H_
static int rmnet_vnd_do_flow_control(struct net_device *dev, int enable);
static int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
struct rmnet_port *port,
struct net_device *real_dev,
struct rmnet_endpoint *ep);
static int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
struct rmnet_endpoint *ep);
static void rmnet_vnd_rx_fixup(struct net_device *dev, u32 skb_len);
static void rmnet_vnd_tx_fixup(struct net_device *dev, u32 skb_len);
static void rmnet_vnd_setup(struct net_device *dev);
#endif /* _RMNET_VND_H_ */

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,424 @@
/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/hashtable.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <qca-nss-drv/nss_api_if.h>
#include <linux/rmnet_nss.h>
#define RMNET_NSS_HASH_BITS 8
#define hash_add_ptr(table, node, key) \
hlist_add_head(node, &table[hash_ptr(key, HASH_BITS(table))])
static DEFINE_HASHTABLE(rmnet_nss_ctx_hashtable, RMNET_NSS_HASH_BITS);
struct rmnet_nss_ctx {
struct hlist_node hnode;
struct net_device *rmnet_dev;
struct nss_rmnet_rx_handle *nss_ctx;
};
enum __rmnet_nss_stat {
RMNET_NSS_RX_ETH,
RMNET_NSS_RX_FAIL,
RMNET_NSS_RX_NON_ETH,
RMNET_NSS_RX_BUSY,
RMNET_NSS_TX_NO_CTX,
RMNET_NSS_TX_SUCCESS,
RMNET_NSS_TX_FAIL,
RMNET_NSS_TX_NONLINEAR,
RMNET_NSS_TX_BAD_IP,
RMNET_NSS_EXCEPTIONS,
RMNET_NSS_EX_BAD_HDR,
RMNET_NSS_EX_BAD_IP,
RMNET_NSS_EX_SUCCESS,
RMNET_NSS_TX_BAD_FRAGS,
RMNET_NSS_TX_LINEARIZE_FAILS,
RMNET_NSS_TX_NON_ZERO_HEADLEN_FRAGS,
RMNET_NSS_TX_BUSY_LOOP,
RMNET_NSS_NUM_STATS,
};
static unsigned long rmnet_nss_stats[RMNET_NSS_NUM_STATS];
#define RMNET_NSS_STAT(name, counter, desc) \
module_param_named(name, rmnet_nss_stats[counter], ulong, 0444); \
MODULE_PARM_DESC(name, desc)
RMNET_NSS_STAT(rmnet_nss_rx_ethernet, RMNET_NSS_RX_ETH,
"Number of Ethernet headers successfully removed");
RMNET_NSS_STAT(rmnet_nss_rx_fail, RMNET_NSS_RX_FAIL,
"Number of Ethernet headers that could not be removed");
RMNET_NSS_STAT(rmnet_nss_rx_non_ethernet, RMNET_NSS_RX_NON_ETH,
"Number of non-Ethernet packets received");
RMNET_NSS_STAT(rmnet_nss_rx_busy, RMNET_NSS_RX_BUSY,
"Number of packets dropped decause rmnet_data device was busy");
RMNET_NSS_STAT(rmnet_nss_tx_slow, RMNET_NSS_TX_NO_CTX,
"Number of packets sent over non-NSS-accelerated rmnet device");
RMNET_NSS_STAT(rmnet_nss_tx_fast, RMNET_NSS_TX_SUCCESS,
"Number of packets sent over NSS-accelerated rmnet device");
RMNET_NSS_STAT(rmnet_nss_tx_fail, RMNET_NSS_TX_FAIL,
"Number of packets that NSS could not transmit");
RMNET_NSS_STAT(rmnet_nss_tx_nonlinear, RMNET_NSS_TX_NONLINEAR,
"Number of non linear sent over NSS-accelerated rmnet device");
RMNET_NSS_STAT(rmnet_nss_tx_invalid_ip, RMNET_NSS_TX_BAD_IP,
"Number of ingress packets with invalid IP headers");
RMNET_NSS_STAT(rmnet_nss_tx_invalid_frags, RMNET_NSS_TX_BAD_FRAGS,
"Number of ingress packets with invalid frag format");
RMNET_NSS_STAT(rmnet_nss_tx_linearize_fail, RMNET_NSS_TX_LINEARIZE_FAILS,
"Number of ingress packets where linearize in tx fails");
RMNET_NSS_STAT(rmnet_nss_tx_exceptions, RMNET_NSS_EXCEPTIONS,
"Number of times our DL exception handler was invoked");
RMNET_NSS_STAT(rmnet_nss_exception_non_ethernet, RMNET_NSS_EX_BAD_HDR,
"Number of non-Ethernet exception packets");
RMNET_NSS_STAT(rmnet_nss_exception_invalid_ip, RMNET_NSS_EX_BAD_IP,
"Number of exception packets with invalid IP headers");
RMNET_NSS_STAT(rmnet_nss_exception_success, RMNET_NSS_EX_SUCCESS,
"Number of exception packets handled successfully");
RMNET_NSS_STAT(rmnet_nss_tx_non_zero_headlen_frags, RMNET_NSS_TX_NON_ZERO_HEADLEN_FRAGS,
"Number of packets with non zero headlen");
RMNET_NSS_STAT(rmnet_nss_tx_busy_loop, RMNET_NSS_TX_BUSY_LOOP,
"Number of times tx packets busy looped");
static void rmnet_nss_inc_stat(enum __rmnet_nss_stat stat)
{
if (stat >= 0 && stat < RMNET_NSS_NUM_STATS)
rmnet_nss_stats[stat]++;
}
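/* Look up the NSS context for a device in the hashtable keyed by the
* net_device pointer.
*/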
static struct rmnet_nss_ctx *rmnet_nss_find_ctx(struct net_device *dev)
{
struct rmnet_nss_ctx *ctx;
struct hlist_head *bucket;
u32 hash;
hash = hash_ptr(dev, HASH_BITS(rmnet_nss_ctx_hashtable));
bucket = &rmnet_nss_ctx_hashtable[hash];
hlist_for_each_entry(ctx, bucket, hnode) {
if (ctx->rmnet_dev == dev)
return ctx;
}
return NULL;
}
static void rmnet_nss_free_ctx(struct rmnet_nss_ctx *ctx)
{
if (ctx) {
hash_del(&ctx->hnode);
nss_rmnet_rx_xmit_callback_unregister(ctx->nss_ctx);
nss_rmnet_rx_destroy_sync(ctx->nss_ctx);
kfree(ctx);
}
}
/* Pull off an ethernet header, if possible */
static int rmnet_nss_ethhdr_pull(struct sk_buff *skb)
{
if (!skb->protocol || skb->protocol == htons(ETH_P_802_3)) {
void *ret = skb_pull(skb, sizeof(struct ethhdr));
rmnet_nss_inc_stat((ret) ? RMNET_NSS_RX_ETH :
RMNET_NSS_RX_FAIL);
return !ret;
}
rmnet_nss_inc_stat(RMNET_NSS_RX_NON_ETH);
return -1;
}
/* Copy headers to linear section for non-linear packets */
static int rmnet_nss_adjust_header(struct sk_buff *skb)
{
struct iphdr *iph;
skb_frag_t *frag;
int bytes = 0;
u8 transport;
if (skb_shinfo(skb)->nr_frags != 1) {
rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_FRAGS);
return -EINVAL;
}
if (skb_headlen(skb)) {
rmnet_nss_inc_stat(RMNET_NSS_TX_NON_ZERO_HEADLEN_FRAGS);
return 0;
}
frag = &skb_shinfo(skb)->frags[0];
iph = (struct iphdr *)(skb_frag_address(frag));
if (iph->version == 4) {
bytes = iph->ihl*4;
transport = iph->protocol;
} else if (iph->version == 6) {
struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;
bytes = sizeof(struct ipv6hdr);
/* Don't have to account for extension headers yet */
transport = ip6h->nexthdr;
} else {
rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_IP);
return -EINVAL;
}
if (transport == IPPROTO_TCP) {
struct tcphdr *th;
th = (struct tcphdr *)((u8 *)iph + bytes);
bytes += th->doff * 4;
} else if (transport == IPPROTO_UDP) {
bytes += sizeof(struct udphdr);
} else {
/* can't do anything else here unfortunately, so linearize */
if (skb_linearize(skb)) {
rmnet_nss_inc_stat(RMNET_NSS_TX_LINEARIZE_FAILS);
return -EINVAL;
} else {
return 0;
}
}
if (bytes > skb_frag_size(frag)) {
rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_FRAGS);
return -EINVAL;
}
skb_push(skb, bytes);
memcpy(skb->data, iph, bytes);
/* subtract to account for skb_push */
skb->len -= bytes;
frag->page_offset += bytes;
skb_frag_size_sub(frag, bytes);
/* subtract to account for skb_frag_size_sub */
skb->data_len -= bytes;
return 0;
}
/* Main downlink handler
* Looks up the NSS context associated with the device. If the context is found,
* we add a dummy ethernet header with the appropriate protocol field set,
* then pass the packet off to NSS for hardware acceleration.
*/
int rmnet_nss_tx(struct sk_buff *skb)
{
struct ethhdr *eth;
struct rmnet_nss_ctx *ctx;
struct net_device *dev = skb->dev;
nss_tx_status_t rc;
unsigned int len;
u8 version;
if (skb_is_nonlinear(skb)) {
if (rmnet_nss_adjust_header(skb))
goto fail;
else
rmnet_nss_inc_stat(RMNET_NSS_TX_NONLINEAR);
}
version = ((struct iphdr *)skb->data)->version;
ctx = rmnet_nss_find_ctx(dev);
if (!ctx) {
rmnet_nss_inc_stat(RMNET_NSS_TX_NO_CTX);
return -EINVAL;
}
eth = (struct ethhdr *)skb_push(skb, sizeof(*eth));
memset(&eth->h_dest, 0, ETH_ALEN * 2);
if (version == 4) {
eth->h_proto = htons(ETH_P_IP);
} else if (version == 6) {
eth->h_proto = htons(ETH_P_IPV6);
} else {
rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_IP);
goto fail;
}
skb->protocol = htons(ETH_P_802_3);
/* Get length including ethhdr */
len = skb->len;
transmit:
rc = nss_rmnet_rx_tx_buf(ctx->nss_ctx, skb);
if (rc == NSS_TX_SUCCESS) {
/* Increment rmnet_data device stats.
* Don't call rmnet_data_vnd_rx_fixup() to do this, as
* there's no guarantee the skb pointer is still valid.
*/
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
rmnet_nss_inc_stat(RMNET_NSS_TX_SUCCESS);
return 0;
} else if (rc == NSS_TX_FAILURE_QUEUE) {
rmnet_nss_inc_stat(RMNET_NSS_TX_BUSY_LOOP);
goto transmit;
}
fail:
rmnet_nss_inc_stat(RMNET_NSS_TX_FAIL);
kfree_skb(skb);
return 1;
}
/* Called by NSS in the DL exception case.
* Since the packet cannot be sent over the accelerated path, we need to
* handle it. Remove the ethernet header and pass it onward to the stack
* if possible.
*/
void rmnet_nss_receive(struct net_device *dev, struct sk_buff *skb,
struct napi_struct *napi)
{
rmnet_nss_inc_stat(RMNET_NSS_EXCEPTIONS);
if (!skb)
return;
if (rmnet_nss_ethhdr_pull(skb)) {
rmnet_nss_inc_stat(RMNET_NSS_EX_BAD_HDR);
goto drop;
}
/* reset header pointers */
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
skb_reset_mac_header(skb);
/* reset packet type */
skb->pkt_type = PACKET_HOST;
skb->dev = dev;
/* reset protocol type */
switch (skb->data[0] & 0xF0) {
case 0x40:
skb->protocol = htons(ETH_P_IP);
break;
case 0x60:
skb->protocol = htons(ETH_P_IPV6);
break;
default:
rmnet_nss_inc_stat(RMNET_NSS_EX_BAD_IP);
goto drop;
}
rmnet_nss_inc_stat(RMNET_NSS_EX_SUCCESS);
/* Set this so that we don't loop around netif_receive_skb */
skb->cb[0] = 1;
netif_receive_skb(skb);
return;
drop:
kfree_skb(skb);
}
/* Called by NSS in the UL acceleration case.
* We are guaranteed to have an ethernet packet here from the NSS hardware.
* We need to pull the header off and invoke our ndo_start_xmit function
* to handle transmitting the packet to the network stack.
*/
void rmnet_nss_xmit(struct net_device *dev, struct sk_buff *skb)
{
netdev_tx_t ret;
skb_pull(skb, sizeof(struct ethhdr));
rmnet_nss_inc_stat(RMNET_NSS_RX_ETH);
/* NSS takes care of shaping, so bypassing Qdiscs like this is OK */
ret = dev->netdev_ops->ndo_start_xmit(skb, dev);
if (unlikely(ret == NETDEV_TX_BUSY)) {
dev_kfree_skb_any(skb);
rmnet_nss_inc_stat(RMNET_NSS_RX_BUSY);
}
}
/* Create and register an NSS context for an rmnet_data device */
int rmnet_nss_create_vnd(struct net_device *dev)
{
struct rmnet_nss_ctx *ctx;
ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
if (!ctx)
return -ENOMEM;
ctx->rmnet_dev = dev;
ctx->nss_ctx = nss_rmnet_rx_create_sync_nexthop(dev, NSS_N2H_INTERFACE,
NSS_C2C_TX_INTERFACE);
if (!ctx->nss_ctx) {
kfree(ctx);
return -1;
}
nss_rmnet_rx_register(ctx->nss_ctx, rmnet_nss_receive, dev);
nss_rmnet_rx_xmit_callback_register(ctx->nss_ctx, rmnet_nss_xmit);
hash_add_ptr(rmnet_nss_ctx_hashtable, &ctx->hnode, dev);
return 0;
}
/* Unregister and destroy the NSS context for an rmnet_data device */
int rmnet_nss_free_vnd(struct net_device *dev)
{
struct rmnet_nss_ctx *ctx;
ctx = rmnet_nss_find_ctx(dev);
rmnet_nss_free_ctx(ctx);
return 0;
}
static const struct rmnet_nss_cb rmnet_nss = {
.nss_create = rmnet_nss_create_vnd,
.nss_free = rmnet_nss_free_vnd,
.nss_tx = rmnet_nss_tx,
};
int __init rmnet_nss_init(void)
{
pr_err("%s(): initializing rmnet_nss\n", __func__);
RCU_INIT_POINTER(rmnet_nss_callbacks, &rmnet_nss);
return 0;
}
void __exit rmnet_nss_exit(void)
{
struct hlist_node *tmp;
struct rmnet_nss_ctx *ctx;
int bkt;
pr_err("%s(): exiting rmnet_nss\n", __func__);
RCU_INIT_POINTER(rmnet_nss_callbacks, NULL);
/* Tear down all NSS contexts */
hash_for_each_safe(rmnet_nss_ctx_hashtable, bkt, tmp, ctx, hnode)
rmnet_nss_free_ctx(ctx);
}
#if 0
MODULE_LICENSE("GPL v2");
module_init(rmnet_nss_init);
module_exit(rmnet_nss_exit);
#endif

View File

@@ -0,0 +1,31 @@
root@imx6qsabresd:~# busybox microcom /dev/mhi_DUN
[ 384.652992] [I][mhi_uci_open] Node open, ref counts 1
[ 384.658144] [I][mhi_uci_open] Starting channel
[ 384.662612] [I][__mhi_prepare_channel] Entered: preparing channel:32
[ 384.680397] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 384.685890] [I][__mhi_prepare_channel] Chan:32 successfully moved to start state
[ 384.693312] [I][__mhi_prepare_channel] Entered: preparing channel:33
[ 384.708692] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 384.714324] [I][__mhi_prepare_channel] Chan:33 successfully moved to start state
RDY
+CFUN: 1
+CPIN: READY
+QUSIM: 1
+QIND: SMS DONE
+QIND: PB DONE
ati
Quectel
EM20
Revision: EM20GR01A01M4G
OK
at+cpin?
+CPIN: READY
OK

View File

@@ -0,0 +1,145 @@
root@OpenWrt:~# insmod pcie_mhi.ko mhi_mbim_enabled=1
root@OpenWrt:~# dmesg | grep mhi
[ 65.587160] mhi_init Quectel_Linux_PCIE_MHI_Driver_V1.3.0.6
[ 65.597089] mhi_pci_probe pci_dev->name = 0000:01:00.0, domain=0, bus=1, slot=0, vendor=17CB, device=0306
[ 65.602250] mhi_q 0000:01:00.0: BAR 0: assigned [mem 0x20300000-0x20300fff 64bit]
[ 65.611690] mhi_q 0000:01:00.0: enabling device (0140 -> 0142)
[ 65.619307] [I][mhi_init_pci_dev] msi_required = 5, msi_allocated = 5, msi_irq = 63
[ 65.619327] [I][mhi_power_up] dev_state:RESET
[ 65.619331] [I][mhi_async_power_up] Requested to power on
[ 65.619449] [I][mhi_alloc_coherent] size = 114688, dma_handle = 6fca0000
[ 65.619462] [I][mhi_init_dev_ctxt] mhi_ctxt->ctrl_seg = c221e000
[ 65.619731] [I][mhi_async_power_up] dev_state:RESET ee:AMSS
[ 65.619747] [I][mhi_pm_st_worker] Transition to state:READY
[ 65.619760] [I][mhi_pm_st_worker] INVALID_EE -> AMSS
[ 65.619764] [I][mhi_ready_state_transition] Waiting to enter READY state
[ 65.619885] [I][mhi_async_power_up] Power on setup success
[ 65.619897] [I][mhi_pci_probe] Return successful
[ 65.665114] [I][mhi_ready_state_transition] Device in READY State
[ 65.665125] [I][mhi_intvec_threaded_handlr] device ee:AMSS dev_state:READY, pm_state:POR
[ 65.665131] [I][mhi_intvec_threaded_handlr] device ee:AMSS dev_state:READY, INVALID_EE
[ 65.665133] [I][mhi_tryset_pm_state] Transition to pm state from:POR to:POR
[ 65.665137] [I][mhi_init_mmio] Initializing MMIO
[ 65.665142] [I][mhi_init_mmio] CHDBOFF:0x300
[ 65.665151] [I][mhi_init_mmio] ERDBOFF:0x700
[ 65.665156] [I][mhi_init_mmio] Programming all MMIO values.
[ 65.786283] [I][mhi_dump_tre] carl_ev evt_state_change mhistate=2
[ 65.786289] [I][mhi_process_ctrl_ev_ring] MHI state change event to state:M0
[ 65.786295] [I][mhi_pm_m0_transition] Entered With State:READY PM_STATE:POR
[ 65.786300] [I][mhi_tryset_pm_state] Transition to pm state from:POR to:M0
[ 65.789734] [I][mhi_dump_tre] carl_ev evt_ee_state execenv=2
[ 65.789739] [I][mhi_process_ctrl_ev_ring] MHI EE received event:AMSS
[ 65.789756] [I][mhi_pm_st_worker] Transition to state:MISSION MODE
[ 65.789767] [I][mhi_pm_st_worker] INVALID_EE -> AMSS
[ 65.789771] [I][mhi_pm_mission_mode_transition] Processing Mission Mode Transition
[ 65.789787] [I][mhi_init_timesync] No timesync capability found
[ 65.789791] [I][mhi_pm_mission_mode_transition] Adding new devices
[ 65.790570] [I][mhi_dtr_probe] Enter for DTR control channel
[ 65.790577] [I][__mhi_prepare_channel] Entered: preparing channel:18
[ 65.797036] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 65.797051] [I][__mhi_prepare_channel] Chan:18 successfully moved to start state
[ 65.797055] [I][__mhi_prepare_channel] Entered: preparing channel:19
[ 65.802457] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 65.802469] [I][__mhi_prepare_channel] Chan:19 successfully moved to start state
[ 65.802485] [I][mhi_dtr_probe] Exit with ret:0
[ 65.802748] [I][mhi_netdev_enable_iface] Prepare the channels for transfer
[ 65.802772] [I][__mhi_prepare_channel] Entered: preparing channel:100
[ 65.825279] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 65.825293] [I][__mhi_prepare_channel] Chan:100 successfully moved to start state
[ 65.825297] [I][__mhi_prepare_channel] Entered: preparing channel:101
[ 65.835565] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 65.835578] [I][__mhi_prepare_channel] Chan:101 successfully moved to start state
[ 65.839141] [I][mhi_netdev_enable_iface] Exited.
[ 65.839875] rmnet_vnd_register_device(rmnet_mhi0.1)=0
[ 65.843278] net rmnet_mhi0 rmnet_mhi0.1: NSS context created
[ 65.861808] [I][mhi_pm_mission_mode_transition] Exit with ret:0
[ 68.625595] [I][__mhi_prepare_channel] Entered: preparing channel:12
[ 68.634610] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 68.634622] [I][__mhi_prepare_channel] Chan:12 successfully moved to start state
[ 68.634625] [I][__mhi_prepare_channel] Entered: preparing channel:13
[ 68.644978] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 68.644987] [I][__mhi_prepare_channel] Chan:13 successfully moved to start state
[ 69.170666] net rmnet_mhi0: link_state 0x0 -> 0x1
[ 69.177035] [I][mhi_netdev_open] Opened net dev interface
[ 71.655431] [I][mhi_netdev_open] Opened net dev interface
root@OpenWrt:~# ./quectel-CM &
[04-02_04:14:12:134] Quectel_QConnectManager_Linux_V1.6.0.5
[04-02_04:14:12:134] Find /sys/bus/usb/devices/4-1 idVendor=0x2c7c idProduct=0x800, bus=0x004, dev=0x002
[04-02_04:14:12:135] network interface '' or qmidev '' is not exist
[04-02_04:14:12:135] netcard driver = pcie_mhi, driver version = V1.3.0.6
[04-02_04:14:12:135] Modem works in MBIM mode
[04-02_04:14:12:135] apn (null), user (null), passwd (null), auth 0
[04-02_04:14:12:135] IP Proto MBIMContextIPTypeIPv4
[04-02_04:14:12:154] mbim_read_thread is created
sh: can't create /sys/class/net/rmnet_mhi0/mbim/link_state: nonexistent directory
[04-02_04:14:12:156] system(echo 0 > /sys/class/net/rmnet_mhi0/mbim/link_state)=256
[04-02_04:14:12:185] system(ip address flush dev rmnet_mhi0)=0
[04-02_04:14:12:187] system(ip link set dev rmnet_mhi0 down)=0
[04-02_04:14:12:188] mbim_open_device()
[04-02_04:14:12:605] mbim_device_caps_query()
[04-02_04:14:12:610] DeviceId: 869710030002905
[04-02_04:14:12:610] HardwareInfo: 0
[04-02_04:14:12:610] mbim_set_radio_state( 1 )
[04-02_04:14:12:613] HwRadioState: 1, SwRadioState: 1
[04-02_04:14:12:613] mbim_subscriber_status_query()
[04-02_04:14:12:620] SubscriberReadyState NotInitialized -> Initialized
[04-02_04:14:12:620] mbim_register_state_query()
[04-02_04:14:12:625] RegisterState Unknown -> Home
[04-02_04:14:12:625] mbim_packet_service_query()
[04-02_04:14:12:629] PacketServiceState Unknown -> Attached
[04-02_04:14:12:629] mbim_query_connect(sessionID=0)
[04-02_04:14:12:633] ActivationState Unknown -> Deactivated
[04-02_04:14:12:633] mbim_set_connect(onoff=1, sessionID=0)
[ 69.170666] net rmnet_mhi0: link_state 0x0 -> 0x1
[04-02_04:14:12:680] ActivationState Deactivated -> Activated
[ 69.177035] [I][mhi_netdev_open] Opened net dev interface
[04-02_04:14:12:680] mbim_ip_config(sessionID=0)
[04-02_04:14:12:683] < SessionId = 0
[04-02_04:14:12:683] < IPv4ConfigurationAvailable = 0xf
[04-02_04:14:12:683] < IPv6ConfigurationAvailable = 0x0
[04-02_04:14:12:683] < IPv4AddressCount = 0x1
[04-02_04:14:12:683] < IPv4AddressOffset = 0x3c
[04-02_04:14:12:683] < IPv6AddressCount = 0x0
[04-02_04:14:12:683] < IPv6AddressOffset = 0x0
[04-02_04:14:12:683] < IPv4 = 10.129.59.93/30
[04-02_04:14:12:683] < gw = 10.129.59.94
[04-02_04:14:12:683] < dns1 = 211.138.180.2
[04-02_04:14:12:683] < dns2 = 211.138.180.3
[04-02_04:14:12:683] < ipv4 mtu = 1500
sh: can't create /sys/class/net/rmnet_mhi0/mbim/link_state: nonexistent directory
[04-02_04:14:12:684] system(echo 1 > /sys/class/net/rmnet_mhi0/mbim/link_state)=256
[04-02_04:14:12:689] system(ip link set dev rmnet_mhi0 up)=0
[04-02_04:14:12:692] system(ip -4 address flush dev rmnet_mhi0)=0
[04-02_04:14:12:694] system(ip -4 address add 10.129.59.93/30 dev rmnet_mhi0)=0
[04-02_04:14:12:697] system(ip -4 route add default via 10.129.59.94 dev rmnet_mhi0)=0
[04-02_04:14:12:699] system(ip -4 link set dev rmnet_mhi0 mtu 1500)=0
root@OpenWrt:~# ifconfig rmnet_mhi0
rmnet_mhi0 Link encap:UNSPEC HWaddr 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00
UP RUNNING NOARP MTU:1500 Metric:1
RX packets:99379 errors:0 dropped:0 overruns:0 frame:0
TX packets:176569 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:1528181052 (1.4 GiB) TX bytes:62467192 (59.5 MiB)
root@OpenWrt:~# ifconfig rmnet_mhi0.1
rmnet_mhi0.1 Link encap:UNSPEC HWaddr 02-50-F4-00-00-00-00-00-00-00-00-00-00-00-00-00
inet addr:10.129.59.93 Mask:255.255.255.252
inet6 addr: fe80::50:f4ff:fe00:0/64 Scope:Link
UP RUNNING NOARP MTU:1500 Metric:1
RX packets:1089360 errors:0 dropped:0 overruns:0 frame:0
TX packets:176581 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:1521449058 (1.4 GiB) TX bytes:57525792 (54.8 MiB)
# adjust CPU load balancing
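# (rps_cpus takes a hex CPU bitmask: 2 selects CPU1, 4 selects CPU2)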
root@OpenWrt:~# echo 2 > /sys/class/net/rmnet_mhi0/queues/rx-0/rps_cpus
root@OpenWrt:~# echo 4 > /sys/class/net/rmnet_mhi0.1/queues/rx-0/rps_cpus
root@OpenWrt:~# echo 2000 > /proc/sys/net/core/netdev_max_backlog
root@OpenWrt:~# cat /sys/class/net/rmnet_mhi0/queues/rx-0/rps_cpus
2
root@OpenWrt:~# cat /sys/class/net/rmnet_mhi0.1/queues/rx-0/rps_cpus
4
root@OpenWrt:~# cat /proc/sys/net/core/netdev_max_backlog
2000

View File

@@ -0,0 +1,134 @@
First disable "ccflags-y += -DCONFIG_MHI_NETDEV_MBIM" in pcie_mhi/Makefile (build without MBIM support) so the modem works in QMI mode:
root@OpenWrt:~# insmod pcie_mhi.ko
root@OpenWrt:~# dmesg | grep mhi
[ 138.483252] mhi_init Quectel_Linux_PCIE_MHI_Driver_V1.3.0.6
[ 138.492350] mhi_pci_probe pci_dev->name = 0000:01:00.0, domain=0, bus=1, slot=0, vendor=17CB, device=0306
[ 138.497564] mhi_q 0000:01:00.0: BAR 0: assigned [mem 0x20300000-0x20300fff 64bit]
[ 138.506952] mhi_q 0000:01:00.0: enabling device (0140 -> 0142)
[ 138.514562] [I][mhi_init_pci_dev] msi_required = 5, msi_allocated = 5, msi_irq = 63
[ 138.514581] [I][mhi_power_up] dev_state:RESET
[ 138.514587] [I][mhi_async_power_up] Requested to power on
[ 138.514728] [I][mhi_alloc_coherent] size = 114688, dma_handle = 72160000
[ 138.514734] [I][mhi_init_dev_ctxt] mhi_ctxt->ctrl_seg = c221f000
[ 138.515030] [I][mhi_async_power_up] dev_state:RESET ee:AMSS
[ 138.515056] [I][mhi_pm_st_worker] Transition to state:READY
[ 138.515067] [I][mhi_pm_st_worker] INVALID_EE -> AMSS
[ 138.515073] [I][mhi_ready_state_transition] Waiting to enter READY state
[ 138.515210] [I][mhi_async_power_up] Power on setup success
[ 138.515227] [I][mhi_pci_probe] Return successful
[ 138.589013] [I][mhi_ready_state_transition] Device in READY State
[ 138.589029] [I][mhi_intvec_threaded_handlr] device ee:AMSS dev_state:READY, pm_state:POR
[ 138.589038] [I][mhi_intvec_threaded_handlr] device ee:AMSS dev_state:READY, INVALID_EE
[ 138.589041] [I][mhi_tryset_pm_state] Transition to pm state from:POR to:POR
[ 138.589046] [I][mhi_init_mmio] Initializing MMIO
[ 138.589050] [I][mhi_init_mmio] CHDBOFF:0x300
[ 138.589060] [I][mhi_init_mmio] ERDBOFF:0x700
[ 138.589065] [I][mhi_init_mmio] Programming all MMIO values.
[ 138.706124] [I][mhi_dump_tre] carl_ev evt_state_change mhistate=2
[ 138.706132] [I][mhi_process_ctrl_ev_ring] MHI state change event to state:M0
[ 138.706140] [I][mhi_pm_m0_transition] Entered With State:READY PM_STATE:POR
[ 138.706146] [I][mhi_tryset_pm_state] Transition to pm state from:POR to:M0
[ 138.708699] [I][mhi_dump_tre] carl_ev evt_ee_state execenv=2
[ 138.708706] [I][mhi_process_ctrl_ev_ring] MHI EE received event:AMSS
[ 138.708726] [I][mhi_pm_st_worker] Transition to state:MISSION MODE
[ 138.708736] [I][mhi_pm_st_worker] INVALID_EE -> AMSS
[ 138.708742] [I][mhi_pm_mission_mode_transition] Processing Mission Mode Transition
[ 138.708758] [I][mhi_init_timesync] No timesync capability found
[ 138.708764] [I][mhi_pm_mission_mode_transition] Adding new devices
[ 138.709785] [I][mhi_dtr_probe] Enter for DTR control channel
[ 138.709794] [I][__mhi_prepare_channel] Entered: preparing channel:18
[ 138.715378] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 138.715397] [I][__mhi_prepare_channel] Chan:18 successfully moved to start state
[ 138.715403] [I][__mhi_prepare_channel] Entered: preparing channel:19
[ 138.720201] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 138.720218] [I][__mhi_prepare_channel] Chan:19 successfully moved to start state
[ 138.720236] [I][mhi_dtr_probe] Exit with ret:0
[ 138.720590] [I][mhi_netdev_enable_iface] Prepare the channels for transfer
[ 138.720630] [I][__mhi_prepare_channel] Entered: preparing channel:100
[ 138.757230] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 138.757253] [I][__mhi_prepare_channel] Chan:100 successfully moved to start state
[ 138.757259] [I][__mhi_prepare_channel] Entered: preparing channel:101
[ 138.774352] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 138.774370] [I][__mhi_prepare_channel] Chan:101 successfully moved to start state
[ 138.778137] [I][mhi_netdev_enable_iface] Exited.
[ 138.779018] rmnet_vnd_register_device(rmnet_mhi0.1)=0
[ 138.782283] net rmnet_mhi0 rmnet_mhi0.1: NSS context created
[ 138.800865] [I][mhi_pm_mission_mode_transition] Exit with ret:0
root@OpenWrt:~# ./quectel-CM &
root@OpenWrt:~# [04-02_04:12:16:477] Quectel_QConnectManager_Linux_V1.6.0.5
[04-02_04:12:16:477] Find /sys/bus/usb/devices/4-1 idVendor=0x2c7c idProduct=0x800, bus=0x004, dev=0x002
[04-02_04:12:16:478] network interface '' or qmidev '' is not exist
[04-02_04:12:16:478] netcard driver = pcie_mhi, driver version = V1.3.0.6
[04-02_04:12:16:479] qmap_mode = 1, qmap_version = 9, qmap_size = 16384, muxid = 0x81, qmap_netcard = rmnet_mhi0.1
[04-02_04:12:16:479] Modem works in QMI mode
[04-02_04:12:16:505] cdc_wdm_fd = 7
[04-02_04:12:17:506] QmiThreadSendQMITimeout pthread_cond_timeout_np timeout
[04-02_04:12:18:516] Get clientWDS = 19
[04-02_04:12:18:520] Get clientDMS = 1
[04-02_04:12:18:524] Get clientNAS = 3
[04-02_04:12:18:527] Get clientUIM = 1
[04-02_04:12:18:531] Get clientWDA = 1
[04-02_04:12:18:535] requestBaseBandVersion RM500QGLAAR03A01M4G_BETA_20200107F 1 [Dec 30 2019 17:00:00]
[04-02_04:12:18:539] qmap_settings.rx_urb_size = 16384
[04-02_04:12:18:539] qmap_settings.ul_data_aggregation_max_datagrams = 16
[04-02_04:12:18:539] qmap_settings.ul_data_aggregation_max_size = 8192
[04-02_04:12:18:539] qmap_settings.dl_minimum_padding = 0
[04-02_04:12:18:550] requestSetLoopBackState(loopback_state=1, replication_factor=14)
[04-02_04:12:18:557] requestGetSIMStatus SIMStatus: SIM_ABSENT
[04-02_04:12:18:560] requestGetProfile[1] ///0
[04-02_04:12:18:563] requestRegistrationState2 MCC: 0, MNC: 0, PS: Detached, DataCap: UNKNOW
[04-02_04:12:18:565] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED
[04-02_04:12:18:566] ifconfig rmnet_mhi0.1 down
[04-02_04:12:18:571] ifconfig rmnet_mhi0.1 0.0.0.0
ifconfig: SIOCSIFFLAGS: Network is down
[04-02_04:12:18:575] SetLoopBackInd: loopback_state=1, replication_factor=14
[04-02_04:12:18:591] requestSetupDataCall WdsConnectionIPv4Handle: 0xe40182a0
[04-02_04:12:18:601] ifconfig rmnet_mhi0 up
[04-02_04:12:18:607] ifconfig rmnet_mhi0.1 up
[04-02_04:12:18:613] you are use OpenWrt?
[04-02_04:12:18:614] should not calling udhcpc manually?
[04-02_04:12:18:614] should modify /etc/config/network as below?
[04-02_04:12:18:614] config interface wan
[04-02_04:12:18:614] option ifname rmnet_mhi0.1
[04-02_04:12:18:614] option proto dhcp
[04-02_04:12:18:614] should use "/sbin/ifstaus wan" to check rmnet_mhi0.1 's status?
[04-02_04:12:18:614] busybox udhcpc -f -n -q -t 5 -i rmnet_mhi0.1
udhcpc: started, v1.28.3
udhcpc: sending discover
udhcpc: sending select for 192.168.48.171
udhcpc: lease of 192.168.48.171 obtained, lease time 7200
[04-02_04:12:18:809] udhcpc: ifconfig rmnet_mhi0.1 192.168.48.171 netmask 255.255.255.248 broadcast +
[04-02_04:12:18:819] udhcpc: setting default routers: 192.168.48.172
root@OpenWrt:~# ifconfig rmnet_mhi0
rmnet_mhi0 Link encap:Ethernet HWaddr 02:50:F4:00:00:00
inet6 addr: fe80::50:f4ff:fe00:0/64 Scope:Link
UP RUNNING NOARP MTU:1500 Metric:1
RX packets:2 errors:0 dropped:0 overruns:0 frame:0
TX packets:2 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:608 (608.0 B) TX bytes:672 (672.0 B)
root@OpenWrt:~# ifconfig rmnet_mhi0.1
rmnet_mhi0.1 Link encap:UNSPEC HWaddr 02-50-F4-00-00-00-00-00-00-00-00-00-00-00-00-00
inet addr:192.168.48.171 Mask:255.255.255.248
inet6 addr: fe80::50:f4ff:fe00:0/64 Scope:Link
UP RUNNING NOARP MTU:1500 Metric:1
RX packets:2 errors:0 dropped:0 overruns:0 frame:0
TX packets:2 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:592 (592.0 B) TX bytes:656 (656.0 B)
# adjust CPU load balancing
root@OpenWrt:~# echo 2 > /sys/class/net/rmnet_mhi0/queues/rx-0/rps_cpus
root@OpenWrt:~# echo 4 > /sys/class/net/rmnet_mhi0.1/queues/rx-0/rps_cpus
root@OpenWrt:~# echo 2000 > /proc/sys/net/core/netdev_max_backlog
root@OpenWrt:~# cat /sys/class/net/rmnet_mhi0/queues/rx-0/rps_cpus
2
root@OpenWrt:~# cat /sys/class/net/rmnet_mhi0.1/queues/rx-0/rps_cpus
4
root@OpenWrt:~# cat /proc/sys/net/core/netdev_max_backlog
2000

View File

@@ -0,0 +1,14 @@
root@imx6qsabresd:~# ./QLog -p /dev/mhi_DIAG -s log &
root@imx6qsabresd:~# [000.000]QLog Version: Quectel_QLog_Linux&Android_V1.2.4
[ 298.597963] [I][mhi_uci_open] Node open, ref counts 1
[ 298.605601] [I][mhi_uci_open] Starting channel
[ 298.612159] [I][__mhi_prepare_channel] Entered: preparing channel:4
[ 298.629906] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 298.635415] [I][__mhi_prepare_channel] Chan:4 successfully moved to start state
[ 298.642749] [I][__mhi_prepare_channel] Entered: preparing channel:5
[ 298.658043] [I][mhi_dump_tre] carl_ev evt_cmd_comp code=1
[ 298.663543] [I][__mhi_prepare_channel] Chan:5 successfully moved to start state
[000.075]open /dev/mhi_DIAG ttyfd = 3
[000.075]Press CTRL+C to stop catch log.
[000.096]qlog_logfile_create log/20160920_145758_0000.qmdl logfd=4
[005.268]recv: 0M 70K 490B in 5181 msec

View File

@@ -0,0 +1,22 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=quectel-qmi-wwan
PKG_VERSION:=1.2.6
PKG_RELEASE:=1
include $(INCLUDE_DIR)/kernel.mk
include $(INCLUDE_DIR)/package.mk
define KernelPackage/usb-net-qmi-wwan-quectel
SUBMENU:=USB Support
TITLE:=QMI WWAN driver for Quectel modules
DEPENDS:=+kmod-usb-net +kmod-usb-wdm
FILES:=$(PKG_BUILD_DIR)/qmi_wwan_q.ko
AUTOLOAD:=$(call AutoLoad,81,qmi_wwan_q)
endef
define Build/Compile
+$(KERNEL_MAKE) M="$(PKG_BUILD_DIR)" modules
endef
$(eval $(call KernelPackage,usb-net-qmi-wwan-quectel))

View File

@@ -0,0 +1,36 @@
obj-m += qmi_wwan_q.o
PWD := $(shell pwd)
OUTPUTDIR=/lib/modules/`uname -r`/kernel/drivers/net/usb/
ifeq ($(ARCH),)
ARCH := $(shell uname -m)
endif
ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE :=
endif
ifeq ($(KDIR),)
KDIR := /lib/modules/$(shell uname -r)/build
ifeq ($(ARCH),i686)
ifeq ($(wildcard $(KDIR)/arch/$(ARCH)),)
ARCH=i386
endif
endif
endif
ifneq ($(findstring &,${PWD}),)
$(warning "${PWD}")
$(warning "current directory contain special char '&' !")
$(error "please remove it!")
endif
default:
$(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) modules
install: default
cp $(PWD)/qmi_wwan_q.ko /lib/modules/$(shell uname -r)/kernel/drivers/net/usb/
depmod
clean:
rm -rf *~ .tmp_versions modules.order Module.symvers
find . -type f -name "*~" -o -name "*.o" -o -name "*.ko" -o -name "*.cmd" -o -name "*.mod.c" | xargs rm -rf

View File

@@ -0,0 +1,171 @@
Release Notes
[V1.2.6]
Date: 5/25/2023
enhancement:
1. support Linux 6.1
fix:
[V1.2.5]
Date: 5/4/2023
enhancement:
1. support Linux 5.19
2. NSS does not take effect on SPF12.x; support SFE on 9574
fix:
[V1.2.4]
Date: 4/14/2023
enhancement:
1. support SDX7X platform
fix:
[V1.2.3]
Date: 9/20/2022
enhancement:
1. support 9x07(pid 0x030E) platform
fix:
[V1.2.2]
Date: 9/7/2022
enhancement:
1. Optimization: the network card send-queue wakeup is changed from a callback to a tasklet
2. Add the function of returning LAN packets in bridge mode
3. support ndo ioctl on kernel>5.14
4. Allow setting MTU greater than 1500
fix:
[V1.2.1]
Date: 9/26/2021
enhancement:
1. support IPQ5018's NSS
2. use 'qsdk/qca/src/data-kernel/drivers/rmnet-nss/rmnet_nss.c' instead of the bundled rmnet_nss.c,
and qmi_wwan_q.ko must be loaded after rmnet_nss.ko
fix:
[V1.2.0.25]
Date: 9/17/2021
enhancement:
fix:
1. add sdx6x platform support
[V1.2.0.24]
Date: 9/6/2021
enhancement:
fix:
1. add BG95 support
2. support Linux 5.14.0
[V1.2.0.23]
Date: 3/23/2021
enhancement:
fix:
1. add sdx12 platform support
[V1.2.0.22]
Date: 2/5/2021
enhancement:
fix:
1. fix panic (memory-access-after-free) when doing modem reboot stress tests
[V1.2.0.21]
Date: 2/4/2021
enhancement:
1. Code refactoring - QMAP and rmnet
fix:
1. qmap_qmi_wwan_rx_fixup: change skb_dequeue to __skb_dequeue
[V1.2.0.20]
Date: 11/2/2020
enhancement:
fix:
1. LTE-A modems cannot obtain an IP address via DHCP
[V1.2.0.19]
Date: 10/9/2020
enhancement:
fix:
1. X55 can not access internet after usb resume
[V1.2.0.18]
Date: 10/9/2020
enhancement:
fix:
1. X55: rename rmnet_usb0.1 to wwan0_1
1.1 if there is a '.', OpenWrt will treat it as a VLAN interface and auto-create the VLAN
1.2 if there is a '.', Android will treat the name as invalid
1.3 if named rmnet_usb0 on a QCOM SoC, QCOM's netmgr will automatically manage it
[V1.2.0.17]
Date: 9/14/2020
enhancement:
1. Code refactoring - QMAP size and version
fix:
[V1.2.0.16]
Date: 9/14/2020
enhancement:
1. rx_fixup() checks whether there is enough skb_headroom() to fill the Ethernet header
fix:
1. fix "WARNING: suspicious RCU usage"
[V1.2.0.15]
Date: 9/10/2020
enhancement:
fix:
1. fix compile errors on kernel 3.10~3.13
[V1.2.0.14]
Date: 7/24/2020
enhancement:
fix:
1. fix QMAP V5 bug on Big Endian CPU
[V1.2.0.13]
Date: 6/22/2020
enhancement:
fix:
1. fix no data traffic when doing upload TPUT tests
[V1.2.0.12]
Date: 5/29/2020
enhancement:
fix:
1. IPQ8074: when hyfi is enabled, quectel-CM will crash the system
[V1.2.0.9]
Date: 5/13/2020
enhancement:
fix:
1. IPQ8074: enable CONFIG_QCA_NSS_DRV by CONFIG_PINCTRL_IPQ807x (from CONFIG_ARCH_IPQ807x)
[V1.2.0.8]
Date: 5/9/2020
enhancement:
fix:
1. fix compile errors on kernel V3.10
[V1.2.0.7]
Date: 4/25/2020
enhancement:
1. X55 support bridge mode
fix:
[V1.2.0.6]
Date: 4/20/2020
enhancement:
1. add 64-bit stats (stats64), otherwise the rx/tx statistics wrap to 0 when traffic exceeds 4 GB
2. do not use skb_clone; it makes the CPU load of QCOM's NSS and SFE very high
fix:
[V1.2.0.5]
Date: 4/8/2020
enhancement:
1. add attribute link_state, change carrier state according to link_state;
quectel-CM will set link_state to 1 when the QMI setup call succeeds.
fix:
[V1.2.0.4]
Date: 4/8/2020
enhancement:
1. support X55's QMAP V5
fix:

View File

@@ -0,0 +1,68 @@
1. Enable QUECTEL_BRIDGE_MODE in qmi_wwan_q.c
2. Guide to use ....
Welcome to Buildroot for the Orange Pi Zero
OrangePi_Zero login: root
# insmod qmi_wwan_q.ko
[ 90.591841] qmi_wwan_q 3-1:1.4: cdc-wdm0: USB WDM device
[ 90.597185] qmi_wwan_q 3-1:1.4: Quectel EC25&EC21&EG91&EG95&EG06&EP06&EM06&EG12&EP12&EM12&EG16&EG18&BG96&AG35 work on RawIP mode
[ 90.610176] qmi_wwan_q 3-1:1.4: rx_urb_size = 32768
[ 90.620589] qmi_wwan_q 3-1:1.4 wwan0: register 'qmi_wwan_q' at usb-1c1b000.usb-1, WWAN/QMI device, 96:42:59:a9:f5:e4
[ 90.631293] usbcore: registered new interface driver qmi_wwan_q
# brctl addbr br0
# brctl addif br0 eth0
[ 100.413071] br0: port 1(eth0) entered blocking state
[ 100.418081] br0: port 1(eth0) entered disabled state
[ 100.423356] device eth0 entered promiscuous mode
# brctl addif br0 wwan0
[ 102.696724] br0: port 2(wwan0) entered blocking state
[ 102.701823] br0: port 2(wwan0) entered disabled state
[ 102.707182] device wwan0 entered promiscuous mode
# ifconfig br0 up
[ 110.405561] br0: port 1(eth0) entered blocking state
[ 110.410567] br0: port 1(eth0) entered forwarding state
# brctl show
bridge name bridge id STP enabled interfaces
br0 8000.0242b22e80d8 no eth0
wwan0
# ./quectel-CM &
# [01-01_06:37:02:386] Quectel_QConnectManager_Linux_V1.4.3
[01-01_06:37:02:388] Find /sys/bus/usb/devices/3-1 idVendor=0x2c7c idProduct=0x512
[01-01_06:37:02:388] Auto find qmichannel = /dev/cdc-wdm0
[01-01_06:37:02:388] Auto find usbnet_adapter = wwan0
[01-01_06:37:02:389] qmap_mode = 1, muxid = 0x81, qmap_netcard = wwan0
[01-01_06:37:02:389] Modem works in QMI mode
[01-01_06:37:02:389] qmap_mode = 1, muxid = 0x81, qmap_netcard = wwan0
[01-01_06:37:02:394] cdc_wdm_fd = 7
[01-01_06:37:02:561] Get clientWDS = 18
[01-01_06:37:02:633] Get clientDMS = 1
[01-01_06:37:02:689] Get clientNAS = 2
[01-01_06:37:02:753] Get clientUIM = 1
[01-01_06:37:02:817] Get clientWDA = 1
[01-01_06:37:02:881] requestBaseBandVersion EM12GBATE1127
[01-01_06:37:02:945] qmap_settings.rx_urb_size = 16384
[01-01_06:37:03:201] requestGetSIMStatus SIMStatus: SIM_READY
[01-01_06:37:03:265] requestGetProfile[1] ctnet///0
[01-01_06:37:03:329] requestRegistrationState2 MCC: 460, MNC: 11, PS: Attached, DataCap: LTE
[01-01_06:37:03:393] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED
[01-01_06:37:03:457] requestSetupDataCall WdsConnectionIPv4Handle: 0x192a5ed0
[01-01_06:37:03:717] ifconfig wwan0 up
[01-01_06:37:03:747] echo '0x64b69855' > /sys/module/qmi_wwan_q/parameters/bridge_ipv4
[ 117.030116] net wwan0: link_state 0x0 -> 0x1
[ 117.041259] br0: port 2(wwan0) entered blocking state
[ 117.046326] br0: port 2(wwan0) entered forwarding state
[ 117.336688] net wwan0: sip = 100.182.152.85, tip=100.182.152.86, ipv4=100.182.152.85
[ 121.612281] random: crng init done
[ 128.143645] net wwan0: PC Mac Address: 5e:6b:82:fa:ab:c3
[ 128.151936] net wwan0: rx_pkts=1, rx_len=312
[ 128.203578] net wwan0: PC Mac Address: 5e:6b:82:fa:ab:c3
[ 131.012891] net wwan0: sip = 100.182.152.85, tip=100.182.152.86, ipv4=100.182.152.85
[ 131.341780] net wwan0: rx_pkts=1, rx_len=316
[ 131.434642] net wwan0: rx_pkts=1, rx_len=1404
[ 131.439416] net wwan0: rx_pkts=3, rx_len=4212
[ 131.512782] net wwan0: rx_pkts=4, rx_len=5616
[ 131.535345] net wwan0: rx_pkts=7, rx_len=9828
[ 133.778699] net wwan0: rx_pkts=8, rx_len=11232
[ 134.143941] net wwan0: rx_pkts=9, rx_len=12636
[ 140.053957] net wwan0: rx_pkts=11, rx_len=15444

View File

@@ -0,0 +1,234 @@
1. Enable QUECTEL_BRIDGE_MODE in qmi_wwan_q.c
2. set qmap_mode to 4
3. if you want to add wwan0.2 to br2 and wwan0.3 to br3,
set bridge_mode to BIT(1)|BIT(2)
4. Guide to use ....
# insmod qmi_wwan_q.ko qmap_mode=4 bridge_mode=6
[243467.331669] qmi_wwan_q 3-1:1.4: cdc-wdm0: USB WDM device
[243467.337136] qmi_wwan_q 3-1:1.4: Quectel EC25&EC21&EG91&EG95&EG06&EP06&EM06&EG12&EP12&EM12&EG16&EG18&BG96&AG35 work on RawIP mode
[243467.349471] qmi_wwan_q 3-1:1.4: rx_urb_size = 32768
[243467.364803] qmi_wwan_q 3-1:1.4 wwan0: register 'qmi_wwan_q' at usb-1c1b000.usb-1, WWAN/QMI device, 96:42:59:a9:f5:e4
[243467.376025] net wwan0: qmap_register_device wwan0.1
[243467.381658] net wwan0: qmap_register_device wwan0.2
[243467.387281] net wwan0: qmap_register_device wwan0.3
[243467.392851] net wwan0: qmap_register_device wwan0.4
[243467.398106] usbcore: registered new interface driver qmi_wwan_q
# cat /sys/class/net/wwan0.2/bridge_mode
1
# cat /sys/class/net/wwan0.3/bridge_mode
1
# brctl addbr br2
# brctl addif br2 wwan0.2
[243492.518563] br2: port 1(wwan0.2) entered blocking state
[243492.523888] br2: port 1(wwan0.2) entered disabled state
[243492.535948] device wwan0.2 entered promiscuous mode
# brctl addbr br3
# brctl addif br3 wwan0.3
[243507.486717] br3: port 1(wwan0.3) entered blocking state
[243507.492248] br3: port 1(wwan0.3) entered disabled state
[243507.497982] device wwan0.3 entered promiscuous mode
# brctl show
bridge name bridge id STP enabled interfaces
br2 8000.964259a9f5e4 no wwan0.2
br3 8000.964259a9f5e4 no wwan0.3
# ./quectel-qmi-proxy &
# Find /sys/bus/usb/devices/3-1 idVendor=2c7c idProduct=0512
Find /sys/bus/usb/devices/3-1:1.4/usbmisc/cdc-wdm0
Will use cdc-wdm /dev/cdc-wdm0
qmi_proxy_init enter
qmi_proxy_loop enter
link_prot 2
ul_data_aggregation_protocol 5
dl_data_aggregation_protocol 5
dl_data_aggregation_max_datagrams 32
dl_data_aggregation_max_size 16384
ul_data_aggregation_max_datagrams 16
ul_data_aggregation_max_size 3072
qmi_proxy_init finished, rx_urb_size is 16384
local server: quectel-qmi-proxy sockfd = 4
qmi_start_server: qmi_proxy_server_fd = 4
# ./quectel-CM -n 2 &
# [01-04_02:13:53:053] Quectel_QConnectManager_Linux_V1.4.3
[01-04_02:13:53:056] Find /sys/bus/usb/devices/3-1 idVendor=0x2c7c idProduct=0x512
[01-04_02:13:53:056] Auto find qmichannel = /dev/cdc-wdm0
[01-04_02:13:53:056] Auto find usbnet_adapter = wwan0
[01-04_02:13:53:056] qmap_mode = 4, muxid = 0x82, qmap_netcard = wwan0.2
[01-04_02:13:53:057] Modem works in QMI mode
[01-04_02:13:53:057] qmap_mode = 4, muxid = 0x82, qmap_netcard = wwan0.2
+++ ClientFd=5
[01-04_02:13:53:058] connect to quectel-qmi-proxy sockfd = 7
[01-04_02:13:53:058] cdc_wdm_fd = 7
+++ ClientFd=5 QMIType=1 ClientId=18
[01-04_02:13:53:130] Get clientWDS = 18
+++ ClientFd=5 QMIType=2 ClientId=1
[01-04_02:13:53:194] Get clientDMS = 1
+++ ClientFd=5 QMIType=3 ClientId=2
[01-04_02:13:53:258] Get clientNAS = 2
+++ ClientFd=5 QMIType=11 ClientId=2
[01-04_02:13:53:333] Get clientUIM = 2
[01-04_02:13:53:386] requestBaseBandVersion EM12GBATE1127
[01-04_02:13:53:642] requestGetSIMStatus SIMStatus: SIM_READY
[01-04_02:13:53:706] requestGetProfile[2] IMS///0
[01-04_02:13:53:770] requestRegistrationState2 MCC: 460, MNC: 11, PS: Attached, DataCap: LTE
[01-04_02:13:53:841] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED
[01-04_02:13:54:058] requestSetupDataCall WdsConnectionIPv4Handle: 0x78a3aba0
[243527.630628] net wwan0: link_state 0x0 -> 0x2
[01-04_02:13:54:319] ifconfig wwan0 up
[01-04_02:13:54:325] ifconfig wwan0.2 up
[01-04_02:13:54:330] echo '0x645026c8' > /sys/class/net/wwan0.2/bridge_ipv4
# udhcpc -i br2
udhcpc: started, v1.29.3
[243532.653027] br2: port 1(wwan0.2) entered blocking state
[243532.658384] br2: port 1(wwan0.2) entered forwarding state
udhcpc: sending discover
[243532.784337] wwan0.2 PC Mac Address: 96:42:59:a9:f5:e4
[243532.794813] net wwan0: rx_pkts=1, rx_len=312
udhcpc: sending select for 100.80.38.200
[243532.894325] wwan0.2 PC Mac Address: 96:42:59:a9:f5:e4
udhcpc: lease of 100.80.38.200 obtained, lease time 7200
deleting routers
adding dns 202.102.213.68
adding dns 61.132.163.68
# ./quectel-CM -n 3 &
# [01-04_02:14:03:645] Quectel_QConnectManager_Linux_V1.4.3
[01-04_02:14:03:648] Find /sys/bus/usb/devices/3-1 idVendor=0x2c7c idProduct=0x512
[01-04_02:14:03:648] Auto find qmichannel = /dev/cdc-wdm0
[01-04_02:14:03:648] Auto find usbnet_adapter = wwan0
[01-04_02:14:03:649] qmap_mode = 4, muxid = 0x83, qmap_netcard = wwan0.3
[01-04_02:14:03:649] Modem works in QMI mode
[01-04_02:14:03:649] qmap_mode = 4, muxid = 0x83, qmap_netcard = wwan0.3
[01-04_02:14:03:650] connect to quectel-qmi-proxy sockfd = 7
+++ ClientFd=6
[01-04_02:14:03:650] cdc_wdm_fd = 7
+++ ClientFd=6 QMIType=1 ClientId=19
[01-04_02:14:03:722] Get clientWDS = 19
+++ ClientFd=6 QMIType=2 ClientId=2
[01-04_02:14:03:786] Get clientDMS = 2
+++ ClientFd=6 QMIType=3 ClientId=3
[01-04_02:14:03:850] Get clientNAS = 3
+++ ClientFd=6 QMIType=11 ClientId=3
[01-04_02:14:03:914] Get clientUIM = 3
[01-04_02:14:03:978] requestBaseBandVersion EM12GBATE1127
[01-04_02:14:04:235] requestGetSIMStatus SIMStatus: SIM_READY
[01-04_02:14:04:298] requestGetProfile[3] lte///0
[01-04_02:14:04:362] requestRegistrationState2 MCC: 460, MNC: 11, PS: Attached, DataCap: LTE
[01-04_02:14:04:426] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED
[01-04_02:14:04:555] requestSetupDataCall WdsConnectionIPv4Handle: 0x78a5c850
[243538.126755] net wwan0: link_state 0x2 -> 0x6
[01-04_02:14:04:815] ifconfig wwan0 up
[01-04_02:14:04:824] ifconfig wwan0.3 up
[01-04_02:14:04:829] echo '0x64548ae0' > /sys/class/net/wwan0.3/bridge_ipv4
# udhcpc -i br3
udhcpc: started, v1.29.3
[243541.850178] br3: port 1(wwan0.3) entered blocking state
[243541.855509] br3: port 1(wwan0.3) entered forwarding state
udhcpc: sending discover
[243541.976693] wwan0.3 PC Mac Address: 96:42:59:a9:f5:e4
udhcpc: sending select for 100.84.138.224
[243542.056668] wwan0.3 PC Mac Address: 96:42:59:a9:f5:e4
udhcpc: lease of 100.84.138.224 obtained, lease time 7200
deleting routers
adding dns 202.102.213.68
adding dns 61.132.163.68
# ./quectel-CM -n 1 &
# [01-04_02:14:12:742] Quectel_QConnectManager_Linux_V1.4.3
[01-04_02:14:12:744] Find /sys/bus/usb/devices/3-1 idVendor=0x2c7c idProduct=0x512
[01-04_02:14:12:745] Auto find qmichannel = /dev/cdc-wdm0
[01-04_02:14:12:745] Auto find usbnet_adapter = wwan0
[01-04_02:14:12:745] qmap_mode = 4, muxid = 0x81, qmap_netcard = wwan0.1
[01-04_02:14:12:745] Modem works in QMI mode
[01-04_02:14:12:746] qmap_mode = 4, muxid = 0x81, qmap_netcard = wwan0.1
[01-04_02:14:12:746] connect to quectel-qmi-proxy sockfd = 7
+++ ClientFd=7
[01-04_02:14:12:746] cdc_wdm_fd = 7
+++ ClientFd=7 QMIType=1 ClientId=20
[01-04_02:14:12:842] Get clientWDS = 20
+++ ClientFd=7 QMIType=2 ClientId=3
[01-04_02:14:12:906] Get clientDMS = 3
+++ ClientFd=7 QMIType=3 ClientId=4
[01-04_02:14:12:970] Get clientNAS = 4
+++ ClientFd=7 QMIType=11 ClientId=4
[01-04_02:14:13:034] Get clientUIM = 4
[01-04_02:14:13:098] requestBaseBandVersion EM12GBATE1127
[01-04_02:14:13:354] requestGetSIMStatus SIMStatus: SIM_READY
[01-04_02:14:13:418] requestGetProfile[1] ctnet///0
[01-04_02:14:13:483] requestRegistrationState2 MCC: 460, MNC: 11, PS: Attached, DataCap: LTE
[01-04_02:14:13:546] requestQueryDataCall IPv4ConnectionStatus: DISCONNECTED
[01-04_02:14:13:610] requestSetupDataCall WdsConnectionIPv4Handle: 0x78a92b30
[243547.182801] net wwan0: link_state 0x6 -> 0x7
[01-04_02:14:13:874] ifconfig wwan0 up
[01-04_02:14:13:880] ifconfig wwan0.1 up
[01-04_02:14:13:885] busybox udhcpc -f -n -q -t 5 -i wwan0.1
udhcpc: started, v1.29.3
udhcpc: sending discover
udhcpc: sending select for 10.175.212.85
udhcpc: lease of 10.175.212.85 obtained, lease time 7200
[01-04_02:14:14:175] deleting routers
[01-04_02:14:14:194] adding dns 202.102.213.68
[01-04_02:14:14:195] adding dns 61.132.163.68
# ifconfig
br2 Link encap:Ethernet HWaddr 96:42:59:A9:F5:E4
inet addr:100.80.38.200 Bcast:100.80.38.207 Mask:255.255.255.240
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:2 errors:0 dropped:0 overruns:0 frame:0
TX packets:2 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:612 (612.0 B) TX bytes:684 (684.0 B)
br3 Link encap:Ethernet HWaddr 96:42:59:A9:F5:E4
inet addr:100.84.138.224 Bcast:100.84.138.255 Mask:255.255.255.192
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:2 errors:0 dropped:0 overruns:0 frame:0
TX packets:2 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:612 (612.0 B) TX bytes:684 (684.0 B)
wwan0 Link encap:Ethernet HWaddr 96:42:59:A9:F5:E4
UP BROADCAST RUNNING NOARP MULTICAST MTU:1500 Metric:1
RX packets:0 errors:0 dropped:0 overruns:0 frame:0
TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
wwan0.1 Link encap:Ethernet HWaddr 96:42:59:A9:F5:E4
inet addr:10.175.212.85 Bcast:10.175.212.87 Mask:255.255.255.252
UP BROADCAST RUNNING NOARP MULTICAST MTU:1500 Metric:1
RX packets:2 errors:0 dropped:0 overruns:0 frame:0
TX packets:2 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:612 (612.0 B) TX bytes:664 (664.0 B)
wwan0.2 Link encap:Ethernet HWaddr 96:42:59:A9:F5:E4
UP BROADCAST RUNNING NOARP MULTICAST MTU:1500 Metric:1
RX packets:2 errors:0 dropped:0 overruns:0 frame:0
TX packets:2 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:612 (612.0 B) TX bytes:664 (664.0 B)
wwan0.3 Link encap:Ethernet HWaddr 96:42:59:A9:F5:E4
UP BROADCAST RUNNING NOARP MULTICAST MTU:1500 Metric:1
RX packets:2 errors:0 dropped:0 overruns:0 frame:0
TX packets:2 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:612 (612.0 B) TX bytes:664 (664.0 B)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,424 @@
/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/hashtable.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <qca-nss-drv/nss_api_if.h>
#include <linux/rmnet_nss.h>
#define RMNET_NSS_HASH_BITS 8
#define hash_add_ptr(table, node, key) \
hlist_add_head(node, &table[hash_ptr(key, HASH_BITS(table))])
static DEFINE_HASHTABLE(rmnet_nss_ctx_hashtable, RMNET_NSS_HASH_BITS);
struct rmnet_nss_ctx {
struct hlist_node hnode;
struct net_device *rmnet_dev;
struct nss_rmnet_rx_handle *nss_ctx;
};
enum __rmnet_nss_stat {
RMNET_NSS_RX_ETH,
RMNET_NSS_RX_FAIL,
RMNET_NSS_RX_NON_ETH,
RMNET_NSS_RX_BUSY,
RMNET_NSS_TX_NO_CTX,
RMNET_NSS_TX_SUCCESS,
RMNET_NSS_TX_FAIL,
RMNET_NSS_TX_NONLINEAR,
RMNET_NSS_TX_BAD_IP,
RMNET_NSS_EXCEPTIONS,
RMNET_NSS_EX_BAD_HDR,
RMNET_NSS_EX_BAD_IP,
RMNET_NSS_EX_SUCCESS,
RMNET_NSS_TX_BAD_FRAGS,
RMNET_NSS_TX_LINEARIZE_FAILS,
RMNET_NSS_TX_NON_ZERO_HEADLEN_FRAGS,
RMNET_NSS_TX_BUSY_LOOP,
RMNET_NSS_NUM_STATS,
};
static unsigned long rmnet_nss_stats[RMNET_NSS_NUM_STATS];
#define RMNET_NSS_STAT(name, counter, desc) \
module_param_named(name, rmnet_nss_stats[counter], ulong, 0444); \
MODULE_PARM_DESC(name, desc)
RMNET_NSS_STAT(rmnet_nss_rx_ethernet, RMNET_NSS_RX_ETH,
"Number of Ethernet headers successfully removed");
RMNET_NSS_STAT(rmnet_nss_rx_fail, RMNET_NSS_RX_FAIL,
"Number of Ethernet headers that could not be removed");
RMNET_NSS_STAT(rmnet_nss_rx_non_ethernet, RMNET_NSS_RX_NON_ETH,
"Number of non-Ethernet packets received");
RMNET_NSS_STAT(rmnet_nss_rx_busy, RMNET_NSS_RX_BUSY,
"Number of packets dropped decause rmnet_data device was busy");
RMNET_NSS_STAT(rmnet_nss_tx_slow, RMNET_NSS_TX_NO_CTX,
"Number of packets sent over non-NSS-accelerated rmnet device");
RMNET_NSS_STAT(rmnet_nss_tx_fast, RMNET_NSS_TX_SUCCESS,
"Number of packets sent over NSS-accelerated rmnet device");
RMNET_NSS_STAT(rmnet_nss_tx_fail, RMNET_NSS_TX_FAIL,
"Number of packets that NSS could not transmit");
RMNET_NSS_STAT(rmnet_nss_tx_nonlinear, RMNET_NSS_TX_NONLINEAR,
"Number of non linear sent over NSS-accelerated rmnet device");
RMNET_NSS_STAT(rmnet_nss_tx_invalid_ip, RMNET_NSS_TX_BAD_IP,
"Number of ingress packets with invalid IP headers");
RMNET_NSS_STAT(rmnet_nss_tx_invalid_frags, RMNET_NSS_TX_BAD_FRAGS,
"Number of ingress packets with invalid frag format");
RMNET_NSS_STAT(rmnet_nss_tx_linearize_fail, RMNET_NSS_TX_LINEARIZE_FAILS,
"Number of ingress packets where linearize in tx fails");
RMNET_NSS_STAT(rmnet_nss_tx_exceptions, RMNET_NSS_EXCEPTIONS,
"Number of times our DL exception handler was invoked");
RMNET_NSS_STAT(rmnet_nss_exception_non_ethernet, RMNET_NSS_EX_BAD_HDR,
"Number of non-Ethernet exception packets");
RMNET_NSS_STAT(rmnet_nss_exception_invalid_ip, RMNET_NSS_EX_BAD_IP,
"Number of exception packets with invalid IP headers");
RMNET_NSS_STAT(rmnet_nss_exception_success, RMNET_NSS_EX_SUCCESS,
"Number of exception packets handled successfully");
RMNET_NSS_STAT(rmnet_nss_tx_non_zero_headlen_frags, RMNET_NSS_TX_NON_ZERO_HEADLEN_FRAGS,
"Number of packets with non zero headlen");
RMNET_NSS_STAT(rmnet_nss_tx_busy_loop, RMNET_NSS_TX_BUSY_LOOP,
"Number of times tx packets busy looped");
static void rmnet_nss_inc_stat(enum __rmnet_nss_stat stat)
{
if (stat >= 0 && stat < RMNET_NSS_NUM_STATS)
rmnet_nss_stats[stat]++;
}
static struct rmnet_nss_ctx *rmnet_nss_find_ctx(struct net_device *dev)
{
struct rmnet_nss_ctx *ctx;
struct hlist_head *bucket;
u32 hash;
hash = hash_ptr(dev, HASH_BITS(rmnet_nss_ctx_hashtable));
bucket = &rmnet_nss_ctx_hashtable[hash];
hlist_for_each_entry(ctx, bucket, hnode) {
if (ctx->rmnet_dev == dev)
return ctx;
}
return NULL;
}
static void rmnet_nss_free_ctx(struct rmnet_nss_ctx *ctx)
{
if (ctx) {
hash_del(&ctx->hnode);
nss_rmnet_rx_xmit_callback_unregister(ctx->nss_ctx);
nss_rmnet_rx_destroy_sync(ctx->nss_ctx);
kfree(ctx);
}
}
/* Pull off an ethernet header, if possible */
static int rmnet_nss_ethhdr_pull(struct sk_buff *skb)
{
if (!skb->protocol || skb->protocol == htons(ETH_P_802_3)) {
void *ret = skb_pull(skb, sizeof(struct ethhdr));
rmnet_nss_inc_stat((ret) ? RMNET_NSS_RX_ETH :
RMNET_NSS_RX_FAIL);
return !ret;
}
rmnet_nss_inc_stat(RMNET_NSS_RX_NON_ETH);
return -1;
}
/* Copy headers to linear section for non linear packets */
static int rmnet_nss_adjust_header(struct sk_buff *skb)
{
struct iphdr *iph;
skb_frag_t *frag;
int bytes = 0;
u8 transport;
if (skb_shinfo(skb)->nr_frags != 1) {
rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_FRAGS);
return -EINVAL;
}
if (skb_headlen(skb)) {
rmnet_nss_inc_stat(RMNET_NSS_TX_NON_ZERO_HEADLEN_FRAGS);
return 0;
}
frag = &skb_shinfo(skb)->frags[0];
iph = (struct iphdr *)(skb_frag_address(frag));
if (iph->version == 4) {
bytes = iph->ihl*4;
transport = iph->protocol;
} else if (iph->version == 6) {
struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;
bytes = sizeof(struct ipv6hdr);
/* Don't have to account for extension headers yet */
transport = ip6h->nexthdr;
} else {
rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_IP);
return -EINVAL;
}
if (transport == IPPROTO_TCP) {
struct tcphdr *th;
th = (struct tcphdr *)((u8 *)iph + bytes);
bytes += th->doff * 4;
} else if (transport == IPPROTO_UDP) {
bytes += sizeof(struct udphdr);
} else {
/* can't do anything else here unfortunately, so linearize */
if (skb_linearize(skb)) {
rmnet_nss_inc_stat(RMNET_NSS_TX_LINEARIZE_FAILS);
return -EINVAL;
} else {
return 0;
}
}
if (bytes > skb_frag_size(frag)) {
rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_FRAGS);
return -EINVAL;
}
skb_push(skb, bytes);
memcpy(skb->data, iph, bytes);
/* subtract to account for skb_push */
skb->len -= bytes;
frag->page_offset += bytes;
skb_frag_size_sub(frag, bytes);
/* subtract to account for skb_frag_size_sub */
skb->data_len -= bytes;
return 0;
}
/* Main downlink handler
* Looks up the NSS context associated with the device. If the context is found,
* we add a dummy Ethernet header with the appropriate protocol field set,
* then pass the packet off to NSS for hardware acceleration.
*/
int rmnet_nss_tx(struct sk_buff *skb)
{
struct ethhdr *eth;
struct rmnet_nss_ctx *ctx;
struct net_device *dev = skb->dev;
nss_tx_status_t rc;
unsigned int len;
u8 version;
if (skb_is_nonlinear(skb)) {
if (rmnet_nss_adjust_header(skb))
goto fail;
else
rmnet_nss_inc_stat(RMNET_NSS_TX_NONLINEAR);
}
version = ((struct iphdr *)skb->data)->version;
ctx = rmnet_nss_find_ctx(dev);
if (!ctx) {
rmnet_nss_inc_stat(RMNET_NSS_TX_NO_CTX);
return -EINVAL;
}
eth = (struct ethhdr *)skb_push(skb, sizeof(*eth));
memset(&eth->h_dest, 0, ETH_ALEN * 2);
if (version == 4) {
eth->h_proto = htons(ETH_P_IP);
} else if (version == 6) {
eth->h_proto = htons(ETH_P_IPV6);
} else {
rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_IP);
goto fail;
}
skb->protocol = htons(ETH_P_802_3);
/* Get length including ethhdr */
len = skb->len;
transmit:
rc = nss_rmnet_rx_tx_buf(ctx->nss_ctx, skb);
if (rc == NSS_TX_SUCCESS) {
/* Increment rmnet_data device stats.
* Don't call rmnet_data_vnd_rx_fixup() to do this, as
* there's no guarantee the skb pointer is still valid.
*/
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
rmnet_nss_inc_stat(RMNET_NSS_TX_SUCCESS);
return 0;
} else if (rc == NSS_TX_FAILURE_QUEUE) {
rmnet_nss_inc_stat(RMNET_NSS_TX_BUSY_LOOP);
goto transmit;
}
fail:
rmnet_nss_inc_stat(RMNET_NSS_TX_FAIL);
kfree_skb(skb);
return 1;
}
/* Called by NSS in the DL exception case.
* Since the packet cannot be sent over the accelerated path, we need to
* handle it. Remove the ethernet header and pass it onward to the stack
* if possible.
*/
void rmnet_nss_receive(struct net_device *dev, struct sk_buff *skb,
struct napi_struct *napi)
{
rmnet_nss_inc_stat(RMNET_NSS_EXCEPTIONS);
if (!skb)
return;
if (rmnet_nss_ethhdr_pull(skb)) {
rmnet_nss_inc_stat(RMNET_NSS_EX_BAD_HDR);
goto drop;
}
/* reset header pointers */
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
skb_reset_mac_header(skb);
/* reset packet type */
skb->pkt_type = PACKET_HOST;
skb->dev = dev;
/* reset protocol type */
switch (skb->data[0] & 0xF0) {
case 0x40:
skb->protocol = htons(ETH_P_IP);
break;
case 0x60:
skb->protocol = htons(ETH_P_IPV6);
break;
default:
rmnet_nss_inc_stat(RMNET_NSS_EX_BAD_IP);
goto drop;
}
rmnet_nss_inc_stat(RMNET_NSS_EX_SUCCESS);
/* Set this so that we don't loop around netif_receive_skb */
skb->cb[0] = 1;
netif_receive_skb(skb);
return;
drop:
kfree_skb(skb);
}
/* Called by NSS in the UL acceleration case.
* We are guaranteed to have an ethernet packet here from the NSS hardware,
* We need to pull the header off and invoke our ndo_start_xmit function
* to handle transmitting the packet to the network stack.
*/
void rmnet_nss_xmit(struct net_device *dev, struct sk_buff *skb)
{
netdev_tx_t ret;
skb_pull(skb, sizeof(struct ethhdr));
rmnet_nss_inc_stat(RMNET_NSS_RX_ETH);
/* NSS takes care of shaping, so bypassing Qdiscs like this is OK */
ret = dev->netdev_ops->ndo_start_xmit(skb, dev);
if (unlikely(ret == NETDEV_TX_BUSY)) {
dev_kfree_skb_any(skb);
rmnet_nss_inc_stat(RMNET_NSS_RX_BUSY);
}
}
/* Create and register an NSS context for an rmnet_data device */
int rmnet_nss_create_vnd(struct net_device *dev)
{
struct rmnet_nss_ctx *ctx;
ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
if (!ctx)
return -ENOMEM;
ctx->rmnet_dev = dev;
ctx->nss_ctx = nss_rmnet_rx_create_sync_nexthop(dev, NSS_N2H_INTERFACE,
NSS_C2C_TX_INTERFACE);
if (!ctx->nss_ctx) {
kfree(ctx);
return -1;
}
nss_rmnet_rx_register(ctx->nss_ctx, rmnet_nss_receive, dev);
nss_rmnet_rx_xmit_callback_register(ctx->nss_ctx, rmnet_nss_xmit);
hash_add_ptr(rmnet_nss_ctx_hashtable, &ctx->hnode, dev);
return 0;
}
/* Unregister and destroy the NSS context for an rmnet_data device */
int rmnet_nss_free_vnd(struct net_device *dev)
{
struct rmnet_nss_ctx *ctx;
ctx = rmnet_nss_find_ctx(dev);
rmnet_nss_free_ctx(ctx);
return 0;
}
static const struct rmnet_nss_cb rmnet_nss = {
.nss_create = rmnet_nss_create_vnd,
.nss_free = rmnet_nss_free_vnd,
.nss_tx = rmnet_nss_tx,
};
int __init rmnet_nss_init(void)
{
pr_err("%s(): initializing rmnet_nss\n", __func__);
RCU_INIT_POINTER(rmnet_nss_callbacks, &rmnet_nss);
return 0;
}
void __exit rmnet_nss_exit(void)
{
struct hlist_node *tmp;
struct rmnet_nss_ctx *ctx;
int bkt;
pr_err("%s(): exiting rmnet_nss\n", __func__);
RCU_INIT_POINTER(rmnet_nss_callbacks, NULL);
/* Tear down all NSS contexts */
hash_for_each_safe(rmnet_nss_ctx_hashtable, bkt, tmp, ctx, hnode)
rmnet_nss_free_ctx(ctx);
}
#if 0
MODULE_LICENSE("GPL v2");
module_init(rmnet_nss_init);
module_exit(rmnet_nss_exit);
#endif

View File

@@ -0,0 +1,26 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=quectel-sprd-pcie
PKG_VERSION:=1.6
PKG_RELEASE:=1
include $(INCLUDE_DIR)/kernel.mk
include $(INCLUDE_DIR)/package.mk
define KernelPackage/sprd-pcie
SUBMENU:=Network Devices
TITLE:=Kernel PCIe driver for SPRD device
DEPENDS:=@PCI_SUPPORT @BROKEN
FILES:=$(PKG_BUILD_DIR)/sprd_pcie.ko
AUTOLOAD:=$(call AutoLoad,41,sprd_pcie)
endef
define KernelPackage/sprd-pcie/description
Kernel module for registering a custom SPRD PCIe platform device.
endef
define Build/Compile
+$(KERNEL_MAKE) M="$(PKG_BUILD_DIR)" modules
endef
$(eval $(call KernelPackage,sprd-pcie))

View File

@@ -0,0 +1,33 @@
#
# Makefile for the sprd staging modem files
#
EXTRA_CFLAGS += -Wno-error -Wno-packed-bitfield-compat
ccflags-y += -DCONFIG_SPRD_PCIE_EP_DEVICE -DCONFIG_SPRD_SIPA -DCONFIG_SPRD_ETHERNET
obj-m += sprd_pcie.o
sprd_pcie-objs := pcie/sprd_pcie_ep_device.o pcie/pcie_host_resource.o pcie/sprd_pcie_quirks.o sipc/sipc.o sipc/sblock.o sipc/sbuf.o \
sipc/sipc_debugfs.o sipc/smem.o sipc/smsg.o sipc/spipe.o sipc/spool.o power_manager/power_manager.o \
sipa/sipa_core.o sipa/sipa_eth.o sipa/sipa_nic.o sipa/sipa_skb_send.o sipa/sipa_skb_recv.o sipa/sipa_dummy.o sipa/sipa_debugfs.o sipa/sipa_dele_cmn.o \
sipa/sipa_phy_v0/sipa_fifo_irq_hal.o sipa/sipa_phy_v0/sipa_common_fifo_hal.o
PWD := $(shell pwd)
ifeq ($(ARCH),)
ARCH := $(shell uname -m)
endif
ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE :=
endif
ifeq ($(KDIR),)
KDIR := /lib/modules/$(shell uname -r)/build
endif
sprd_pcie: clean
$(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) modules
#cp sprd_pcie.ko /tftpboot/
clean:
$(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) clean
find . -name "*.o.ur-safe" | xargs rm -f
install: sprd_pcie
sudo cp sprd_pcie.ko /lib/modules/${shell uname -r}/kernel/drivers/pci/
sudo depmod

View File

@@ -0,0 +1,31 @@
#ifndef _MDM_CTRL_H
#define _MDM_CTRL_H
/*
* The mcd driver offers the modem_ctrl_send_abnormal_to_ap()
* function for other modules. You can use this function to notify the AP
* that an error has been caught; the AP will then handle this error
* and take recovery action.
*/
#include <linux/notifier.h>
enum {
MDM_CTRL_POWER_OFF = 0,
MDM_CTRL_POWER_ON,
MDM_CTRL_WARM_RESET,
MDM_CTRL_COLD_RESET,
MDM_WATCHDOG_RESET,
MDM_ASSERT,
MDM_PANIC,
MDM_CTRL_PCIE_RECOVERY,
MDM_POWER_OFF,
MDM_CTRL_SET_CFG
};
void modem_ctrl_send_abnormal_to_ap(int status);
void modem_ctrl_poweron_modem(int on);
void modem_ctrl_enable_cp_event(void);
int modem_ctrl_register_notifier(struct notifier_block *nb);
void modem_ctrl_unregister_notifier(struct notifier_block *nb);
#endif
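A minimal sketch of how another kernel module might consume this notifier API (illustrative only, not part of the import; the demo_* names are hypothetical, and the include path follows the one used by modem_ctrl.c):
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/printk.h>
#include "../include/mdm_ctrl.h"
static int demo_mdm_event(struct notifier_block *nb, unsigned long event, void *data)
{
	/* event carries one of the MDM_* codes defined above */
	if (event == MDM_ASSERT || event == MDM_PANIC)
		pr_info("demo: modem abnormal event %lu, start recovery\n", event);
	return NOTIFY_OK;
}
static struct notifier_block demo_mdm_nb = {
	.notifier_call = demo_mdm_event,
};
static int __init demo_init(void)
{
	return modem_ctrl_register_notifier(&demo_mdm_nb);
}
static void __exit demo_exit(void)
{
	modem_ctrl_unregister_notifier(&demo_mdm_nb);
}
module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL v2");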

View File

@@ -0,0 +1,49 @@
#ifndef _PCIE_RC_SPRD_H
#define _PCIE_RC_SPRD_H
#include <linux/platform_device.h>
enum sprd_pcie_event {
SPRD_PCIE_EVENT_INVALID = 0,
SPRD_PCIE_EVENT_LINKDOWN = 0x1,
SPRD_PCIE_EVENT_LINKUP = 0x2,
SPRD_PCIE_EVENT_WAKEUP = 0x4,
};
struct sprd_pcie_register_event {
u32 events;
struct platform_device *pdev;
void (*callback)(enum sprd_pcie_event event, void *data);
void *data;
};
/*
* SPRD PCIe root complex (e.g. UD710 SoC) can't support PCI hotplug
* capability. Therefore, the standard hotplug driver can't be used.
*
* Whenever one endpoint is plugged or powered on, the EP driver must
* call sprd_pcie_configure_device() in order to add EP device to system
* and probe EP driver. If one endpoint is unplugged or powered off,
* the EP driver must call sprd_pcie_unconfigure_device() in order to
* remove all PCI devices on PCI bus.
*
* return 0 on success, otherwise return a negative number.
*/
/* dummy sprd api */
static inline int sprd_pcie_configure_device(struct platform_device *pdev) { return 0; }
static inline int sprd_pcie_unconfigure_device(struct platform_device *pdev) { return 0; }
static inline void sprd_pcie_teardown_msi_irq(unsigned int irq) { }
static inline void sprd_pcie_dump_rc_regs(struct platform_device *pdev) { }
static inline int sprd_pcie_register_event(struct sprd_pcie_register_event *reg) { return 0; }
static inline int sprd_pcie_deregister_event(struct sprd_pcie_register_event *reg) { return 0; }
#ifdef CONFIG_SPRD_PCIE_AER
void sprd_pcie_alloc_irq_vectors(struct pci_dev *dev, int *irqs, int services);
#else
static inline void sprd_pcie_alloc_irq_vectors(struct pci_dev *dev, int *irqs,
int services)
{
}
#endif
#endif
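As a rough usage sketch (illustrative only; the demo_* names and the header file name are assumptions), an endpoint driver could register for link events like this:
#include <linux/platform_device.h>
#include <linux/printk.h>
#include "pcie-rc-sprd.h" /* assumed name of the header above */
static void demo_link_event(enum sprd_pcie_event event, void *data)
{
	if (event == SPRD_PCIE_EVENT_LINKDOWN)
		pr_warn("demo: PCIe link went down\n");
}
static struct sprd_pcie_register_event demo_event;
static int demo_register_link_events(struct platform_device *pdev)
{
	/* ask for both link-down and link-up notifications */
	demo_event.events = SPRD_PCIE_EVENT_LINKDOWN | SPRD_PCIE_EVENT_LINKUP;
	demo_event.pdev = pdev;
	demo_event.callback = demo_link_event;
	demo_event.data = NULL;
	return sprd_pcie_register_event(&demo_event);
}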

View File

@@ -0,0 +1,59 @@
#ifndef _SIPA_H_
#define _SIPA_H_
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/if_ether.h>
enum sipa_evt_type {
SIPA_RECEIVE,
SIPA_ENTER_FLOWCTRL,
SIPA_LEAVE_FLOWCTRL,
SIPA_ERROR,
};
typedef void (*sipa_notify_cb)(void *priv, enum sipa_evt_type evt,
unsigned int data);
enum sipa_term_type {
SIPA_TERM_PCIE0 = 0x10,
SIPA_TERM_PCIE1 = 0x11,
SIPA_TERM_PCIE2 = 0x12,
SIPA_TERM_CP0 = 0x4,
SIPA_TERM_CP1 = 0x5,
SIPA_TERM_VCP = 0x6,
SIPA_TERM_MAX = 0x20, /* max 5-bit register */
};
enum sipa_nic_id {
SIPA_NIC_BB0,
SIPA_NIC_BB1,
SIPA_NIC_BB2,
SIPA_NIC_BB3,
SIPA_NIC_BB4,
SIPA_NIC_BB5,
SIPA_NIC_BB6,
SIPA_NIC_BB7,
SIPA_NIC_BB8,
SIPA_NIC_BB9,
SIPA_NIC_BB10,
SIPA_NIC_BB11,
SIPA_NIC_MAX,
};
struct sk_buff *sipa_recv_skb(int *netid, int index);
bool sipa_check_recv_tx_fifo_empty(void);
int sipa_nic_open(enum sipa_term_type src, int netid,
sipa_notify_cb cb, void *priv);
void sipa_nic_close(enum sipa_nic_id nic_id);
int sipa_nic_tx(enum sipa_nic_id nic_id, enum sipa_term_type dst,
int netid, struct sk_buff *skb);
int sipa_nic_rx(int *netid, struct sk_buff **out_skb, int index);
int sipa_nic_rx_has_data(enum sipa_nic_id nic_id);
int sipa_nic_trigger_flow_ctrl_work(enum sipa_nic_id nic_id, int err);
u32 sipa_nic_get_filled_num(void);
void sipa_nic_restore_irq(void);
void sipa_nic_set_tx_fifo_rp(u32 rptr);
#endif
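A minimal usage sketch of the NIC interface declared above (illustrative only; the demo_* names, the netid value 0 and the assumption that sipa_nic_open() returns a nic id are not confirmed by the import):
#include <linux/skbuff.h>
#include <linux/printk.h>
#include "sipa.h" /* assumed header name */
static void demo_sipa_notify(void *priv, enum sipa_evt_type evt, unsigned int data)
{
	if (evt == SIPA_RECEIVE)
		pr_debug("demo: SIPA has data pending\n");
}
static int demo_sipa_send(struct sk_buff *skb)
{
	int nic;
	/* assumed: returns a nic id (>= 0) on success, a negative errno on failure */
	nic = sipa_nic_open(SIPA_TERM_CP0, 0, demo_sipa_notify, NULL);
	if (nic < 0)
		return nic;
	/* send one skb toward the CP on the same netid */
	return sipa_nic_tx((enum sipa_nic_id)nic, SIPA_TERM_CP0, 0, skb);
}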

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,85 @@
/*
* Copyright (C) 2019 Spreadtrum Communications Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __SIPC_BIG_TO_LITTLE_H
#define __SIPC_BIG_TO_LITTLE_H
//#define CONFIG_SIPC_BIG_TO_LITTLE /* sipc little */
#define BL_READB(addr) \
({ unsigned char __v = (*(volatile unsigned char *) (addr)); __v; })
#define BL_WRITEB(b,addr) (void)((*(volatile unsigned char *) (addr)) = (b))
#define BL_GETB(v) ((v))
#define BL_SETB(v, b) ((v) = (b))
#ifdef CONFIG_SIPC_BIG_TO_LITTLE
/* little-endian layout of 0x78563412 in memory:
* 0x12
* 0x34
* 0x56
* 0x78
* read: big 0x12345678 ==> 0x78563412
* write: 0x78563412 ==> 0x12345678 */
#define BL_READW(addr) \
({ unsigned short __t = (*(volatile unsigned short *) (addr)); \
unsigned short __v = ((__t & 0x00ff) << 8) + ((__t & 0xff00) >> 8); \
__v; })
#define BL_READL(addr) \
({ unsigned int __t = (*(volatile unsigned int *) (addr)); \
unsigned int __v = ((__t & 0x000000ff) << 24) + ((__t & 0x0000ff00) << 8) + \
((__t & 0x00ff0000) >> 8) + ((__t & 0xff000000) >> 24); \
__v; })
#define BL_WRITEW(b,addr) \
({ unsigned short __v = (((b) & 0x00ff) << 8) + (((b) & 0xff00) >> 8); \
(*(volatile unsigned short *) (addr)) = __v; })
#define BL_WRITEL(b,addr) \
({ unsigned int __v = (((b) & 0x000000ff) << 24) + (((b) & 0xff00) >> 8) + \
(((b) & 0x00ff0000) >> 8) + (((b) & 0xff000000) >> 24); \
(*(volatile unsigned int *) (addr)) = __v; })
#define BL_GETL(v) \
({unsigned int __v = (((v) & 0x000000ff) << 24) + (((v) & 0x0000ff00) << 8) + \
(((v) & 0x00ff0000) >> 8) + (((v) & 0xff000000) >> 24); \
__v; })
#define BL_SETL(v, b) \
((v) = (((b) & 0x000000ff) << 24) + (((b) & 0x0000ff00) << 8) + \
(((b) & 0x00ff0000) >> 8) + (((b) & 0xff000000) >> 24))
#define BL_GETW(v) \
({unsigned int __v = (((v) & 0x00ff) << 8) + (((v) & 0xff00) >> 8); \
__v; })
#define BL_SETW(v, b) \
((v) = (((b) & 0x00ff) << 8) + (((b) & 0xff00) >> 8))
#else
#define BL_GETW(v) v
#define BL_GETL(v) v
#define BL_SETW(v, b) ((v) = (b))
#define BL_SETL(v, b) ((v) = (b))
#define BL_READW(addr) \
({ unsigned short __v = (*(volatile unsigned short *) (addr)); __v; })
#define BL_READL(addr) \
({ unsigned int __v = (*(volatile unsigned int *) (addr)); __v; })
#define BL_WRITEW(b,addr) (void)((*(volatile unsigned short *) (addr)) = (b))
#define BL_WRITEL(b,addr) (void)((*(volatile unsigned int *) (addr)) = (b))
#endif
#endif
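A small illustration (not part of the import; the struct and field names are hypothetical) of how shared-memory fields are meant to be accessed through these macros, so the byte order is handled in one place regardless of CONFIG_SIPC_BIG_TO_LITTLE:
#include <linux/types.h>
/* #include "sipc_big_to_little.h" -- assumed name of the header above */
struct demo_ring_hdr {
	u32 rd_ptr;
	u32 wr_ptr;
};
/* advance the write pointer of a ring header living in shared memory */
static void demo_ring_push(struct demo_ring_hdr *hdr)
{
	u32 wr = BL_READL(&hdr->wr_ptr);
	BL_WRITEL(wr + 1, &hdr->wr_ptr);
}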

View File

@@ -0,0 +1,184 @@
/*
* Copyright (C) 2019 Spreadtrum Communications Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/* MPM: modem power manager
* PMS: power manage source, which is used to request
* a modem power manage resource.
*/
#ifndef _SPRD_MPM_H
#define _SPRD_MPM_H
/*
* MPM modem power manager source state definitions;
* if in the idle state, we can release
* the related resources (such as PCIe) of the modem.
*/
enum {
SPRD_MPM_IDLE = 0,
SPRD_MPM_BUSY
};
/*
* @sprd_pms: the power manager source data struct,
* used to request a wakelock or a modem resource.
*
* @name: the name of a pms.
* @data: the pointer to the MPM.
* @multitask: whether to support multitasking, default is false.
* false: the source can only be used in a single-task context.
* true: the source can be used in a multitask context.
* @awake: whether to stay awake.
* @awake_cnt: total awake times.
* @pre_awake_cnt: previous awake count.
* @active_cnt: the active counter of the pms.
* @expires: the timer expires value.
* @active_lock: used to protect the active_cnt member.
* @expires_lock: used to protect the expires member.
* @entry: an entry in the list of all pms.
* @wake_timer: used for delayed release of the wakelock.
*/
struct sprd_pms {
const char *name;
void *data;
bool multitask;
bool awake;
unsigned int awake_cnt;
unsigned int pre_awake_cnt;
unsigned int active_cnt;
unsigned long expires;
spinlock_t active_lock;
spinlock_t expires_lock;
struct list_head entry;
struct timer_list wake_timer;
};
/**
* sprd_mpm_create - create a modem power manager source instance.
*
* @dst, which mpm (PSCP, SP, WCN, etc.) will be created.
* @later_idle, will release resource later (in ms).
*/
int sprd_mpm_create(unsigned int dst,
const char *name,
unsigned int later_idle);
/**
* sprd_mpm_init_resource_ops - init resource ops for mpm.
*
* @wait_resource, used to wait request resource ready.
* @request_resource, used to request a resource
* @release_resource, used to release a resource
*/
int sprd_mpm_init_resource_ops(unsigned int dst,
int (*wait_resource)(unsigned int dst,
int timeout),
int (*request_resource)(unsigned int dst),
int (*release_resource)(unsigned int dst));
/**
* sprd_mpm_destroy - destroy a modem power manager source instance.
*
* @dst, which mpm (PSCP, SP, WCN, etc.) will be destroyed.
*/
int sprd_mpm_destroy(unsigned int dst);
/**
* sprd_pms_create - init a pms,
* which a module uses to request a modem power manage resource.
* None of the pms interfaces are safe in multi-thread or multi-cpu contexts;
* if you want to use them in multi-thread contexts, please use the pms_ext interface.
*
* @dst, the pms belong to which mpm.
* @name, the name of this pms.
* @pms, the point of this pms.
* @multitask: support multitask.
*
* Returns: NULL on failure, a valid pointer on success.
*/
struct sprd_pms *sprd_pms_create(unsigned int dst,
const char *name, bool multitask);
/**
* sprd_pms_destroy - destroy a pms.
*
* @pms, the point of this pms.
*/
void sprd_pms_destroy(struct sprd_pms *pms);
/**
* sprd_pms_request_resource - request mpm resource
*
* @pms, the point of this pms.
* @timeout, in ms.
*
* Returns:
* 0 resource ready,
* < 0 resoure not ready,
* -%ERESTARTSYS if it was interrupted by a signal.
*/
int sprd_pms_request_resource(struct sprd_pms *pms, int timeout);
/**
* sprd_pms_release_resource - release mpm resource.
*
* @pms, the point of this pms.
*/
void sprd_pms_release_resource(struct sprd_pms *pms);
/**
* sprd_pms_request_wakelock - request wakelock
*
* @pms, the point of this pms.
*/
void sprd_pms_request_wakelock(struct sprd_pms *pms);
/**
* sprd_pms_release_wakelock - release wakelock
*
* @pms, the point of this pms.
*/
void sprd_pms_release_wakelock(struct sprd_pms *pms);
/**
* sprd_pms_request_wakelock_period -
* request wakelock, which will be auto-released after msec ms.
*
* @pms, the point of this pms.
* @msec, will be auto-released after msec ms.
*/
void sprd_pms_request_wakelock_period(struct sprd_pms *pms, unsigned int msec);
/**
* sprd_pms_release_wakelock_later - release wakelock later.
*
* @pms, the point of this pms.
* @msec, later time (in ms).
*/
void sprd_pms_release_wakelock_later(struct sprd_pms *pms,
unsigned int msec);
/**
* sprd_pms_power_up - just power up, do not wait for the result.
*
* @pms, the point of this pms.
*/
void sprd_pms_power_up(struct sprd_pms *pms);
/**
* sprd_pms_power_down - just power down.
*
* @pms, the point of this pms.
* @immediately, whether immediately power down.
*/
void sprd_pms_power_down(struct sprd_pms *pms, bool immediately);
#endif
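A minimal sketch of the PMS flow documented above (illustrative only; the demo_* names, the dst value and the timeout values are placeholders): create a source, hold the modem resource around the actual I/O, then release it.
#include <linux/errno.h>
#include "../include/sprd_mpm.h" /* assumed include path */
static struct sprd_pms *demo_pms;
static int demo_pms_setup(unsigned int dst)
{
	demo_pms = sprd_pms_create(dst, "demo-pms", false);
	return demo_pms ? 0 : -ENOMEM;
}
static int demo_pms_do_io(void)
{
	int ret;
	/* wait up to 500 ms for the modem resource (e.g. the PCIe link) */
	ret = sprd_pms_request_resource(demo_pms, 500);
	if (ret < 0)
		return ret;
	/* ... access the modem here ... */
	sprd_pms_release_resource(demo_pms);
	/* keep the system awake a little longer for any late interrupt */
	sprd_pms_request_wakelock_period(demo_pms, 100);
	return 0;
}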

View File

@@ -0,0 +1,99 @@
/**
* SPRD ep device driver in host side for Spreadtrum SoCs
*
* Copyright (C) 2019 Spreadtrum Co., Ltd.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 of
* the License as published by the Free Software Foundation.
*
* This program is used to control ep device driver in host side for
* Spreadtrum SoCs.
*/
#ifndef __SPRD_PCIE_EP_DEVICE_H
#define __SPRD_PCIE_EP_DEVICE_H
#include <linux/interrupt.h>
/* host receive msi irq */
enum {
PCIE_MSI_SIPC_IRQ = 0,
PCIE_MSI_REQUEST_RES,
PCIE_MSI_EP_READY_FOR_RESCAN,
PCIE_MSI_RELEASE_RES,
PCIE_MSI_SCANNED_RESPOND,
PCIE_MSI_REMOVE_RESPOND,
PCIE_MSI_IPA,
PCIE_MSI_MAX_IRQ
};
/* host send doorbell irq */
enum {
PCIE_DBELL_SIPC_IRQ = 0,
PCIE_DBEL_EP_SCANNED,
PCIE_DBEL_EP_REMOVING,
PCIE_DBEL_IRQ_MAX
};
enum {
PCIE_EP_MODEM = 0,
/* PCIE_EP_WCN, */
PCIE_EP_NR
};
enum {
PCIE_EP_PROBE = 0,
PCIE_EP_REMOVE,
PCIE_EP_PROBE_BEFORE_SPLIT_BAR
};
#ifdef CONFIG_SPRD_SIPA
enum {
PCIE_IPA_TYPE_MEM = 0,
PCIE_IPA_TYPE_REG
};
#endif
#define MINI_REGION_SIZE 0x10000 /*64 K default */
int sprd_ep_dev_register_notify(int ep,
void (*notify)(int event, void *data),
void *data);
int sprd_ep_dev_unregister_notify(int ep);
int sprd_ep_dev_register_irq_handler(int ep,
int irq,
irq_handler_t handler,
void *data);
int sprd_ep_dev_unregister_irq_handler(int ep, int irq);
int sprd_ep_dev_register_irq_handler_ex(int ep,
int from_irq,
int to_irq,
irq_handler_t handler,
void *data);
int sprd_ep_dev_unregister_irq_handler_ex(int ep,
int from_irq,
int to_irq);
int sprd_ep_dev_set_irq_addr(int ep, void __iomem *irq_addr);
int sprd_ep_dev_raise_irq(int ep, int irq);
int sprd_ep_dev_clear_doolbell_irq(int ep, int irq);
int sprd_ep_dev_set_backup(int ep);
int sprd_ep_dev_clear_backup(int ep);
void __iomem *sprd_ep_map_memory(int ep,
phys_addr_t cpu_addr,
size_t size);
void sprd_ep_unmap_memory(int ep, const void __iomem *bar_addr);
int sprd_ep_dev_pass_smem(int ep, u32 base, u32 size);
int sipa_module_init(struct device *dev);
void sipa_module_exit(void);
int sipa_eth_init(void);
void sipa_eth_exit(void);
int sipa_dummy_init(void);
void sipa_dummy_exit(void);
#ifdef CONFIG_SPRD_SIPA
phys_addr_t sprd_ep_ipa_map(int type, phys_addr_t target_addr, size_t size);
int sprd_ep_ipa_unmap(int type, phys_addr_t cpu_addr);
#endif
#endif
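A minimal sketch of how a host-side client might hook into this EP device API (illustrative only; the demo_* names and the include path are assumptions): register a notify callback for probe/remove and a handler for one MSI vector.
#include <linux/interrupt.h>
#include <linux/printk.h>
#include "../include/sprd_pcie_ep_device.h" /* assumed include path */
static irqreturn_t demo_sipc_irq(int irq, void *data)
{
	/* runs when the EP raises PCIE_MSI_SIPC_IRQ */
	return IRQ_HANDLED;
}
static void demo_ep_notify(int event, void *data)
{
	if (event == PCIE_EP_PROBE)
		pr_info("demo: EP scanned, BARs are usable now\n");
	else if (event == PCIE_EP_REMOVE)
		pr_info("demo: EP removed\n");
}
static int demo_ep_setup(void)
{
	int ret;
	ret = sprd_ep_dev_register_notify(PCIE_EP_MODEM, demo_ep_notify, NULL);
	if (ret)
		return ret;
	return sprd_ep_dev_register_irq_handler(PCIE_EP_MODEM, PCIE_MSI_SIPC_IRQ,
						demo_sipc_irq, NULL);
}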

View File

@@ -0,0 +1,107 @@
/*
* Copyright (C) 2019 Spreadtrum Communications Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/* mpms: modem powermanger source */
#ifndef _SPRD_PCIE_RESOURCE_H
#define _SPRD_PCIE_RESOURCE_H
#ifdef CONFIG_SPRD_PCIE_EP_DEVICE
#include <linux/platform_device.h>
#endif
#if 0
//#undef pr_debug
//#define pr_debug pr_emerg
#undef pr_info
#define pr_info pr_emerg
#undef pr_err
#define pr_err pr_emerg
#undef dev_dbg
#define dev_dbg dev_emerg
#undef dev_info
#define dev_info dev_emerg
#undef dev_err
#define dev_err dev_emerg
#endif
#if defined(CONFIG_SPRD_PCIE_EP_DEVICE) || defined(CONFIG_PCIE_EPF_SPRD)
/*
* sprd_pcie_wait_resource
* Returns:
* 0 resource ready,
* < 0 resoure not ready,
* -%ERESTARTSYS if it was interrupted by a signal.
*/
int sprd_pcie_wait_resource(u32 dst, int timeout);
int sprd_pcie_request_resource(u32 dst);
int sprd_pcie_release_resource(u32 dst);
int sprd_pcie_resource_trash(u32 dst);
bool sprd_pcie_is_defective_chip(void);
#else
/* dummy functions */
static inline int sprd_pcie_wait_resource(u32 dst, int timeout) {return 0; }
static inline int sprd_pcie_request_resource(u32 dst) {return 0; }
static inline int sprd_pcie_release_resource(u32 dst) {return 0; }
static inline int sprd_pcie_resource_trash(u32 dst) {return 0; }
static inline bool sprd_pcie_is_defective_chip(void) {return false; }
#endif
#ifdef CONFIG_PCIE_EPF_SPRD
int sprd_pcie_resource_client_init(u32 dst, u32 ep_fun);
int sprd_register_pcie_resource_first_ready(u32 dst,
void (*notify)(void *p),
void *data);
#endif
#ifdef CONFIG_SPRD_PCIE_EP_DEVICE
int sprd_pcie_resource_host_init(u32 dst, u32 ep_dev,
struct platform_device *pcie_dev);
/*
* sprd_pcie_resource_reboot_ep
* reboot ep contains rescan ep device.
*/
void sprd_pcie_resource_reboot_ep(u32 dst);
/*
* sprd_pcie_wait_load_resource
* When the feature CONFIG_PCIE_SPRD_SPLIT_BAR is enabled,
* the PCIe bus is scanned twice during the host-side boot process.
* After the first scan, the ep only has 2 BARs that can be used for
* memory mapping and the pcie resource is not completely ready,
* but the host can already load images for the ep, so we add the special api
* sprd_pcie_wait_load_resource; this api will return after
* the first scan action.
* Returns:
* 0 resource ready,
* < 0 resource not ready,
* -%ERESTARTSYS if it was interrupted by a signal.
*/
int sprd_pcie_wait_load_resource(u32 dst);
/* Because the ep BARs can only be split by the ep itself,
* after all modem images have been loaded, notify the pcie resource
* layer that it can rescan the ep now.
*/
void sprd_pcie_resource_notify_load_done(u32 dst);
#endif /* CONFIG_SPRD_PCIE_EP_DEVICE */
#endif /* _SPRD_PCIE_RESOURCE_H */
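A minimal sketch of the request/wait/release pattern implied by the declarations above (illustrative only; the demo_* name, the dst value and the 1000 ms timeout are placeholders):
#include <linux/types.h>
#include "../include/sprd_pcie_resource.h"
static int demo_access_ep(u32 dst)
{
	int ret;
	ret = sprd_pcie_request_resource(dst);
	if (ret)
		return ret;
	/* wait up to 1000 ms for the link; < 0 means not ready or interrupted */
	ret = sprd_pcie_wait_resource(dst, 1000);
	if (ret < 0) {
		sprd_pcie_release_resource(dst);
		return ret;
	}
	/* ... access endpoint registers / shared memory here ... */
	return sprd_pcie_release_resource(dst);
}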

View File

@@ -0,0 +1,7 @@
config SPRD_MCD
tristate "SPRD modem power control module"
default n
help
mcd is a module for the Spreadtrum AP/CP communication control driver.
It can control modem power on/off and trigger modem events for assert,
watchdog reset and panic.

View File

@@ -0,0 +1 @@
obj-y += modem_ctrl.o

View File

@@ -0,0 +1,814 @@
/*
* Copyright (C) 2019 Spreadtrum Communications Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/of_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/gpio/consumer.h>
#include <linux/reboot.h>
#ifdef CONFIG_PCIE_PM_NOTIFY
#include <linux/pcie_notifier.h>
#endif
#include "../include/sprd_pcie_resource.h"
#include "../include/sipc.h"
#include "../include/mdm_ctrl.h"
enum {
ROC1_SOC = 0,
ORCA_SOC
};
static char *const mdm_stat[] = {
"mdm_power_off", "mdm_power_on", "mdm_warm_reset", "mdm_cold_reset",
"mdm_watchdog_reset", "mdm_assert", "mdm_panic"
};
#define REBOOT_MODEM_DELAY 1000
#define POWERREST_MODEM_DELAY 2000
#define RESET_MODEM_DELAY 50
char cdev_name[] = "mdm_ctrl";
struct modem_ctrl_init_data {
char *name;
struct gpio_desc *gpio_poweron; /* Poweron */
struct gpio_desc *gpio_reset; /* Reset modem */
struct gpio_desc *gpio_preset; /* Pcie reset */
struct gpio_desc *gpio_cpwatchdog;
struct gpio_desc *gpio_cpassert;
struct gpio_desc *gpio_cppanic;
struct gpio_desc *gpio_cppoweroff;
u32 irq_cpwatchdog;
u32 irq_cpassert;
u32 irq_cppanic;
u32 irq_cppoweroff;
u32 modem_status;
bool enable_cp_event;
};
struct modem_ctrl_device {
struct modem_ctrl_init_data *init;
int major;
int minor;
struct cdev cdev;
struct device *dev;
int soc_type;
};
static struct class *modem_ctrl_class;
static struct modem_ctrl_device *mcd_dev;
/* modem control event notify */
static ATOMIC_NOTIFIER_HEAD(modem_ctrl_chain);
int modem_ctrl_register_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_register(&modem_ctrl_chain, nb);
}
EXPORT_SYMBOL(modem_ctrl_register_notifier);
void modem_ctrl_unregister_notifier(struct notifier_block *nb)
{
atomic_notifier_chain_unregister(&modem_ctrl_chain, nb);
}
EXPORT_SYMBOL(modem_ctrl_unregister_notifier);
static void send_event_msg(struct kobject *kobj)
{
char *msg[3];
char buff[100];
char mbuff[100];
memset(mbuff, 0, sizeof(mbuff));
if (!mcd_dev || !mcd_dev->init || !kobj)
return;
snprintf(buff, sizeof(buff), "MODEM_STAT=%d",
mcd_dev->init->modem_status);
snprintf(mbuff, sizeof(mbuff), "MODEM_EVENT=%s",
mdm_stat[mcd_dev->init->modem_status]);
msg[0] = buff;
msg[1] = mbuff;
msg[2] = NULL;
kobject_uevent_env(kobj, KOBJ_CHANGE, msg);
dev_dbg(mcd_dev->dev, "send uevent to userspace\n");
}
static irqreturn_t cpwatchdogtriger_handler(int irq, void *dev_id)
{
if (!mcd_dev || !mcd_dev->init || !mcd_dev->init->enable_cp_event)
return IRQ_NONE;
mcd_dev->init->modem_status = MDM_WATCHDOG_RESET;
atomic_notifier_call_chain(&modem_ctrl_chain, MDM_WATCHDOG_RESET, NULL);
send_event_msg(&mcd_dev->dev->kobj);
return IRQ_HANDLED;
}
static irqreturn_t cpasserttriger_handler(int irq, void *dev_id)
{
if (!mcd_dev || !mcd_dev->init || !mcd_dev->init->enable_cp_event)
return IRQ_NONE;
mcd_dev->init->modem_status = MDM_ASSERT;
atomic_notifier_call_chain(&modem_ctrl_chain, MDM_ASSERT, NULL);
send_event_msg(&mcd_dev->dev->kobj);
return IRQ_HANDLED;
}
static irqreturn_t cppanictriger_handler(int irq, void *dev_id)
{
if (!mcd_dev || !mcd_dev->init || !mcd_dev->init->enable_cp_event)
return IRQ_NONE;
mcd_dev->init->modem_status = MDM_PANIC;
atomic_notifier_call_chain(&modem_ctrl_chain, MDM_PANIC, NULL);
send_event_msg(&mcd_dev->dev->kobj);
return IRQ_HANDLED;
}
static irqreturn_t cppoweroff_handler(int irq, void *dev_id)
{
if (!mcd_dev || !mcd_dev->init)
return IRQ_NONE;
/* Reserved here to receive the power-off event from the AP */
atomic_notifier_call_chain(&modem_ctrl_chain,
MDM_POWER_OFF, NULL);
kernel_power_off();
return IRQ_HANDLED;
}
static int request_gpio_to_irq(struct gpio_desc *cp_gpio,
struct modem_ctrl_device *mcd_dev)
{
int ret = 0;
if (!mcd_dev || !mcd_dev->init)
return -EINVAL;
ret = gpiod_to_irq(cp_gpio);
if (ret < 0) {
dev_err(mcd_dev->dev, "requset irq %d failed\n", ret);
return ret;
}
dev_dbg(mcd_dev->dev, "gpio to irq %d\n", ret);
if (cp_gpio == mcd_dev->init->gpio_cpwatchdog) {
mcd_dev->init->irq_cpwatchdog = ret;
ret = devm_request_threaded_irq(mcd_dev->dev,
mcd_dev->init->irq_cpwatchdog,
NULL, cpwatchdogtriger_handler,
IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
"cpwatchdog_irq", mcd_dev);
if (ret < 0) {
dev_err(mcd_dev->dev, "can not request irq for cp watchdog\n");
return ret;
}
enable_irq_wake(mcd_dev->init->irq_cpwatchdog);
} else if (cp_gpio == mcd_dev->init->gpio_cpassert) {
mcd_dev->init->irq_cpassert = ret;
ret = devm_request_threaded_irq(mcd_dev->dev,
mcd_dev->init->irq_cpassert,
NULL, cpasserttriger_handler,
IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
"cpassert_irq", mcd_dev);
if (ret < 0) {
dev_err(mcd_dev->dev, "can not request irq for cp assert\n");
return ret;
}
enable_irq_wake(mcd_dev->init->irq_cpassert);
} else if (cp_gpio == mcd_dev->init->gpio_cppanic) {
mcd_dev->init->irq_cppanic = ret;
ret = devm_request_threaded_irq(mcd_dev->dev,
mcd_dev->init->irq_cppanic,
NULL, cppanictriger_handler,
IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
"cppanic_irq", mcd_dev);
if (ret < 0) {
dev_err(mcd_dev->dev,
"can not request irq for panic\n");
return ret;
}
enable_irq_wake(mcd_dev->init->irq_cppanic);
} else if (cp_gpio == mcd_dev->init->gpio_cppoweroff) {
mcd_dev->init->irq_cppoweroff = ret;
ret = devm_request_threaded_irq(mcd_dev->dev,
mcd_dev->init->irq_cppoweroff,
NULL, cppoweroff_handler,
IRQF_ONESHOT | IRQF_TRIGGER_LOW,
"cppoweroff_irq", mcd_dev);
if (ret < 0) {
dev_err(mcd_dev->dev,
"can not request irq for cppoweroff\n");
return ret;
}
enable_irq_wake(mcd_dev->init->irq_cppoweroff);
}
return 0;
}
static int modem_gpios_init(struct modem_ctrl_device *mcd_dev, int soc_type)
{
int ret;
if (!mcd_dev || !mcd_dev->init)
return -EINVAL;
if (soc_type == ROC1_SOC) {
gpiod_direction_input(mcd_dev->init->gpio_cpwatchdog);
gpiod_direction_input(mcd_dev->init->gpio_cpassert);
gpiod_direction_input(mcd_dev->init->gpio_cppanic);
ret = request_gpio_to_irq(mcd_dev->init->gpio_cpwatchdog,
mcd_dev);
if (ret)
return ret;
ret = request_gpio_to_irq(mcd_dev->init->gpio_cpassert,
mcd_dev);
if (ret)
return ret;
ret = request_gpio_to_irq(mcd_dev->init->gpio_cppanic,
mcd_dev);
if (ret)
return ret;
/* IRQF_TRIGGER_LOW, default must be set to high */
gpiod_set_value_cansleep(mcd_dev->init->gpio_cppoweroff, 1);
} else {
gpiod_direction_input(mcd_dev->init->gpio_cppoweroff);
ret = request_gpio_to_irq(mcd_dev->init->gpio_cppoweroff,
mcd_dev);
if (ret)
return ret;
/* TRIGGER_FALLING, so the default level must be set high */
gpiod_set_value_cansleep(mcd_dev->init->gpio_cpwatchdog, 1);
gpiod_set_value_cansleep(mcd_dev->init->gpio_cpassert, 1);
gpiod_set_value_cansleep(mcd_dev->init->gpio_cppanic, 1);
}
return 0;
}
void modem_ctrl_enable_cp_event(void)
{
if (mcd_dev && mcd_dev->init)
mcd_dev->init->enable_cp_event = true;
}
EXPORT_SYMBOL_GPL(modem_ctrl_enable_cp_event);
void modem_ctrl_send_abnormal_to_ap(int status)
{
struct gpio_desc *gpiodesc;
if (!mcd_dev || !mcd_dev->init)
return;
if (mcd_dev->soc_type != ORCA_SOC) {
dev_err(mcd_dev->dev, "operation not be allowed for %d\n",
mcd_dev->soc_type);
return;
}
switch (status) {
case MDM_WATCHDOG_RESET:
gpiodesc = mcd_dev->init->gpio_cpwatchdog;
break;
case MDM_ASSERT:
gpiodesc = mcd_dev->init->gpio_cpassert;
break;
case MDM_PANIC:
gpiodesc = mcd_dev->init->gpio_cppanic;
break;
default:
dev_info(mcd_dev->dev,
"get status %d is not right for operation\n", status);
return;
}
mcd_dev->init->modem_status = status;
dev_info(mcd_dev->dev,
"operation unnormal status %d send to ap\n",
status);
if (!IS_ERR(gpiodesc))
gpiod_set_value_cansleep(gpiodesc, 0);
}
static void modem_ctrl_send_cmd_to_cp(int status)
{
struct gpio_desc *gpiodesc = NULL;
if (!mcd_dev || !mcd_dev->init)
return;
if (mcd_dev->soc_type != ROC1_SOC) {
dev_err(mcd_dev->dev, "operation not be allowed for %d\n",
mcd_dev->soc_type);
return;
}
if (status == MDM_POWER_OFF)
gpiodesc = mcd_dev->init->gpio_cppoweroff;
mcd_dev->init->modem_status = status;
dev_info(mcd_dev->dev,
"operation cmd %d ms send to cp\n",
status);
if (!IS_ERR(gpiodesc)) {
gpiod_set_value_cansleep(gpiodesc, 0);
msleep(20);
gpiod_set_value_cansleep(gpiodesc, 20);
}
}
static void modem_ctrl_notify_abnormal_status(int status)
{
if (!mcd_dev || !mcd_dev->init)
return;
if (mcd_dev->soc_type != ORCA_SOC) {
dev_err(mcd_dev->dev, "operation not be allowed for %d\n",
mcd_dev->soc_type);
return;
}
if (status < MDM_WATCHDOG_RESET || status > MDM_PANIC) {
dev_err(mcd_dev->dev,
"operation not be allowed for status %d\n", status);
return;
}
modem_ctrl_send_abnormal_to_ap(status);
}
void modem_ctrl_poweron_modem(int on)
{
if (!mcd_dev || !mcd_dev->init)
return;
switch (on) {
case MDM_CTRL_POWER_ON:
if (!IS_ERR(mcd_dev->init->gpio_poweron)) {
atomic_notifier_call_chain(&modem_ctrl_chain,
MDM_CTRL_POWER_ON, NULL);
dev_info(mcd_dev->dev, "set modem_poweron: %d\n", on);
gpiod_set_value_cansleep(mcd_dev->init->gpio_poweron,
1);
/* Per the modem boot flow spec, need to wait 1s */
msleep(REBOOT_MODEM_DELAY);
mcd_dev->init->modem_status = MDM_CTRL_POWER_ON;
gpiod_set_value_cansleep(mcd_dev->init->gpio_poweron,
0);
}
break;
case MDM_CTRL_POWER_OFF:
/*
*To do
*/
break;
case MDM_CTRL_SET_CFG:
/*
*To do
*/
break;
case MDM_CTRL_WARM_RESET:
if (!IS_ERR(mcd_dev->init->gpio_reset)) {
atomic_notifier_call_chain(&modem_ctrl_chain,
MDM_CTRL_WARM_RESET, NULL);
dev_dbg(mcd_dev->dev, "set warm reset: %d\n", on);
gpiod_set_value_cansleep(mcd_dev->init->gpio_reset, 1);
/* Per the modem spec, need to wait 50ms */
msleep(RESET_MODEM_DELAY);
mcd_dev->init->modem_status = MDM_CTRL_WARM_RESET;
gpiod_set_value_cansleep(mcd_dev->init->gpio_reset, 0);
}
break;
case MDM_CTRL_COLD_RESET:
if (!IS_ERR(mcd_dev->init->gpio_poweron)) {
mcd_dev->init->enable_cp_event = false;
atomic_notifier_call_chain(&modem_ctrl_chain,
MDM_CTRL_COLD_RESET, NULL);
dev_info(mcd_dev->dev, "modem_power reset: %d\n", on);
gpiod_set_value_cansleep(mcd_dev->init->gpio_poweron,
1);
/* Per the modem boot flow spec, need to wait 2s */
msleep(POWERREST_MODEM_DELAY);
mcd_dev->init->modem_status = MDM_CTRL_COLD_RESET;
gpiod_set_value_cansleep(mcd_dev->init->gpio_poweron,
0);
}
break;
case MDM_CTRL_PCIE_RECOVERY:
#ifdef CONFIG_PCIE_PM_NOTIFY
pcie_ep_pm_notify(PCIE_EP_POWER_OFF);
/* PCIe power-off to power-on needs 100 ms */
msleep(100);
pcie_ep_pm_notify(PCIE_EP_POWER_ON);
#endif
break;
case MDM_POWER_OFF:
atomic_notifier_call_chain(&modem_ctrl_chain,
MDM_POWER_OFF, NULL);
modem_ctrl_send_cmd_to_cp(MDM_POWER_OFF);
break;
default:
dev_err(mcd_dev->dev, "cmd not support: %d\n", on);
}
}
EXPORT_SYMBOL_GPL(modem_ctrl_poweron_modem);
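/*
* Illustrative in-kernel usage (a sketch, not part of this driver):
* another module could cold-reset the modem through the exported API:
*
*   modem_ctrl_poweron_modem(MDM_CTRL_COLD_RESET);
*
* As implemented above, this pulses gpio_poweron and notifies the
* listeners registered on modem_ctrl_chain.
*/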
#if defined(CONFIG_DEBUG_FS)
static int modem_ctrl_debug_show(struct seq_file *m, void *private)
{
dev_dbg(mcd_dev->dev, "%s\n", __func__);
return 0;
}
static int modem_ctrl_debug_open(struct inode *inode, struct file *file)
{
return single_open(file, modem_ctrl_debug_show, inode->i_private);
}
static const struct file_operations modem_ctrl_debug_fops = {
.open = modem_ctrl_debug_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
#endif /* CONFIG_DEBUG_FS */
static int modem_ctrl_open(struct inode *inode, struct file *filp)
{
struct modem_ctrl_device *modem_ctrl;
modem_ctrl = container_of(inode->i_cdev,
struct modem_ctrl_device, cdev);
filp->private_data = modem_ctrl;
dev_dbg(modem_ctrl->dev, "modem_ctrl: %s\n", __func__);
return 0;
}
static int modem_ctrl_release(struct inode *inode, struct file *filp)
{
struct modem_ctrl_device *modem_ctrl;
modem_ctrl = container_of(inode->i_cdev,
struct modem_ctrl_device, cdev);
dev_dbg(modem_ctrl->dev, "modem_ctrl: %s\n", __func__);
return 0;
}
static ssize_t modem_ctrl_read(struct file *filp,
char __user *buf,
size_t count,
loff_t *ppos)
{
char tmpbuf[30];
int r;
struct modem_ctrl_device *mcd_dev = filp->private_data;
if (!mcd_dev || !mcd_dev->init)
return -EINVAL;
r = snprintf(tmpbuf, sizeof(tmpbuf), "%s\n",
mdm_stat[mcd_dev->init->modem_status]);
return simple_read_from_buffer(buf, count, ppos, tmpbuf, r);
}
static ssize_t modem_ctrl_write(struct file *filp,
const char __user *buf,
size_t count, loff_t *ppos)
{
char sbuf[100];
int ret;
u32 mcd_cmd;
struct modem_ctrl_device *mcd_dev = filp->private_data;
if (!mcd_dev)
return -EINVAL;
if (unalign_copy_from_user((void *)sbuf, buf, count)) {
dev_err(mcd_dev->dev, "copy buf %s error\n", buf);
return -EFAULT;
}
dev_dbg(mcd_dev->dev, "get info:%s", sbuf);
sbuf[count - 1] = '\0';
ret = kstrtouint(sbuf, 10, &mcd_cmd);
if (ret) {
dev_err(mcd_dev->dev, "Invalid input!\n");
return ret;
}
if (mcd_dev->soc_type == ROC1_SOC) {
if (mcd_cmd >= MDM_CTRL_POWER_OFF &&
mcd_cmd <= MDM_CTRL_SET_CFG)
modem_ctrl_poweron_modem(mcd_cmd);
else
dev_info(mcd_dev->dev, "cmd not support!\n");
} else {
modem_ctrl_notify_abnormal_status(mcd_cmd);
}
return count;
}
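/*
* Illustrative userspace usage (a sketch, not part of this driver; the
* device node name comes from cdev_name and is assumed here to be
* /dev/modem_ctrl):
*
*   int fd = open("/dev/modem_ctrl", O_WRONLY);
*   // keep the trailing '\n': the handler overwrites the last byte
*   // with '\0' before parsing the decimal command id
*   write(fd, "1\n", 2);
*   close(fd);
*
* The mapping of "1" to a concrete MDM_CTRL_* command is an assumption;
* the real values come from the mdm_ctrl header.
*/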
static long modem_ctrl_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
if (!mcd_dev || mcd_dev->soc_type == ORCA_SOC)
return -EINVAL;
switch (cmd) {
case MDM_CTRL_POWER_OFF:
modem_ctrl_poweron_modem(MDM_CTRL_POWER_OFF);
break;
case MDM_CTRL_POWER_ON:
modem_ctrl_poweron_modem(MDM_CTRL_POWER_ON);
break;
case MDM_CTRL_WARM_RESET:
modem_ctrl_poweron_modem(MDM_CTRL_WARM_RESET);
break;
case MDM_CTRL_COLD_RESET:
modem_ctrl_poweron_modem(MDM_CTRL_COLD_RESET);
break;
case MDM_CTRL_PCIE_RECOVERY:
modem_ctrl_poweron_modem(MDM_CTRL_PCIE_RECOVERY);
break;
case MDM_CTRL_SET_CFG:
break;
default:
return -EINVAL;
}
return 0;
}
static const struct file_operations modem_ctrl_fops = {
.open = modem_ctrl_open,
.release = modem_ctrl_release,
.read = modem_ctrl_read,
.write = modem_ctrl_write,
.unlocked_ioctl = modem_ctrl_ioctl,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static int modem_ctrl_parse_modem_dt(struct modem_ctrl_init_data **init,
struct device *dev)
{
struct modem_ctrl_init_data *pdata = NULL;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
pdata->name = cdev_name;
/* Trigger watchdog, assert and panic signals of orca */
pdata->gpio_cpwatchdog = devm_gpiod_get(dev,
"cpwatchdog",
GPIOD_OUT_HIGH);
if (IS_ERR(pdata->gpio_cpwatchdog))
return PTR_ERR(pdata->gpio_cpwatchdog);
pdata->gpio_cpassert = devm_gpiod_get(dev, "cpassert", GPIOD_OUT_HIGH);
if (IS_ERR(pdata->gpio_cpassert))
return PTR_ERR(pdata->gpio_cpassert);
pdata->gpio_cppanic = devm_gpiod_get(dev, "cppanic", GPIOD_OUT_HIGH);
if (IS_ERR(pdata->gpio_cppanic))
return PTR_ERR(pdata->gpio_cppanic);
pdata->gpio_cppoweroff = devm_gpiod_get(dev, "cppoweroff", GPIOD_IN);
if (IS_ERR(pdata->gpio_cppoweroff))
return PTR_ERR(pdata->gpio_cppoweroff);
*init = pdata;
return 0;
}
static int modem_ctrl_parse_dt(struct modem_ctrl_init_data **init,
struct device *dev)
{
struct modem_ctrl_init_data *pdata;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
pdata->name = cdev_name;
pdata->gpio_poweron = devm_gpiod_get(dev, "poweron", GPIOD_OUT_LOW);
if (IS_ERR(pdata->gpio_poweron))
return PTR_ERR(pdata->gpio_poweron);
pdata->gpio_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(pdata->gpio_reset))
return PTR_ERR(pdata->gpio_reset);
/* Trigger watchdog, assert and panic signals of orca */
pdata->gpio_cpwatchdog = devm_gpiod_get(dev, "cpwatchdog", GPIOD_IN);
if (IS_ERR(pdata->gpio_cpwatchdog))
return PTR_ERR(pdata->gpio_cpwatchdog);
pdata->gpio_cpassert = devm_gpiod_get(dev, "cpassert", GPIOD_IN);
if (IS_ERR(pdata->gpio_cpassert))
return PTR_ERR(pdata->gpio_cpassert);
pdata->gpio_cppanic = devm_gpiod_get(dev, "cppanic", GPIOD_IN);
if (IS_ERR(pdata->gpio_cppanic))
return PTR_ERR(pdata->gpio_cppanic);
pdata->gpio_cppoweroff = devm_gpiod_get(dev,
"cppoweroff", GPIOD_OUT_HIGH);
if (IS_ERR(pdata->gpio_cppoweroff))
return PTR_ERR(pdata->gpio_cppoweroff);
pdata->modem_status = MDM_CTRL_POWER_OFF;
*init = pdata;
return 0;
}
static inline void
modem_ctrl_destroy_pdata(struct modem_ctrl_init_data **init)
{
*init = NULL;
}
static int modem_ctrl_restart_handle(struct notifier_block *this,
unsigned long mode, void *cmd)
{
if (!mcd_dev || mcd_dev->soc_type == ROC1_SOC)
return NOTIFY_DONE;
modem_ctrl_notify_abnormal_status(MDM_PANIC);
while (1)
;
return NOTIFY_DONE;
}
static struct notifier_block modem_ctrl_restart_handler = {
.notifier_call = modem_ctrl_restart_handle,
.priority = 150,
};
static int modem_ctrl_probe(struct platform_device *pdev)
{
struct modem_ctrl_init_data *init = pdev->dev.platform_data;
struct modem_ctrl_device *modem_ctrl_dev;
dev_t devid;
int rval;
struct device *dev = &pdev->dev;
modem_ctrl_dev = devm_kzalloc(dev, sizeof(*modem_ctrl_dev), GFP_KERNEL);
if (!modem_ctrl_dev)
return -ENOMEM;
mcd_dev = modem_ctrl_dev;
if (of_device_is_compatible(pdev->dev.of_node, "sprd,roc1-modem-ctrl"))
modem_ctrl_dev->soc_type = ROC1_SOC;
else
modem_ctrl_dev->soc_type = ORCA_SOC;
if (modem_ctrl_dev->soc_type == ROC1_SOC) {
rval = modem_ctrl_parse_dt(&init, &pdev->dev);
if (rval) {
dev_err(dev,
"Failed to parse modem_ctrl device tree, ret=%d\n",
rval);
return rval;
}
} else {
rval = modem_ctrl_parse_modem_dt(&init, &pdev->dev);
if (rval) {
dev_err(dev,
"Failed to parse modem_ctrl device tree, ret=%d\n",
rval);
return rval;
}
}
dev_dbg(dev, "after parse device tree, name=%s soctype=%d\n",
init->name,
modem_ctrl_dev->soc_type);
rval = alloc_chrdev_region(&devid, 0, 1, init->name);
if (rval != 0) {
dev_err(dev, "Failed to alloc modem_ctrl chrdev\n");
goto error3;
}
cdev_init(&modem_ctrl_dev->cdev, &modem_ctrl_fops);
rval = cdev_add(&modem_ctrl_dev->cdev, devid, 1);
if (rval != 0) {
dev_err(dev, "Failed to add modem_ctrl cdev\n");
goto error2;
}
modem_ctrl_dev->major = MAJOR(devid);
modem_ctrl_dev->minor = MINOR(devid);
modem_ctrl_dev->dev = device_create(modem_ctrl_class, NULL,
MKDEV(modem_ctrl_dev->major,
modem_ctrl_dev->minor),
NULL, "%s", init->name);
if (IS_ERR_OR_NULL(modem_ctrl_dev->dev)) {
dev_err(dev, "create dev failed\n");
rval = -ENODEV;
goto error1;
}
modem_ctrl_dev->init = init;
platform_set_drvdata(pdev, modem_ctrl_dev);
rval = modem_gpios_init(modem_ctrl_dev, modem_ctrl_dev->soc_type);
if (rval) {
dev_err(dev, "request gpios error\n");
goto error0;
}
rval = register_restart_handler(&modem_ctrl_restart_handler);
if (rval) {
dev_err(dev, "cannot register restart handler err=%d\n", rval);
goto error0;
}
return 0;
error0:
device_destroy(modem_ctrl_class,
MKDEV(modem_ctrl_dev->major,
modem_ctrl_dev->minor));
error1:
cdev_del(&modem_ctrl_dev->cdev);
error2:
unregister_chrdev_region(devid, 1);
error3:
modem_ctrl_destroy_pdata(&init);
return rval;
}
static int modem_ctrl_remove(struct platform_device *pdev)
{
struct modem_ctrl_device *modem_ctrl_dev = platform_get_drvdata(pdev);
unregister_restart_handler(&modem_ctrl_restart_handler);
device_destroy(modem_ctrl_class,
MKDEV(modem_ctrl_dev->major,
modem_ctrl_dev->minor));
cdev_del(&modem_ctrl_dev->cdev);
unregister_chrdev_region(MKDEV(modem_ctrl_dev->major,
modem_ctrl_dev->minor), 1);
modem_ctrl_destroy_pdata(&modem_ctrl_dev->init);
platform_set_drvdata(pdev, NULL);
return 0;
}
static void modem_ctrl_shutdown(struct platform_device *pdev)
{
if (mcd_dev->soc_type == ROC1_SOC) {
atomic_notifier_call_chain(&modem_ctrl_chain,
MDM_POWER_OFF, NULL);
/*
* sleep 50 ms to let other modules do their work
* before orca powers down.
*/
msleep(50);
modem_ctrl_send_cmd_to_cp(MDM_POWER_OFF);
/* Sleep 500ms so the cp can finish its power down process, otherwise
* the cp will not power down cleanly.
*/
msleep(500);
}
}
static const struct of_device_id modem_ctrl_match_table[] = {
{.compatible = "sprd,roc1-modem-ctrl", },
{.compatible = "sprd,orca-modem-ctrl", },
};
static struct platform_driver modem_ctrl_driver = {
.driver = {
.name = "modem_ctrl",
.of_match_table = modem_ctrl_match_table,
},
.probe = modem_ctrl_probe,
.remove = modem_ctrl_remove,
.shutdown = modem_ctrl_shutdown,
};
int modem_ctrl_init(void)
{
modem_ctrl_class = class_create(THIS_MODULE, "modem_ctrl");
if (IS_ERR(modem_ctrl_class))
return PTR_ERR(modem_ctrl_class);
return platform_driver_register(&modem_ctrl_driver);
}
EXPORT_SYMBOL_GPL(modem_ctrl_init);
void modem_ctrl_exit(void)
{
class_destroy(modem_ctrl_class);
platform_driver_unregister(&modem_ctrl_driver);
}
EXPORT_SYMBOL_GPL(modem_ctrl_exit);

View File

@@ -0,0 +1,7 @@
config SPRD_PCIE_EP_DEVICE
tristate "SPRD PCIE EP device"
default n
depends on PCI
help
SPRD pcie ep device driver in host side for Spreadtrum.

View File

@@ -0,0 +1,6 @@
ccflags-y += -DCONFIG_SPRD_PCIE_EP_DEVICE -DCONFIG_SPRD_SIPA
obj-y += sprd_pcie_ep_device.o
obj-y += pcie_host_resource.o
obj-y += sprd_pcie_quirks.o
obj-$(CONFIG_PCIE_EPF_SPRD) += pcie_client_resource.o
obj-$(CONFIG_SPRD_SIPA_RES) += pcie_sipa_res.o

View File

@@ -0,0 +1,528 @@
/*
* Copyright (C) 2019 Spreadtrum Communications Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mdm_ctrl.h>
#include <linux/pcie-epf-sprd.h>
#include <linux/sched.h>
#include <linux/sipc.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/wait.h>
#include "../include/sprd_pcie_resource.h"
#ifdef CONFIG_SPRD_SIPA_RES
#include "pcie_sipa_res.h"
#endif
enum ep_msg {
RC_SCANNED_MSG = 0,
RC_REMOVING_MSG,
EPC_UNLINK_MSG,
EPC_LINKUP_MSG
};
enum pcie_ep_state {
SPRD_PCIE_WAIT_FIRST_READY = 0,
SPRD_PCIE_WAIT_SCANNED,
SPRD_PCIE_SCANNED,
SPRD_PCIE_WAIT_REMOVED,
SPRD_PCIE_REMOVED,
SPRD_PCIE_WAIT_POWER_OFF
};
struct sprd_pci_res_notify {
void (*notify)(void *p);
void *data;
};
struct sprd_pcie_res {
u32 dst;
u32 ep_fun;
enum pcie_ep_state state;
bool msi_later;
bool wakeup_later;
#ifdef CONFIG_SPRD_SIPA_RES
void *sipa_res;
#endif
/*
* In the client (Orca), the PCIE module blocks the chip from entering
* deep sleep, so we must hold a wake lock while the pcie is working to
* avoid this situation: the system enters deep sleep while the PCIE
* is still working.
*/
struct wakeup_source ws;
wait_queue_head_t wait_pcie_ready;
struct sprd_pci_res_notify first_ready_notify;
};
static struct sprd_pcie_res *g_pcie_res[SIPC_ID_NR];
/* The state machine of the ep; the initial state is SPRD_PCIE_WAIT_FIRST_READY.
* SPRD_PCIE_WAIT_FIRST_READY (receive RC scanned) ==> SPRD_PCIE_SCANNED
* SPRD_PCIE_SCANNED (receive RC removing)==> SPRD_PCIE_WAIT_REMOVED
* SPRD_PCIE_WAIT_REMOVED(receive epc unlink)==>SPRD_PCIE_REMOVED
* SPRD_PCIE_REMOVED(receive epc linkup)==>SPRD_PCIE_WAIT_SCANNED
* SPRD_PCIE_WAIT_SCANNED(receive RC scanned)==>SPRD_PCIE_SCANNED
* SPRD_PCIE_WAIT_POWER_OFF can do nothing, just wait shutdown.
*/
static const char *change_msg[EPC_LINKUP_MSG + 1] = {
"rc scanned",
"rc removing",
"epc unlink",
"epc linkup"
};
static const char *state_msg[SPRD_PCIE_REMOVED + 1] = {
"wait first ready",
"wait sacanned",
"scanned",
"wait remove",
"removed"
};
static void pcie_resource_client_change_state(struct sprd_pcie_res *res,
enum ep_msg msg)
{
u32 old_state = res->state;
if (old_state == SPRD_PCIE_WAIT_POWER_OFF)
return;
pr_debug("pcie res: change state msg=%s, old_state=%s.\n",
change_msg[msg], state_msg[old_state]);
switch (msg) {
case RC_SCANNED_MSG:
if (old_state != SPRD_PCIE_WAIT_FIRST_READY
&& old_state != SPRD_PCIE_WAIT_SCANNED) {
pr_err("pcie res: %s msg err, old state=%s",
change_msg[msg], state_msg[old_state]);
return;
}
res->state = SPRD_PCIE_SCANNED;
break;
case RC_REMOVING_MSG:
if (old_state != SPRD_PCIE_SCANNED) {
pr_err("pcie res: %s msg err, old state=%s",
change_msg[msg], state_msg[old_state]);
return;
}
res->state = SPRD_PCIE_WAIT_REMOVED;
break;
case EPC_UNLINK_MSG:
if (old_state != SPRD_PCIE_WAIT_REMOVED) {
if (old_state != SPRD_PCIE_WAIT_FIRST_READY)
pr_err("pcie res: %s msg err, old state=%s",
change_msg[msg], state_msg[old_state]);
return;
}
res->state = SPRD_PCIE_REMOVED;
break;
case EPC_LINKUP_MSG:
if (old_state != SPRD_PCIE_REMOVED) {
if (old_state != SPRD_PCIE_WAIT_FIRST_READY)
pr_err("pcie res: %s msg err, old state=%s",
change_msg[msg], state_msg[old_state]);
return;
}
res->state = SPRD_PCIE_WAIT_SCANNED;
break;
}
pr_info("pcie res: change state from %s to %s.\n",
state_msg[old_state], state_msg[res->state]);
}
static void sprd_pcie_resource_first_ready_notify(struct sprd_pcie_res *res)
{
void (*notify)(void *p);
pr_info("pcie res: first ready.\n");
#ifdef CONFIG_SPRD_SIPA_RES
/*
* in client side, producer res id is SIPA_RM_RES_PROD_PCIE_EP,
* consumer res id is SIPA_RM_RES_CONS_WWAN_DL.
*/
res->sipa_res = pcie_sipa_res_create(res->dst,
SIPA_RM_RES_PROD_PCIE_EP,
SIPA_RM_RES_CONS_WWAN_DL);
if (!res->sipa_res)
pr_err("pcie res:create ipa res failed.\n");
#endif
notify = res->first_ready_notify.notify;
if (notify)
notify(res->first_ready_notify.data);
}
static void pcie_resource_client_epf_notify(int event, void *private)
{
struct sprd_pcie_res *res = (struct sprd_pcie_res *)private;
if (res->state == SPRD_PCIE_WAIT_POWER_OFF)
return;
switch (event) {
case SPRD_EPF_BIND:
pr_info("pcie res: epf be binded.\n");
if (sprd_pcie_is_defective_chip())
sprd_pci_epf_raise_irq(res->ep_fun,
PCIE_MSI_EP_READY_FOR_RESCAN);
break;
case SPRD_EPF_UNBIND:
pr_info("pcie res: epf be unbinded.\n");
break;
case SPRD_EPF_REMOVE:
pr_info("pcie res: epf be removed.\n");
break;
case SPRD_EPF_LINK_UP:
/* get a wakelock */
__pm_stay_awake(&res->ws);
pr_info("pcie res: epf linkup.\n");
pcie_resource_client_change_state(res, EPC_LINKUP_MSG);
/* first ready notify */
if (res->state == SPRD_PCIE_WAIT_FIRST_READY)
sprd_pcie_resource_first_ready_notify(res);
break;
case SPRD_EPF_UNLINK:
/* This log is needed to debug pcie scan and remove */
pr_info("pcie res: epf unlink.\n");
pcie_resource_client_change_state(res, EPC_UNLINK_MSG);
/* if has wakeup pending, send wakeup to rc */
if (res->wakeup_later) {
res->wakeup_later = false;
pr_info("pcie res: send wakeup to rc.\n");
if (sprd_pci_epf_start(res->ep_fun))
pr_err("pcie res: send wakeup to rc failed.\n");
}
/* relax a wakelock */
__pm_relax(&res->ws);
break;
default:
break;
}
}
static irqreturn_t pcie_resource_client_irq_handler(int irq, void *private)
{
struct sprd_pcie_res *res = (struct sprd_pcie_res *)private;
if (res->state == SPRD_PCIE_WAIT_POWER_OFF)
return IRQ_HANDLED;
if (irq == PCIE_DBEL_EP_SCANNED) {
pcie_resource_client_change_state(res, RC_SCANNED_MSG);
/* wakeup all blocked thread */
pr_info("pcie res: scanned, wakup all.\n");
wake_up_interruptible_all(&res->wait_pcie_ready);
/* if has msi pending, send msi to rc */
if (res->msi_later) {
res->msi_later = false;
pr_info("pcie res: request msi to rc.\n");
sprd_pci_epf_raise_irq(res->ep_fun,
PCIE_MSI_REQUEST_RES);
}
} else if (irq == PCIE_DBEL_EP_REMOVING) {
pr_info("pcie res: removing.\n");
pcie_resource_client_change_state(res, RC_REMOVING_MSG);
}
return IRQ_HANDLED;
}
static int sprd_pcie_resource_client_mcd(struct notifier_block *nb,
unsigned long mode, void *cmd)
{
struct sprd_pcie_res *res;
int i;
pr_info("pcie res: mcd event mode=%ld.\n", mode);
if (mode != MDM_POWER_OFF)
return NOTIFY_DONE;
for (i = 0; i < SIPC_ID_NR; i++) {
res = g_pcie_res[i];
if (res)
res->state = SPRD_PCIE_WAIT_POWER_OFF;
}
return NOTIFY_DONE;
}
static struct notifier_block mcd_notify = {
.notifier_call = sprd_pcie_resource_client_mcd,
.priority = 149,
};
int sprd_pcie_resource_client_init(u32 dst, u32 ep_fun)
{
struct sprd_pcie_res *res;
if (dst >= SIPC_ID_NR)
return -EINVAL;
res = kzalloc(sizeof(*res), GFP_KERNEL);
if (!res)
return -ENOMEM;
res->dst = dst;
res->state = SPRD_PCIE_WAIT_FIRST_READY;
res->ep_fun = ep_fun;
wakeup_source_init(&res->ws, "pcie_res");
init_waitqueue_head(&res->wait_pcie_ready);
sprd_pci_epf_register_irq_handler_ex(res->ep_fun,
PCIE_DBEL_EP_SCANNED,
PCIE_DBEL_EP_REMOVING,
pcie_resource_client_irq_handler,
res);
sprd_pci_epf_register_notify(res->ep_fun,
pcie_resource_client_epf_notify,
res);
modem_ctrl_register_notifier(&mcd_notify);
g_pcie_res[dst] = res;
return 0;
}
int sprd_pcie_resource_trash(u32 dst)
{
struct sprd_pcie_res *res;
if (dst >= SIPC_ID_NR || !g_pcie_res[dst])
return -EINVAL;
res = g_pcie_res[dst];
#ifdef CONFIG_SPRD_SIPA_RES
if (res->sipa_res)
pcie_sipa_res_destroy(res->sipa_res);
#endif
sprd_pci_epf_unregister_irq_handler_ex(res->ep_fun,
PCIE_DBEL_EP_SCANNED,
PCIE_DBEL_EP_REMOVING);
sprd_pci_epf_unregister_notify(res->ep_fun);
modem_ctrl_unregister_notifier(&mcd_notify);
kfree(res);
g_pcie_res[dst] = NULL;
return 0;
}
int sprd_pcie_wait_resource(u32 dst, int timeout)
{
struct sprd_pcie_res *res;
int ret, wait;
unsigned long delay;
if (dst >= SIPC_ID_NR || !g_pcie_res[dst])
return -EINVAL;
res = g_pcie_res[dst];
/* pcie ready, return succ immediately */
if (res->state == SPRD_PCIE_SCANNED)
return 0;
if (timeout == 0)
return -ETIME;
if (timeout < 0) {
wait = wait_event_interruptible(
res->wait_pcie_ready,
res->state == SPRD_PCIE_SCANNED
);
ret = wait;
} else {
/*
* add 1s to the timeout,
* because the pcie rescan may take some time.
*/
delay = msecs_to_jiffies(timeout + 1000);
wait = wait_event_interruptible_timeout(res->wait_pcie_ready,
res->state ==
SPRD_PCIE_SCANNED,
delay);
if (wait == 0)
ret = -ETIME;
else if (wait > 0)
ret = 0;
else
ret = wait;
}
if (ret < 0 && ret != -ERESTARTSYS)
pr_err("pcie res: wait resource, val=%d.\n", ret);
return ret;
}
int sprd_pcie_request_resource(u32 dst)
{
struct sprd_pcie_res *res;
int ret = 0;
if (dst >= SIPC_ID_NR || !g_pcie_res[dst])
return -EINVAL;
res = g_pcie_res[dst];
if (res->state == SPRD_PCIE_WAIT_POWER_OFF)
return -EINVAL;
pr_debug("pcie res: request res, state=%d.\n", res->state);
switch (res->state) {
case SPRD_PCIE_WAIT_FIRST_READY:
case SPRD_PCIE_WAIT_SCANNED:
pr_info("pcie res: later send request msi to rc.\n");
res->msi_later = true;
break;
case SPRD_PCIE_WAIT_REMOVED:
pr_info("pcie res: later send wakeup to rc.\n");
res->wakeup_later = true;
break;
case SPRD_PCIE_SCANNED:
/*
* if pcie state is SCANNED, just send
* PCIE_MSI_REQUEST_RES to the host.
* After the host receives the res msi interrupt,
* it will add one vote in the modem power manager.
*/
pr_info("pcie res: send request msi to rc.\n");
ret = sprd_pci_epf_raise_irq(res->ep_fun,
PCIE_MSI_REQUEST_RES);
break;
case SPRD_PCIE_REMOVED:
/*
* if pcie state is removed, send a wakeup signal
* to the host, and the host will rescan the pcie.
*/
pr_info("pcie res: send wakeup to rc.\n");
if (sprd_pci_epf_start(res->ep_fun) == 0)
break;
/* may receive ep reset, wait linkup and scanned */
pr_info("pcie res: later send request msi to rc.\n");
res->msi_later = true;
break;
default:
pr_err("pcie res: request res err, state=%d.\n",
res->state);
ret = -EPERM;
break;
}
return ret;
}
int sprd_pcie_release_resource(u32 dst)
{
struct sprd_pcie_res *res;
int ret = 0;
if (dst >= SIPC_ID_NR || !g_pcie_res[dst])
return -EINVAL;
res = g_pcie_res[dst];
if (res->state == SPRD_PCIE_WAIT_POWER_OFF)
return -EINVAL;
switch (res->state) {
case SPRD_PCIE_SCANNED:
/*
* if pcie state is SCANNED, send PCIE_MSI_RELEASE_RES
* to the host, else do nothing. After the host receives the res msi
* interrupt, it will drop one vote in the modem power manager,
* and if the modem power manager is idle, the host will remove
* the pcie.
*/
pr_info("pcie res: send release msi to rc.\n");
ret = sprd_pci_epf_raise_irq(res->ep_fun,
PCIE_MSI_RELEASE_RES);
break;
case SPRD_PCIE_WAIT_FIRST_READY:
/* if has msi pending, remove it */
if (res->msi_later)
res->msi_later = false;
break;
default:
pr_err("pcie res: release res state=%d.\n", res->state);
ret = -EPERM;
break;
}
return ret;
}
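/*
* Illustrative call sequence (a sketch, not part of this driver): a
* hypothetical data-path user could bracket its PCIe accesses with the
* request/wait/release trio above:
*
*   if (!sprd_pcie_request_resource(dst) &&
*       !sprd_pcie_wait_resource(dst, 5000)) {
*           // ... access the PCIe link here ...
*           sprd_pcie_release_resource(dst);
*   }
*
* The dst value and the 5000 ms timeout are placeholders; real callers
* pass their own SIPC dst id and timeout.
*/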
int sprd_register_pcie_resource_first_ready(u32 dst,
void (*notify)(void *p), void *data)
{
struct sprd_pcie_res *res;
if (dst >= SIPC_ID_NR || !g_pcie_res[dst])
return -EINVAL;
res = g_pcie_res[dst];
res->first_ready_notify.data = data;
res->first_ready_notify.notify = notify;
return 0;
}
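/*
* Illustrative registration (a sketch, not part of this driver): a
* hypothetical client could hook the first-ready event like this:
*
*   static void example_first_ready(void *data)
*   {
*           pr_info("pcie res: example first ready\n");
*   }
*   ...
*   sprd_register_pcie_resource_first_ready(dst, example_first_ready,
*                                           NULL);
*
* The callback name and the dst value are placeholders for illustration.
*/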
bool sprd_pcie_is_defective_chip(void)
{
static bool first_read = true, defective;
if (first_read) {
first_read = false;
defective = sprd_kproperty_chipid("UD710-AB") == 0;
}
return defective;
}

View File

@@ -0,0 +1,720 @@
/*
* Copyright (C) 2019 Spreadtrum Communications Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/version.h>
#ifdef CONFIG_SPRD_SIPA_RES
#include "pcie_sipa_res.h"
#endif
#include "../include/pcie-rc-sprd.h"
#include "../include/sipc.h"
//#include "../include/mdm_ctrl.h"
#include "../include/sprd_pcie_ep_device.h"
#include "../include/sprd_mpm.h"
#include "../include/sprd_pcie_resource.h"
#define PCIE_REMOVE_SCAN_GAP msecs_to_jiffies(200)
#define MAX_PMS_WAIT_TIME 5000
#define MAX_PMS_DEFECTIVE_CHIP_FIRST_WAIT_TIME (55 * 1000)
enum rc_state {
SPRD_PCIE_WAIT_FIRST_READY = 0,
SPRD_PCIE_WAIT_SCANNED,
SPRD_PCIE_SCANNED,
SPRD_PCIE_WAIT_REMOVED,
SPRD_PCIE_REMOVED,
SPRD_PCIE_SCANNED_2BAR,
SPRD_PCIE_WAIT_POWER_OFF
};
struct sprd_pcie_res {
u32 dst;
u32 ep_dev;
u32 state;
u32 scan_cnt;
u32 max_wait_time;
bool ep_power_on;
bool ep_dev_probe;
bool smem_send_to_ep;
unsigned long action_jiff;
struct sprd_pms *pms;
char pms_name[20];
wait_queue_head_t wait_pcie_ready;
bool ep_ready_for_rescan;
wait_queue_head_t wait_load_ready;
wait_queue_head_t wait_first_rescan;
struct task_struct *thread;
#ifdef CONFIG_SPRD_SIPA_RES
void *sipa_res;
#endif
#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,18,0 ))
struct wakeup_source *ws;
#else
struct wakeup_source ws;
#endif
struct work_struct scan_work;
struct work_struct remove_work;
struct workqueue_struct *wq;
struct platform_device *pcie_dev;
struct sprd_pcie_register_event reg_event;
};
static int sprd_pcie_resource_rescan(struct sprd_pcie_res *res);
static struct sprd_pcie_res *g_pcie_res[SIPC_ID_NR];
static void sprd_pcie_resource_host_first_rescan_do(struct sprd_pcie_res *res)
{
int ret = sprd_pcie_register_event(&res->reg_event);
if (ret)
pr_err("pcie res: register pci ret=%d.\n", ret);
/* power up for ep after the first scan. */
res->ep_power_on = true;
sprd_pms_power_up(res->pms);
#ifdef CONFIG_SPRD_SIPA_RES
/*
* in host side, producer res id is SIPA_RM_RES_PROD_PCIE3,
* consumer res id is SIPA_RM_RES_CONS_WWAN_UL.
*/
res->sipa_res = pcie_sipa_res_create(res->dst,
SIPA_RM_RES_PROD_PCIE3,
SIPA_RM_RES_CONS_WWAN_UL);
if (!res->sipa_res)
pr_err("pcie res:create ipa res failed.\n");
#endif
}
static void sprd_pcie_resource_host_ep_notify(int event, void *data)
{
struct sprd_pcie_res *res = (struct sprd_pcie_res *)data;
u32 base, size;
/* wait power off, do nothing */
if (res->state == SPRD_PCIE_WAIT_POWER_OFF)
return;
switch (event) {
case PCIE_EP_PROBE:
/* set state to scanned */
res->state = SPRD_PCIE_SCANNED;
res->scan_cnt++;
res->ep_dev_probe = true;
//modem_ctrl_enable_cp_event();
if (smem_get_area(SIPC_ID_MINIAP, &base, &size) == 0)
sprd_ep_dev_pass_smem(res->ep_dev, base, size);
pr_info("pcie res: ep_notify, probed cnt=%d.\n",
res->scan_cnt);
/* on the first scan, do the one-time setup */
if (res->scan_cnt == 1)
sprd_pcie_resource_host_first_rescan_do(res);
/* clear removed irq and notify ep scanned */
sprd_ep_dev_clear_doolbell_irq(res->ep_dev,
PCIE_DBEL_EP_REMOVING);
sprd_ep_dev_raise_irq(res->ep_dev, PCIE_DBEL_EP_SCANNED);
/* wakeup all blocked thread */
wake_up_interruptible_all(&res->wait_pcie_ready);
break;
case PCIE_EP_REMOVE:
pr_info("pcie res: ep_notify, removed.\n");
res->state = SPRD_PCIE_REMOVED;
res->ep_dev_probe = false;
break;
case PCIE_EP_PROBE_BEFORE_SPLIT_BAR:
res->state = SPRD_PCIE_SCANNED_2BAR;
res->ep_dev_probe = true;
pr_info("pcie res: probed before split bar.\n");
if (!res->ep_ready_for_rescan) {
wake_up_interruptible_all(&res->wait_load_ready);
} else {
pr_info("pcie res: bar err, rescan.\n");
sprd_pcie_resource_rescan(res);
}
break;
default:
break;
}
}
static irqreturn_t sprd_pcie_resource_host_irq_handler(int irq, void *private)
{
struct sprd_pcie_res *res = (struct sprd_pcie_res *)private;
if (irq == PCIE_MSI_REQUEST_RES) {
pr_info("pcie res: ep request res.\n");
/*
* the client modem powers up,
* no need for a wake lock and no need to wait for the resource.
*/
if (!res->ep_power_on) {
res->ep_power_on = true;
sprd_pms_power_up(res->pms);
}
/* only after receiving the ep request can the ep configs be backed up. */
sprd_ep_dev_set_backup(res->ep_dev);
} else if (irq == PCIE_MSI_RELEASE_RES) {
pr_info("pcie res: ep release res.\n");
/*
* the client modem powers down,
* no need for a wake lock.
*/
if (res->ep_power_on) {
res->ep_power_on = false;
sprd_pms_power_down(res->pms, false);
}
} else if (irq == PCIE_MSI_EP_READY_FOR_RESCAN) {
pr_info("pcie res: ep ready for rescan.\n");
res->ep_ready_for_rescan = true;
wake_up_interruptible_all(&res->wait_first_rescan);
}
return IRQ_HANDLED;
}
static void sprd_pcie_resource_scan_fn(struct work_struct *work)
{
unsigned long diff;
unsigned int delay;
int ret;
struct sprd_pcie_res *res = container_of(work, struct sprd_pcie_res,
scan_work);
/* wait power off, do nothing */
if (res->state == SPRD_PCIE_WAIT_POWER_OFF)
return;
/* request wakelock */
sprd_pms_request_wakelock(res->pms);
diff = jiffies - res->action_jiff;
if (diff < PCIE_REMOVE_SCAN_GAP) {
/* must ensure that the scan starts after a period of remove. */
delay = jiffies_to_msecs(PCIE_REMOVE_SCAN_GAP - diff);
msleep(delay);
}
pr_info("pcie res: scan\n");
ret = sprd_pcie_configure_device(res->pcie_dev);
if (ret)
pr_err("pcie res: scan error = %d!\n", ret);
/* record the last scan jiffies */
res->action_jiff = jiffies;
/* release wakelock */
sprd_pms_release_wakelock(res->pms);
}
static void sprd_pcie_resource_remove_fn(struct work_struct *work)
{
unsigned long diff;
unsigned int delay;
int ret;
struct sprd_pcie_res *res = container_of(work, struct sprd_pcie_res,
remove_work);
/* request wakelock */
sprd_pms_request_wakelock(res->pms);
pr_info("pcie res: remove work!\n");
diff = jiffies - res->action_jiff;
if (diff < PCIE_REMOVE_SCAN_GAP) {
/* must ensure that the remove starts after a period of scan. */
delay = jiffies_to_msecs(PCIE_REMOVE_SCAN_GAP - diff);
msleep(delay);
}
/*
* in wait power off state, or when the ep device is not probed,
* the ep can't be accessed.
*/
if (res->state == SPRD_PCIE_WAIT_POWER_OFF ||
!res->ep_dev_probe) {
/* release wakelock */
sprd_pms_release_wakelock(res->pms);
return;
}
/* notify the ep of removal, must be done before the remove */
sprd_ep_dev_clear_doolbell_irq(res->ep_dev, PCIE_DBEL_EP_SCANNED);
sprd_ep_dev_raise_irq(res->ep_dev, PCIE_DBEL_EP_REMOVING);
/* waiting for the doorbell irq to ep */
msleep(50);
pr_info("pcie res: remove\n");
/* start removing the ep */
ret = sprd_pcie_unconfigure_device(res->pcie_dev);
if (ret)
pr_err("pcie res: remove error = %d.\n!", ret);
/* record the last remove jiffies */
res->action_jiff = jiffies;
/* release wakelock */
sprd_pms_release_wakelock(res->pms);
}
static void sprd_pcie_resource_start_scan(struct sprd_pcie_res *res)
{
if (res->state == SPRD_PCIE_SCANNED ||
res->state == SPRD_PCIE_WAIT_SCANNED) {
pr_info("pcie res: scanned, do nothing!\n");
} else {
pr_info("pcie res: start scan!\n");
queue_work(res->wq, &res->scan_work);
}
}
static void sprd_pcie_resource_start_remove(struct sprd_pcie_res *res)
{
/* wait power off, do nothing */
if (res->state == SPRD_PCIE_WAIT_POWER_OFF)
return;
if (res->state == SPRD_PCIE_SCANNED ||
res->state == SPRD_PCIE_WAIT_FIRST_READY
|| (res->state == SPRD_PCIE_SCANNED_2BAR)
) {
res->state = SPRD_PCIE_WAIT_REMOVED;
pr_info("pcie res: start remove.");
queue_work(res->wq, &res->remove_work);
} else {
pr_err("pcie res: start remove, err=%d.", res->state);
}
}
static void sprd_pcie_resource_event_process(enum sprd_pcie_event event,
void *data)
{
struct sprd_pcie_res *res = data;
if (event == SPRD_PCIE_EVENT_WAKEUP) {
pr_info("pcie res: wakeup by ep, event=%d.\n", event);
if (!res->ep_power_on) {
res->ep_power_on = true;
sprd_pms_power_up(res->pms);
}
}
}
/*
* sprd_pcie_resource_rescan
* Because the ep bar can only be split by the ep itself,
* after all modem images have been loaded for the first time,
* the ep will run and split 2 64bit bars into 4 32bit bars.
* The host must rescan the pcie ep device again through this api
* after it receives the "ep driver ready for rescan" msg and all
* modem images are loaded.
*/
static int sprd_pcie_resource_rescan(struct sprd_pcie_res *res)
{
pr_info("pcie res: rescan.\n");
sprd_pcie_resource_start_remove(res);
sprd_pcie_resource_start_scan(res);
return 0;
}
static int sprd_pcie_resource_check_first_rescan(void *data)
{
struct sprd_pcie_res *res = data;
int ret;
pr_info("pcie res: check first rescan.\n");
while (!kthread_should_stop()) {
ret = wait_event_interruptible(
res->wait_first_rescan,
res->ep_ready_for_rescan);
if (!ret) {
pr_info("pcie res:first resacn ready.\n");
sprd_pcie_resource_rescan(res);
break;
}
}
/* After the first rescan, restore the normal wait time. */
if (sprd_pcie_is_defective_chip())
res->max_wait_time = MAX_PMS_WAIT_TIME;
res->thread = NULL;
return 0;
}
#if 0
static int sprd_pcie_resource_host_mcd(struct notifier_block *nb,
unsigned long mode, void *cmd)
{
struct sprd_pcie_res *res;
int i;
u32 state;
pr_info("pcie res: mcd mode=%ld.\n", mode);
switch (mode) {
case MDM_POWER_OFF:
state = SPRD_PCIE_WAIT_POWER_OFF;
break;
default:
return NOTIFY_DONE;
}
for (i = 0; i < SIPC_ID_NR; i++) {
res = g_pcie_res[i];
/* wait power off, do nothing */
if (res->state == SPRD_PCIE_WAIT_POWER_OFF)
continue;
if (res) {
res->state = state;
cancel_work_sync(&res->scan_work);
cancel_work_sync(&res->remove_work);
}
}
return NOTIFY_DONE;
}
static struct notifier_block mcd_notify = {
.notifier_call = sprd_pcie_resource_host_mcd,
.priority = 149,
};
#endif
/* Because the ep bar can only be split by the ep itself,
* notify the pcie resource after all modem images have been loaded.
*/
void sprd_pcie_resource_notify_load_done(u32 dst)
{
struct sprd_pcie_res *res;
pr_info("pcie res: load done.\n");
if (dst >= SIPC_ID_NR || !g_pcie_res[dst])
return;
res = g_pcie_res[dst];
res->thread = kthread_create(sprd_pcie_resource_check_first_rescan, res,
"first rescan");
if (IS_ERR(res->thread))
pr_err("pcie res: Failed to create rescan thread.\n");
else
wake_up_process(res->thread);
}
int sprd_pcie_wait_load_resource(u32 dst)
{
struct sprd_pcie_res *res;
if (dst >= SIPC_ID_NR || !g_pcie_res[dst])
return -EINVAL;
res = g_pcie_res[dst];
/* can load image, return immediately */
if (res->state == SPRD_PCIE_SCANNED ||
res->state == SPRD_PCIE_SCANNED_2BAR)
return 0;
return wait_event_interruptible(
res->wait_load_ready,
(res->state == SPRD_PCIE_SCANNED ||
res->state == SPRD_PCIE_SCANNED_2BAR));
}
void sprd_pcie_resource_reboot_ep(u32 dst)
{
struct sprd_pcie_res *res;
pr_info("pcie res: reboot ep.\n");
if (dst >= SIPC_ID_NR || !g_pcie_res[dst])
return;
res = g_pcie_res[dst];
/* wait power off, do nothing */
if (res->state == SPRD_PCIE_WAIT_POWER_OFF)
return;
res->state = SPRD_PCIE_WAIT_FIRST_READY;
res->smem_send_to_ep = false;
res->ep_ready_for_rescan = false;
/* For the defective chip, the first wait time must be long enough. */
if (sprd_pcie_is_defective_chip())
res->max_wait_time = MAX_PMS_DEFECTIVE_CHIP_FIRST_WAIT_TIME;
else
res->max_wait_time = MAX_PMS_WAIT_TIME;
/* after ep reboot, the ep configs can't be backed up */
sprd_ep_dev_clear_backup(res->ep_dev);
sprd_pcie_resource_start_remove(res);
//modem_ctrl_poweron_modem(MDM_CTRL_COLD_RESET);
sprd_pcie_resource_start_scan(res);
}
int sprd_pcie_resource_host_init(u32 dst, u32 ep_dev,
struct platform_device *pcie_dev)
{
struct sprd_pcie_res *res;
if (dst >= SIPC_ID_NR)
return -EINVAL;
res = kzalloc(sizeof(*res), GFP_KERNEL);
if (!res)
return -ENOMEM;
res->wq = create_singlethread_workqueue("pcie_res");
if (!res->wq) {
pr_err("pcie res:create wq failed.\n");
kfree(res);
return -ENOMEM;
}
init_waitqueue_head(&res->wait_load_ready);
init_waitqueue_head(&res->wait_first_rescan);
#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,18,0 ))
res->ws = wakeup_source_register(NULL, "pcie_res");
#else
wakeup_source_init(&res->ws, "pcie_res");
#endif
res->dst = dst;
res->state = SPRD_PCIE_WAIT_FIRST_READY;
res->pcie_dev = pcie_dev;
/* For the defective chip, the first wait time must be long enough. */
if (sprd_pcie_is_defective_chip())
res->max_wait_time = MAX_PMS_DEFECTIVE_CHIP_FIRST_WAIT_TIME;
else
res->max_wait_time = MAX_PMS_WAIT_TIME;
init_waitqueue_head(&res->wait_pcie_ready);
INIT_WORK(&res->scan_work, sprd_pcie_resource_scan_fn);
INIT_WORK(&res->remove_work, sprd_pcie_resource_remove_fn);
sprintf(res->pms_name, "ep-request-%d", dst);
res->pms = sprd_pms_create(dst, res->pms_name, false);
if (!res->pms)
pr_err("pcie res:create pms failed.\n");
sprd_ep_dev_register_irq_handler_ex(res->ep_dev,
PCIE_MSI_REQUEST_RES,
PCIE_MSI_RELEASE_RES,
sprd_pcie_resource_host_irq_handler, res);
sprd_ep_dev_register_notify(res->ep_dev,
sprd_pcie_resource_host_ep_notify, res);
//modem_ctrl_register_notifier(&mcd_notify);
/* init wake up event callback */
res->reg_event.events = SPRD_PCIE_EVENT_WAKEUP;
res->reg_event.pdev = pcie_dev;
res->reg_event.callback = sprd_pcie_resource_event_process;
res->reg_event.data = res;
g_pcie_res[dst] = res;
return 0;
}
int sprd_pcie_resource_trash(u32 dst)
{
struct sprd_pcie_res *res;
if (dst >= SIPC_ID_NR || !g_pcie_res[dst])
return -EINVAL;
res = g_pcie_res[dst];
if (!IS_ERR_OR_NULL(res->thread))
kthread_stop(res->thread);
#ifdef CONFIG_SPRD_SIPA_RES
if (res->sipa_res)
pcie_sipa_res_destroy(res->sipa_res);
#endif
cancel_work_sync(&res->scan_work);
cancel_work_sync(&res->remove_work);
destroy_workqueue(res->wq);
sprd_pcie_deregister_event(&res->reg_event);
sprd_ep_dev_unregister_irq_handler_ex(res->ep_dev,
PCIE_MSI_REQUEST_RES,
PCIE_MSI_RELEASE_RES);
sprd_ep_dev_unregister_notify(res->ep_dev);
//modem_ctrl_unregister_notifier(&mcd_notify);
sprd_pms_destroy(res->pms);
kfree(res);
g_pcie_res[dst] = NULL;
return 0;
}
int sprd_pcie_wait_resource(u32 dst, int timeout)
{
struct sprd_pcie_res *res;
int ret, wait;
unsigned long delay;
if (dst >= SIPC_ID_NR || !g_pcie_res[dst])
return -EINVAL;
res = g_pcie_res[dst];
/* pcie ready, return succ immediately. */
if (res->state == SPRD_PCIE_SCANNED)
return 0;
if (timeout == 0)
return -ETIME;
/*
* In some cases, orca may hit an exception, and the pcie
* resource may never become ready again. So we must set a
* maximum wait time to let the user know there is an
* exception in pcie, and return an error code to the user.
*/
if (timeout < 0 || timeout > res->max_wait_time)
timeout = res->max_wait_time;
/*
* add 1s to the timeout,
* because the pcie scan may take some time.
*/
delay = msecs_to_jiffies(timeout + 1000);
wait = wait_event_interruptible_timeout(res->wait_pcie_ready,
res->state ==
SPRD_PCIE_SCANNED,
delay);
if (wait == 0)
ret = -ETIME;
else if (wait > 0)
ret = 0;
else
ret = wait;
if (ret < 0 && ret != -ERESTARTSYS)
pr_err("pcie res: wait resource, val=%d.\n", ret);
return ret;
}
int sprd_pcie_request_resource(u32 dst)
{
struct sprd_pcie_res *res;
if (dst >= SIPC_ID_NR || !g_pcie_res[dst])
return -EINVAL;
res = g_pcie_res[dst];
/* get a wakelock */
#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,18,0 ))
__pm_stay_awake(res->ws);
#else
__pm_stay_awake(&res->ws);
#endif
pr_info("pcie res: request resource, state=%d.\n", res->state);
#ifdef CONFIG_SPRD_PCIE
/* The first scan is started by the pcie driver automatically. */
if (res->state != SPRD_PCIE_WAIT_FIRST_READY)
sprd_pcie_resource_start_scan(res);
#endif
return 0;
}
int sprd_pcie_release_resource(u32 dst)
{
struct sprd_pcie_res *res;
if (dst >= SIPC_ID_NR || !g_pcie_res[dst])
return -EINVAL;
res = g_pcie_res[dst];
/* relax a wakelock */
#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,18,0 ))
__pm_relax(res->ws);
#else
__pm_relax(&res->ws);
#endif
#ifdef CONFIG_SPRD_PCIE
pr_info("pcie res: release resource.\n");
sprd_pcie_resource_start_remove(res);
#endif
return 0;
}
bool sprd_pcie_is_defective_chip(void)
{
#ifndef CONFIG_SPRD_PCIE
return false;
#else
static bool first_read = true, defective;
if (first_read) {
first_read = false;
defective = sprd_kproperty_chipid("UD710-AB") == 0;
}
return defective;
#endif
}

View File

@@ -0,0 +1,195 @@
/*
* Copyright (C) 2018-2019 Unisoc Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/soc/sprd/sprd_mpm.h>
#include <linux/workqueue.h>
#include "pcie_sipa_res.h"
#include "../include/sprd_pcie_resource.h"
struct pcie_sipa_res_prod {
u8 dst;
enum sipa_rm_res_id prod_id; /* producer res id */
enum sipa_rm_res_id cons_id; /* consumer res id */
struct sprd_pms *pms;
char pms_name[20];
struct work_struct wait_work;
struct delayed_work rm_work;
};
static void pcie_sipa_res_wait_res_work_fn(struct work_struct *work)
{
int ret;
struct pcie_sipa_res_prod *res = container_of(work,
struct pcie_sipa_res_prod,
wait_work);
ret = sprd_pcie_wait_resource(res->dst, -1);
/* pcie not ready, just return. */
if (ret) {
pr_err("pcie_sipa_res: wait res error = %d!\n", ret);
return;
}
/* notify ipa module that pcie is ready. */
sipa_rm_notify_completion(SIPA_RM_EVT_GRANTED,
res->prod_id);
}
static int pcie_sipa_res_request_resource(void *data)
{
int ret;
struct pcie_sipa_res_prod *res = data;
pr_info("pcie_sipa_res: request resource.\n");
sprd_pms_power_up(res->pms);
/*
* When the resource is not ready, the IPA module does not want to be
* blocked here until the pcie is ready; the IPA owner designed a
* notification api, sipa_rm_notify_completion, to notify the
* IPA module that the resource requested by IPA is ready.
* The designated error value is -EINPROGRESS, so we must override the
* return value -ETIME with -EINPROGRESS.
*/
ret = sprd_pcie_wait_resource(res->dst, 0);
if (ret == -ETIME) {
/* add a work to wait pcie ready */
schedule_work(&res->wait_work);
ret = -EINPROGRESS;
}
return ret;
}
static int pcie_sipa_res_release_resource(void *data)
{
struct pcie_sipa_res_prod *res = data;
pr_info("pcie_sipa_res: release resource.\n");
sprd_pms_release_resource(res->pms);
return 0;
}
static void pcie_sipa_res_create_rm_work_fn(struct work_struct *work)
{
int ret;
struct sipa_rm_create_params rm_params;
struct pcie_sipa_res_prod *res = container_of(to_delayed_work(work),
struct pcie_sipa_res_prod,
rm_work);
rm_params.name = res->prod_id;
rm_params.floor_voltage = 0;
rm_params.reg_params.notify_cb = NULL;
rm_params.reg_params.user_data = res;
rm_params.request_resource = pcie_sipa_res_request_resource;
rm_params.release_resource = pcie_sipa_res_release_resource;
ret = sipa_rm_create_resource(&rm_params);
/* defer to create rm */
if (ret == -EPROBE_DEFER) {
schedule_delayed_work(&res->rm_work, msecs_to_jiffies(1000));
return;
}
/* add dependencies */
ret = sipa_rm_add_dependency(res->cons_id, res->prod_id);
if (ret < 0 && ret != -EINPROGRESS) {
pr_err("pcie_sipa_res: add_dependency error = %d!\n", ret);
sipa_rm_delete_resource(res->prod_id);
sprd_pms_destroy(res->pms);
kfree(res);
}
}
void *pcie_sipa_res_create(u8 dst, enum sipa_rm_res_id prod_id,
enum sipa_rm_res_id cons_id)
{
int ret;
struct sipa_rm_create_params rm_params;
struct pcie_sipa_res_prod *res;
res = kzalloc(sizeof(*res), GFP_KERNEL);
if (!res)
return NULL;
/* init wait pcie res work */
INIT_WORK(&res->wait_work, pcie_sipa_res_wait_res_work_fn);
INIT_DELAYED_WORK(&res->rm_work, pcie_sipa_res_create_rm_work_fn);
/* create pms */
strncpy(res->pms_name, "sipa", sizeof(res->pms_name));
res->pms = sprd_pms_create(dst, res->pms_name, false);
if (!res->pms) {
pr_err("pcie_sipa_res: create pms failed!\n");
kfree(res);
return NULL;
}
res->dst = dst;
res->prod_id = prod_id;
res->cons_id = cons_id;
/* create prod */
rm_params.name = prod_id;
rm_params.floor_voltage = 0;
rm_params.reg_params.notify_cb = NULL;
rm_params.reg_params.user_data = res;
rm_params.request_resource = pcie_sipa_res_request_resource;
rm_params.release_resource = pcie_sipa_res_release_resource;
ret = sipa_rm_create_resource(&rm_params);
/* defer to create rm */
if (ret == -EPROBE_DEFER) {
schedule_delayed_work(&res->rm_work, msecs_to_jiffies(1000));
return res;
} else if (ret) {
pr_err("pcie_sipa_res: create rm error = %d!\n", ret);
sprd_pms_destroy(res->pms);
kfree(res);
return NULL;
}
/* add dependencies */
ret = sipa_rm_add_dependency(cons_id, prod_id);
if (ret < 0 && ret != -EINPROGRESS) {
pr_err("pcie_sipa_res: add_dependency error = %d!\n", ret);
sipa_rm_delete_resource(prod_id);
sprd_pms_destroy(res->pms);
kfree(res);
return NULL;
}
return res;
}
void pcie_sipa_res_destroy(void *data)
{
struct pcie_sipa_res_prod *res = data;
cancel_work_sync(&res->wait_work);
cancel_delayed_work_sync(&res->rm_work);
sprd_pms_destroy(res->pms);
sipa_rm_delete_dependency(res->cons_id, res->prod_id);
sipa_rm_delete_resource(res->prod_id);
kfree(res);
}

View File

@@ -0,0 +1,37 @@
/*
* Copyright (C) 2018-2019 Unisoc Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef PCIE_SIPA_RES_H
#define PCIE_SIPA_RES_H
#include "../include/sipa.h"
/*
* pcie_sipa_res_create - create pcie res for sipa module.
* @dst: the SIPC destination id.
* @prod_id: which res is the producer.
* @cons_id: which res is the consumer.
*
* Returns:
* NULL on failure,
* an opaque handle pointer on success.
*/
void *pcie_sipa_res_create(u8 dst, enum sipa_rm_res_id prod_id,
enum sipa_rm_res_id cons_id);
/*
* pcie_sipa_res_destroy - destroy pcie res for sipa module
* @res: the pointer returned by pcie_sipa_res_create.
*/
void pcie_sipa_res_destroy(void *res);
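/*
* Illustrative usage (a sketch, not part of this header's contract):
*
*   void *res = pcie_sipa_res_create(dst, SIPA_RM_RES_PROD_PCIE_EP,
*                                    SIPA_RM_RES_CONS_WWAN_DL);
*   ...
*   if (res)
*           pcie_sipa_res_destroy(res);
*
* The dst value and the chosen producer/consumer res ids are
* placeholders; callers pass their own SIPC dst and sipa_rm_res_id
* values.
*/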
#endif

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,126 @@
/*
* This file contains work-arounds for many known PCI hardware
* bugs. Devices present only on certain architectures (host
* bridges et cetera) should be handled in arch-specific code.
*
* Note: any quirks for hotpluggable devices must _NOT_ be declared __init.
*
* Copyright (c) 1999 Martin Mares <mj@ucw.cz>
*
* Init/reset quirks for USB host controllers should be in the
* USB quirks file, where their drivers can access reuse it.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/dmi.h>
#include <linux/version.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION( 5,4,0 ))
#include <linux/pci-aspm.h>
#endif
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/ktime.h>
#include <linux/mm.h>
#include <asm/dma.h> /* isa_dma_bridge_buggy */
#ifndef PCI_VENDOR_ID_SYNOPSYS
#define PCI_VENDOR_ID_SYNOPSYS 0x16c3
#endif
/*
* It's possible that the ep bar size is larger than the rc-allocated
* memory, so the ep bars need to be resized smaller.
* Original ep bar sizes: bar0: 256MB, bar1: 64KB, bar2: 256MB,
* bar3: 64KB, bar4: 256MB, bar5: 64KB.
* Resize to bar0: 8MB, bar1: 64KB, bar2: 2MB, bar3: 64KB,
* bar4: 2MB, bar5: 64KB.
*/
#define SPRD_PCI_BAR0 0x10
#define SPRD_BAR_NUM 0x6
#define SPRD_PCI_MISC_CTRL1_OFF 0x8bc
#define SPRD_PCI_DBI_RO_WR_EN (0x1 << 0)
#define SPRD_PCI_RESIZABLE_BAR_EXTENDED_CAP_HEADER 0x260
#define SPRD_PCI_RESIZABLE_BAR_EXTENDED_CAPID 0x15
/* Resizable BAR Capability Register */
#define SPRD_PCI_RESIZABLE_BAR0 0x264
#define SPRD_PCI_RESIZABLE_BAR2 0x26c
#define SPRD_PCI_RESIZABLE_BAR4 0x274
#define SPRD_BAR_SUPPORT_2MB (0x1 << 5)
#define SPRD_BAR_SUPPORT_4MB (0x1 << 6)
#define SPRD_BAR_SUPPORT_8MB (0x1 << 7)
/* Resizable BAR Control Register */
#define SPRD_PCI_RESIZABLE_BAR0_CTL 0x268
#define SPRD_PCI_RESIZABLE_BAR2_CTL 0x270
#define SPRD_PCI_RESIZABLE_BAR4_CTL 0x278
/* bit[13:8] is bar size */
#define SPRD_PCI_RESIZABLE_BAR_SIZE_MASK 0x3F00
#define SPRD_PCI_RESIZABLE_2MB (0x1 << 8)
#define SPRD_PCI_RESIZABLE_4MB (0x2 << 8)
#define SPRD_PCI_RESIZABLE_8MB (0x3 << 8)
#define SIZE(val) ((~(val & 0xFFFFFFF0)) + 1)
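/*
* Worked example (for illustration): after writing 0xFFFFFFFF to a BAR,
* a read-back of 0xFFC00004 is masked to (val & 0xFFFFFFF0) = 0xFFC00000,
* so SIZE(val) = ~0xFFC00000 + 1 = 0x00400000, i.e. a 4MB BAR.
*/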
static void quirk_sprd_pci_resizebar(struct pci_dev *dev)
{
u32 val, i, backup;
pci_read_config_dword(dev,
SPRD_PCI_RESIZABLE_BAR_EXTENDED_CAP_HEADER, &val);
if ((val & SPRD_PCI_RESIZABLE_BAR_EXTENDED_CAPID) !=
SPRD_PCI_RESIZABLE_BAR_EXTENDED_CAPID) {
dev_info(&dev->dev, "%s: not support resize bar\n", __func__);
return;
}
pci_read_config_dword(dev, SPRD_PCI_MISC_CTRL1_OFF, &val);
val |= SPRD_PCI_DBI_RO_WR_EN;
pci_write_config_dword(dev, SPRD_PCI_MISC_CTRL1_OFF, val);
pci_read_config_dword(dev, SPRD_PCI_RESIZABLE_BAR0, &val);
pci_write_config_dword(dev, SPRD_PCI_RESIZABLE_BAR0,
val | SPRD_BAR_SUPPORT_4MB |
SPRD_BAR_SUPPORT_8MB);
pci_read_config_dword(dev, SPRD_PCI_RESIZABLE_BAR2, &val);
pci_write_config_dword(dev, SPRD_PCI_RESIZABLE_BAR2,
val | SPRD_BAR_SUPPORT_4MB |
SPRD_BAR_SUPPORT_8MB);
pci_read_config_dword(dev, SPRD_PCI_RESIZABLE_BAR4, &val);
pci_write_config_dword(dev, SPRD_PCI_RESIZABLE_BAR4,
val | SPRD_BAR_SUPPORT_4MB |
SPRD_BAR_SUPPORT_8MB);
pci_read_config_dword(dev, SPRD_PCI_MISC_CTRL1_OFF, &val);
val &= ~SPRD_PCI_DBI_RO_WR_EN;
pci_write_config_dword(dev, SPRD_PCI_MISC_CTRL1_OFF, val);
pci_read_config_dword(dev, SPRD_PCI_RESIZABLE_BAR0_CTL, &val);
pci_write_config_dword(dev, SPRD_PCI_RESIZABLE_BAR0_CTL,
(val & (~SPRD_PCI_RESIZABLE_BAR_SIZE_MASK)) |
SPRD_PCI_RESIZABLE_4MB);
pci_read_config_dword(dev, SPRD_PCI_RESIZABLE_BAR2_CTL, &val);
pci_write_config_dword(dev, SPRD_PCI_RESIZABLE_BAR2_CTL,
(val & (~SPRD_PCI_RESIZABLE_BAR_SIZE_MASK)) |
SPRD_PCI_RESIZABLE_4MB);
pci_read_config_dword(dev, SPRD_PCI_RESIZABLE_BAR4_CTL, &val);
pci_write_config_dword(dev, SPRD_PCI_RESIZABLE_BAR4_CTL,
(val & (~SPRD_PCI_RESIZABLE_BAR_SIZE_MASK)) |
SPRD_PCI_RESIZABLE_4MB);
for (i = 0; i < SPRD_BAR_NUM; i++) {
pci_read_config_dword(dev, SPRD_PCI_BAR0 + i * 4, &backup);
pci_write_config_dword(dev, SPRD_PCI_BAR0 + i * 4, 0xFFFFFFFF);
pci_read_config_dword(dev, SPRD_PCI_BAR0 + i * 4, &val);
pci_write_config_dword(dev, SPRD_PCI_BAR0 + i * 4, backup);
dev_info(&dev->dev, "%s: bar%d size 0x%x\n",
__func__, i, SIZE(val));
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SYNOPSYS, 0xabcd, quirk_sprd_pci_resizebar);

View File

@@ -0,0 +1 @@
obj-y += power_manager.o

View File

@@ -0,0 +1,964 @@
/*
* Copyright (C) 2019 Spreadtrum Communications Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/version.h>
#include "../include/sprd_mpm.h"
#include "../include/sipc.h"
/*
* The data struct of modem power manager.
*/
struct sprd_mpm_data {
#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,18,0 ))
struct wakeup_source *ws;
#else
struct wakeup_source ws;
#endif
struct list_head pms_list;
struct timer_list timer;
spinlock_t mpm_lock;
char name[20];
const char *last_name;
unsigned int dst;
unsigned int up_cnt;
unsigned int awake_cnt;
unsigned int wakelock_cnt;
unsigned int mpm_state;
unsigned long expires;
unsigned int later_idle;
/* resource ops functions */
int (*wait_resource)(unsigned int dst, int timeout);
int (*request_resource)(unsigned int dst);
int (*release_resource)(unsigned int dst);
struct work_struct release_res_work;
struct work_struct request_res_work;
};
/*
* Save all the instance of mpm in here.
*/
static struct sprd_mpm_data *g_sprd_mpm[SIPC_ID_NR];
/**
* sprd_mpm_print_awake
* print the wakeup list to know who prevents system sleep.
*/
static void sprd_mpm_print_awake(struct sprd_mpm_data *mpm)
{
struct sprd_pms *pms;
char *awake_info;
int len = 0, max_len = 512;
awake_info = kmalloc(max_len, GFP_KERNEL);
if (!awake_info)
return;
/* print pms list */
list_for_each_entry(pms, &mpm->pms_list, entry) {
if (!pms->awake && pms->pre_awake_cnt == pms->awake_cnt)
continue;
pms->pre_awake_cnt = pms->awake_cnt;
snprintf(awake_info + len,
max_len - len,
"%s is awake, awake_cnt = %d\n",
pms->name,
pms->awake_cnt);
len = strlen(awake_info);
}
if (len)
pr_info("mpm: %s\n", awake_info);
kfree(awake_info);
}
/**
* sprd_mpm_pm_event
* monitor the PM_SUSPEND_PREPARE event.
*/
static int sprd_mpm_pm_event(struct notifier_block *notifier,
unsigned long pm_event, void *unused)
{
unsigned int i;
struct sprd_mpm_data *cur;
switch (pm_event) {
case PM_SUSPEND_PREPARE:
case PM_POST_SUSPEND:
/* check if has wake lock. */
for (i = 0; i < SIPC_ID_NR; i++) {
if (!g_sprd_mpm[i])
continue;
cur = g_sprd_mpm[i];
sprd_mpm_print_awake(cur);
}
break;
default:
break;
}
return NOTIFY_DONE;
}
/*
* The pm event notifier block, used to register the pm notifier.
*/
static struct notifier_block sprd_mpm_notifier_block = {
.notifier_call = sprd_mpm_pm_event,
};
/**
* sprd_mpm_request_resource
* request resource.
*/
static void sprd_mpm_request_resource(struct sprd_mpm_data *mpm)
{
if (mpm->request_resource)
schedule_work(&mpm->request_res_work);
}
/**
* sprd_mpm_release_resource
* release resource.
*/
static void sprd_mpm_release_resource(struct sprd_mpm_data *mpm)
{
if (mpm->release_resource)
schedule_work(&mpm->release_res_work);
}
/**
* sprd_mpm_wait_resource -wait resource.
*/
static int sprd_mpm_wait_resource(struct sprd_mpm_data *mpm, int timeout)
{
int ret = 0;
if (mpm->wait_resource) {
ret = mpm->wait_resource(mpm->dst, timeout);
if (ret < 0 && ret != -ERESTARTSYS && timeout)
pr_err("mpm: %s wait resource, ret=%d, timeout=%d.\n",
mpm->name, ret, timeout);
}
return ret;
}
/**
* sprd_mpm_active
* set the state to busy.
*/
static void sprd_mpm_active(struct sprd_mpm_data *mpm)
{
pr_debug("mpm: %s active, set state to busy.\n", mpm->name);
mpm->mpm_state = SPRD_MPM_BUSY;
sprd_mpm_request_resource(mpm);
}
/**
* sprd_mpm_deactive
* del the idle timer,
* set the state to idle.
*/
static void sprd_mpm_deactive(struct sprd_mpm_data *mpm)
{
pr_debug("mpm: %s deactive, set state to idle.\n", mpm->name);
mpm->mpm_state = SPRD_MPM_IDLE;
mpm->expires = 0;
sprd_mpm_release_resource(mpm);
}
/**
* sprd_mpm_start_deactive
* start the deactive timer.
*/
static void sprd_mpm_start_deactive(struct sprd_mpm_data *mpm)
{
pr_debug("mpm: %s start deactive.\n", mpm->name);
mpm->expires = jiffies + msecs_to_jiffies(mpm->later_idle);
if (!mpm->expires)
mpm->expires = 1;
mod_timer(&mpm->timer, mpm->expires);
}
/**
* sprd_mpm_request_res_work_fn
* do the request resource call here.
*/
static void sprd_mpm_request_res_work_fn(struct work_struct *work)
{
struct sprd_mpm_data *mpm = container_of(work, struct sprd_mpm_data,
request_res_work);
int ret;
pr_debug("mpm: %s request res work.\n", mpm->name);
ret = mpm->request_resource(mpm->dst);
if (ret)
pr_err("mpm: %s request res, ret = %d.\n", mpm->name, ret);
}
/**
* sprd_mpm_release_res_work_fn
* do the release resource call here.
*/
static void sprd_mpm_release_res_work_fn(struct work_struct *work)
{
struct sprd_mpm_data *mpm = container_of(work, struct sprd_mpm_data,
release_res_work);
int ret;
pr_debug("mpm: %s releae res work.\n", mpm->name);
ret = mpm->release_resource(mpm->dst);
if (ret)
pr_err("mpm: %s request res, ret = %d.\n", mpm->name, ret);
}
/**
* sprd_mpm_deactive_timer_fn
* in a period of time (mpm->later_idle),
* have no modem resource request,
* we consider that it doesn't need modem resource,
* than set the state to idle.
*/
static void sprd_mpm_deactive_timer_fn(
#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 4,10,0 ))
unsigned long data)
{
struct sprd_mpm_data *mpm = (struct sprd_mpm_data *)data;
#else
struct timer_list *t)
{
struct sprd_mpm_data *mpm = from_timer(mpm, t, timer);
#endif
unsigned long flags;
pr_debug("mpm: %s deactive timer.\n", mpm->name);
spin_lock_irqsave(&mpm->mpm_lock, flags);
/* expires == 0 means the timer has been cancelled. */
if (mpm->expires)
sprd_mpm_deactive(mpm);
spin_unlock_irqrestore(&mpm->mpm_lock, flags);
}
/**
* sprd_pms_cancel_timer
* cancel the pms wakelock timer.
*/
static void sprd_pms_cancel_timer(struct sprd_pms *pms)
{
unsigned long flags;
bool print = false;
spin_lock_irqsave(&pms->expires_lock, flags);
if (pms->expires) {
print = true;
pms->expires = 0;
del_timer(&pms->wake_timer);
}
spin_unlock_irqrestore(&pms->expires_lock, flags);
if (print)
pr_debug("pms: %s del timer.\n", pms->name);
}
/**
* sprd_mpm_cancel_timer
* cancel the deactive timer.
*/
static void sprd_mpm_cancel_timer(struct sprd_mpm_data *mpm)
{
if (mpm->expires) {
pr_debug("mpm: %s del timer.\n", mpm->name);
mpm->expires = 0;
del_timer(&mpm->timer);
}
}
/**
* sprd_mpm_up
* modem power manager power up.
*/
static void sprd_mpm_up(struct sprd_mpm_data *mpm, const char *name)
{
unsigned long flags;
spin_lock_irqsave(&mpm->mpm_lock, flags);
/* first cancel deactive timer */
sprd_mpm_cancel_timer(mpm);
mpm->last_name = name;
mpm->up_cnt++;
/* When up_cnt changes from 0 to 1, activate the mpm.
* Even though the count just left 0, a previous down with later_idle
* may have deferred the deactivation, so the state may still be busy;
* only activate when the mpm state is idle.
*/
if (mpm->up_cnt == 1 &&
mpm->mpm_state == SPRD_MPM_IDLE)
sprd_mpm_active(mpm);
spin_unlock_irqrestore(&mpm->mpm_lock, flags);
pr_debug("mpm: %s up, up_cnt=%d.\n", mpm->name, mpm->up_cnt);
}
/**
* sprd_mpm_down
* modem power manager power down.
*/
static void sprd_mpm_down(struct sprd_mpm_data *mpm, bool immediately)
{
unsigned long flags;
/*
* when up_cnt changes from 1 to 0,
* start deactivating the mpm.
*/
spin_lock_irqsave(&mpm->mpm_lock, flags);
mpm->up_cnt--;
if (!mpm->up_cnt) {
if (mpm->later_idle && !immediately)
sprd_mpm_start_deactive(mpm);
else
sprd_mpm_deactive(mpm);
}
spin_unlock_irqrestore(&mpm->mpm_lock, flags);
pr_debug("mpm: %s down, up_cnt=%d.\n", mpm->name, mpm->up_cnt);
}
/**
* sprd_mpm_stay_awake
* modem power manager stay awake.
*/
static void sprd_mpm_stay_awake(struct sprd_mpm_data *mpm)
{
unsigned long flags;
/*
* when wakelock_cnt changes from 0 to 1,
* acquire the system wakelock.
*/
spin_lock_irqsave(&mpm->mpm_lock, flags);
mpm->wakelock_cnt++;
if (mpm->wakelock_cnt == 1) {
mpm->awake_cnt++;
#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,18,0 ))
__pm_stay_awake(mpm->ws);
#else
__pm_stay_awake(&mpm->ws);
#endif
}
spin_unlock_irqrestore(&mpm->mpm_lock, flags);
pr_debug("mpm: %s wake, wake_cnt=%d\n",
mpm->name, mpm->wakelock_cnt);
}
/**
* sprd_mpm_relax
* modem power manager relax wakelock.
*/
static void sprd_mpm_relax(struct sprd_mpm_data *mpm)
{
unsigned long flags;
/*
* when wakelock_cnt changes from 1 to 0,
* release the system wakelock.
*/
spin_lock_irqsave(&mpm->mpm_lock, flags);
mpm->wakelock_cnt--;
if (!mpm->wakelock_cnt)
#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,18,0 ))
__pm_relax(mpm->ws);
#else
__pm_relax(&mpm->ws);
#endif
spin_unlock_irqrestore(&mpm->mpm_lock, flags);
pr_debug("mpm: %s relax wake, wake_cnt=%d\n",
mpm->name, mpm->wakelock_cnt);
}
/**
* sprd_pms_do_up_single
* do pms power up.
*/
static void sprd_pms_do_up_single(struct sprd_pms *pms)
{
struct sprd_mpm_data *mpm = (struct sprd_mpm_data *)pms->data;
/*
* when active_cnt changes from 0 to 1, mpm up.
*/
pms->active_cnt++;
if (pms->active_cnt == 1)
sprd_mpm_up(mpm, pms->name);
pr_debug("pms: %s up, active_cnt=%d.\n",
pms->name, pms->active_cnt);
}
/**
* sprd_pms_do_up_multi
* do pms power up.
*/
static void sprd_pms_do_up_multi(struct sprd_pms *pms)
{
struct sprd_mpm_data *mpm = (struct sprd_mpm_data *)pms->data;
unsigned long flags;
bool active = false;
/*
* when active_cnt changes from 0 to 1, mpm up.
*/
spin_lock_irqsave(&pms->active_lock, flags);
pms->active_cnt++;
if (pms->active_cnt == 1)
active = true;
spin_unlock_irqrestore(&pms->active_lock, flags);
pr_debug("pms: %s up, active_cnt=%d.\n",
pms->name, pms->active_cnt);
if (active)
sprd_mpm_up(mpm, pms->name);
}
static void sprd_pms_do_up(struct sprd_pms *pms)
{
if (pms->multitask)
sprd_pms_do_up_multi(pms);
else
sprd_pms_do_up_single(pms);
}
/**
* sprd_pms_do_down_single
* do pms power down.
*/
static void sprd_pms_do_down_single(struct sprd_pms *pms, bool immediately)
{
struct sprd_mpm_data *mpm = (struct sprd_mpm_data *)pms->data;
/*
* when active_cnt changes from 1 to 0, mpm down.
*/
if (pms->active_cnt > 0) {
pms->active_cnt--;
if (pms->active_cnt == 0)
sprd_mpm_down(mpm, immediately);
}
pr_debug("pms: %s down, active_cnt=%d.\n",
pms->name, pms->active_cnt);
}
/**
* sprd_pms_do_down_multi
* do pms power down.
*/
static void sprd_pms_do_down_multi(struct sprd_pms *pms, bool immediately)
{
struct sprd_mpm_data *mpm = (struct sprd_mpm_data *)pms->data;
unsigned long flags;
bool deactive = false;
/*
* when active_cnt changes from 1 to 0, mpm down.
*/
spin_lock_irqsave(&pms->active_lock, flags);
if (pms->active_cnt > 0) {
pms->active_cnt--;
if (pms->active_cnt == 0)
deactive = true;
}
spin_unlock_irqrestore(&pms->active_lock, flags);
pr_debug("pms: %s down, active_cnt=%d.\n",
pms->name, pms->active_cnt);
if (deactive)
sprd_mpm_down(mpm, immediately);
}
static void sprd_pms_do_down(struct sprd_pms *pms, bool immediately)
{
if (pms->multitask)
sprd_pms_do_down_multi(pms, immediately);
else
sprd_pms_do_down_single(pms, immediately);
}
/**
* sprd_pms_stay_awake
* power manager source stays awake.
*/
static void sprd_pms_stay_awake(struct sprd_pms *pms)
{
struct sprd_mpm_data *mpm = (struct sprd_mpm_data *)pms->data;
pr_debug("pms: %s stay awake.\n", pms->name);
pms->awake_cnt++;
if (!pms->awake) {
pms->awake = true;
sprd_mpm_stay_awake(mpm);
}
}
/**
* sprd_pms_relax
* power manager source releases the wakelock.
*/
static void sprd_pms_relax(struct sprd_pms *pms)
{
struct sprd_mpm_data *mpm = (struct sprd_mpm_data *)pms->data;
pr_debug("pms: %s relax awake.\n", pms->name);
if (pms->awake) {
pms->awake = false;
sprd_mpm_relax(mpm);
}
}
/**
* sprd_pms_relax_wakelock_timer
* the timer handler for the delayed pms wakelock release.
*/
static void sprd_pms_relax_wakelock_timer(
#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 4,10,0 ))
unsigned long data)
{
struct sprd_pms *pms = (struct sprd_pms *)data;
#else
struct timer_list *t)
{
struct sprd_pms *pms = from_timer(pms, t, wake_timer);
#endif
unsigned long flags;
bool relax = false;
pr_debug("pms: %s timer down.\n", pms->name);
spin_lock_irqsave(&pms->expires_lock, flags);
/*
* if jiffies < pms->expires, mpm called has been canceled,
* don't call sprd_pms_down.
*/
if (pms->expires && time_after_eq(jiffies, pms->expires)) {
pms->expires = 0;
relax = true;
}
spin_unlock_irqrestore(&pms->expires_lock, flags);
if (relax)
sprd_pms_relax(pms);
}
int sprd_mpm_create(unsigned int dst, const char *name,
unsigned int later_idle)
{
struct sprd_mpm_data *mpm;
if (dst >= SIPC_ID_NR)
return -EINVAL;
mpm = kzalloc(sizeof(*mpm), GFP_KERNEL);
if (!mpm)
return -ENOMEM;
snprintf(mpm->name, sizeof(mpm->name), "%s-mpm-%d", name, dst);
mpm->dst = dst;
mpm->later_idle = later_idle;
spin_lock_init(&mpm->mpm_lock);
INIT_LIST_HEAD(&mpm->pms_list);
#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,18,0 ))
mpm->ws = wakeup_source_register(NULL, mpm->name);
#else
wakeup_source_init(&mpm->ws, mpm->name);
#endif
#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 4,10,0 ))
setup_timer(&mpm->timer,
sprd_mpm_deactive_timer_fn,
(unsigned long)mpm);
#else
timer_setup(&mpm->timer,
sprd_mpm_deactive_timer_fn,
0);
#endif
INIT_WORK(&mpm->request_res_work, sprd_mpm_request_res_work_fn);
INIT_WORK(&mpm->release_res_work, sprd_mpm_release_res_work_fn);
g_sprd_mpm[dst] = mpm;
return 0;
}
int sprd_mpm_init_resource_ops(unsigned int dst,
int (*wait_resource)(unsigned int dst,
int timeout),
int (*request_resource)(unsigned int dst),
int (*release_resource)(unsigned int dst))
{
struct sprd_mpm_data *mpm;
if (dst >= SIPC_ID_NR)
return -EINVAL;
mpm = g_sprd_mpm[dst];
if (!mpm)
return -ENODEV;
mpm->wait_resource = wait_resource;
mpm->request_resource = request_resource;
mpm->release_resource = release_resource;
return 0;
}
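/*
 * A minimal bring-up sketch (the callback names my_wait_res, my_request_res
 * and my_release_res and the 500 ms later_idle value are illustrative only,
 * not part of this driver):
 *
 *   ret = sprd_mpm_create(dst, "pcie", 500);
 *   if (!ret)
 *           ret = sprd_mpm_init_resource_ops(dst, my_wait_res,
 *                                            my_request_res,
 *                                            my_release_res);
 *   pms = sprd_pms_create(dst, "my-client", false);
 */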
int sprd_mpm_destroy(unsigned int dst)
{
struct sprd_pms *pms, *temp;
struct sprd_mpm_data *mpm;
unsigned long flags;
if (dst >= SIPC_ID_NR)
return -EINVAL;
mpm = g_sprd_mpm[dst];
if (!mpm)
return -ENODEV;
sprd_mpm_cancel_timer(mpm);
cancel_work_sync(&mpm->request_res_work);
cancel_work_sync(&mpm->release_res_work);
spin_lock_irqsave(&mpm->mpm_lock, flags);
list_for_each_entry_safe(pms,
temp,
&mpm->pms_list,
entry) {
sprd_pms_cancel_timer(pms);
list_del(&pms->entry);
}
spin_unlock_irqrestore(&mpm->mpm_lock, flags);
kfree(mpm);
g_sprd_mpm[dst] = NULL;
return 0;
}
struct sprd_pms *sprd_pms_create(unsigned int dst,
const char *name, bool multitask)
{
unsigned long flags;
struct sprd_pms *pms;
struct sprd_mpm_data *mpm;
if (dst >= SIPC_ID_NR)
return NULL;
mpm = g_sprd_mpm[dst];
if (!mpm) {
pr_err("mpm: %s pms init failed, dst=%d.\n", name, dst);
return NULL;
}
pms = kzalloc(sizeof(*pms), GFP_KERNEL);
if (!pms)
return NULL;
pms->multitask = multitask;
pms->name = name;
pms->data = (void *)mpm;
spin_lock_init(&pms->expires_lock);
spin_lock_init(&pms->active_lock);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION( 4,10,0 ))
setup_timer(&pms->wake_timer,
sprd_pms_relax_wakelock_timer, (unsigned long)pms);
#else
timer_setup(&pms->wake_timer,
sprd_pms_relax_wakelock_timer, 0);
#endif
spin_lock_irqsave(&mpm->mpm_lock, flags);
list_add(&pms->entry, &mpm->pms_list);
spin_unlock_irqrestore(&mpm->mpm_lock, flags);
return pms;
}
void sprd_pms_destroy(struct sprd_pms *pms)
{
unsigned long flags;
struct sprd_mpm_data *mpm;
if (pms) {
sprd_pms_cancel_timer(pms);
mpm = (struct sprd_mpm_data *)pms->data;
spin_lock_irqsave(&mpm->mpm_lock, flags);
list_del(&pms->entry);
spin_unlock_irqrestore(&mpm->mpm_lock, flags);
kfree(pms);
}
}
/**
* sprd_pms_request_resource - request mpm resource
*
* @pms, the pointer of this pms.
* @timeout, timeout in ms.
*
* Returns:
* 0 resource ready,
* < 0 resource not ready,
* -%ERESTARTSYS if it was interrupted by a signal.
*/
int sprd_pms_request_resource(struct sprd_pms *pms, int timeout)
{
int ret;
struct sprd_mpm_data *mpm;
if (!pms)
return -EINVAL;
sprd_pms_do_up(pms);
/* wait resource */
mpm = (struct sprd_mpm_data *)pms->data;
ret = sprd_mpm_wait_resource(mpm, timeout);
if (ret)
sprd_pms_do_down(pms, false);
return ret;
}
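/*
 * A usage sketch for the request/release pair (the caller-side error
 * handling shown here is illustrative, not taken from a real user):
 *
 *   ret = sprd_pms_request_resource(pms, 1000);
 *   if (!ret) {
 *           // modem resource is ready, do the transfer
 *           sprd_pms_release_resource(pms);
 *   } else if (ret == -ERESTARTSYS) {
 *           // interrupted by a signal, the caller may retry
 *   }
 */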
/**
* sprd_pms_release_resource - release mpm resource.
*
* @pms, the pointer of this pms.
*/
void sprd_pms_release_resource(struct sprd_pms *pms)
{
if (pms)
sprd_pms_do_down(pms, false);
}
/**
* sprd_pms_request_wakelock - request wakelock
*
* @pms, the pointer of this pms.
*/
void sprd_pms_request_wakelock(struct sprd_pms *pms)
{
if (pms) {
sprd_pms_cancel_timer(pms);
sprd_pms_stay_awake(pms);
}
}
/**
* sprd_pms_release_wakelock - release wakelock
*
* @pms, the pointer of this pms.
*/
void sprd_pms_release_wakelock(struct sprd_pms *pms)
{
if (pms) {
sprd_pms_cancel_timer(pms);
sprd_pms_relax(pms);
}
}
/**
* sprd_pms_request_wakelock_period -
* request the wakelock, and auto release it after msec ms.
*
* @pms, the pointer of this pms.
* @msec, the wakelock is auto released after msec ms.
*/
void sprd_pms_request_wakelock_period(struct sprd_pms *pms, unsigned int msec)
{
sprd_pms_request_wakelock(pms);
sprd_pms_release_wakelock_later(pms, msec);
}
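/*
 * Usage sketch, assuming "pms" was obtained from sprd_pms_create():
 * hold the wakelock just long enough to handle an event, e.g.
 *
 *   sprd_pms_request_wakelock_period(pms, 500);
 *
 * which acquires the wakelock now and lets the timer release it 500 ms later.
 */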
/**
* sprd_pms_release_wakelock_later - release wakelock later.
*
* @pms, the pointer of this pms.
* @msec, delay time (in ms).
*/
void sprd_pms_release_wakelock_later(struct sprd_pms *pms,
unsigned int msec)
{
unsigned long expires;
unsigned long flags;
if (pms) {
pr_debug("pms: %s release wakelock after %d ms.\n",
pms->name, msec);
spin_lock_irqsave(&pms->expires_lock, flags);
expires = jiffies + msecs_to_jiffies(msec);
if (!expires)
expires = 1;
/* always update the timer with new time */
pms->expires = expires;
mod_timer(&pms->wake_timer, expires);
spin_unlock_irqrestore(&pms->expires_lock, flags);
}
}
void sprd_pms_power_up(struct sprd_pms *pms)
{
if (pms)
sprd_pms_do_up(pms);
}
void sprd_pms_power_down(struct sprd_pms *pms, bool immediately)
{
if (pms)
sprd_pms_do_down(pms, immediately);
}
#if defined(CONFIG_DEBUG_FS)
static int sprd_mpm_stats_show(struct seq_file *m, void *unused)
{
unsigned long flags;
struct sprd_pms *pms;
struct sprd_mpm_data *cur;
unsigned int i, ms;
seq_puts(m, "---------------------------------------------\n");
seq_puts(m, "All mpm list:\n");
for (i = 0; i < SIPC_ID_NR; i++) {
if (!g_sprd_mpm[i])
continue;
cur = g_sprd_mpm[i];
seq_puts(m, "------------------------------------\n");
seq_printf(m, "mpm = %s info:\n", cur->name);
seq_printf(m, "last up module = %s info:\n",
cur->last_name ? cur->last_name : "null");
if (cur->expires > 0) {
ms = jiffies_to_msecs(cur->expires - jiffies);
seq_printf(m, "left %d ms to idle\n", ms);
}
seq_printf(m, "up_cnt=%d, state=%d.\n",
cur->up_cnt, cur->mpm_state);
seq_printf(m, "wakelock_cnt=%d, awake_cnt=%d\n",
cur->wakelock_cnt, cur->awake_cnt);
seq_puts(m, "------------------------------------\n");
seq_puts(m, "active pms list:\n");
spin_lock_irqsave(&cur->mpm_lock, flags);
list_for_each_entry(pms, &cur->pms_list, entry) {
if (!pms->active_cnt && !pms->awake)
continue;
seq_printf(m, " %s: active_cnt=%d, awake=%d\n",
pms->name, pms->active_cnt, pms->awake);
}
spin_unlock_irqrestore(&cur->mpm_lock, flags);
}
seq_puts(m, "---------------------------------------------\n");
return 0;
}
static int sprd_mpm_stats_open(struct inode *inode, struct file *file)
{
return single_open(file, sprd_mpm_stats_show, NULL);
}
static const struct file_operations sprd_mpm_stats_fops = {
.owner = THIS_MODULE,
.open = sprd_mpm_stats_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int sprd_mpm_init_debugfs(void)
{
struct dentry *root = debugfs_create_dir("mpm", NULL);
if (!root)
return -ENXIO;
debugfs_create_file("power_manage", 0444,
(struct dentry *)root,
NULL, &sprd_mpm_stats_fops);
return 0;
}
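/*
 * The stats above are meant to be read from user space, e.g.
 * (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   cat /sys/kernel/debug/mpm/power_manage
 */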
#endif
int modem_power_manager_init(void)
{
register_pm_notifier(&sprd_mpm_notifier_block);
#if defined(CONFIG_DEBUG_FS)
sprd_mpm_init_debugfs();
#endif
return 0;
}
EXPORT_SYMBOL(modem_power_manager_init);
void modem_power_manager_exit(void)
{
unregister_pm_notifier(&sprd_mpm_notifier_block);
}
EXPORT_SYMBOL(modem_power_manager_exit);

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,9 @@
menu "SIPA modules"
config SPRD_SIPA
bool "sipa ipa"
default n
help
sipa is the driver module for the Spreadtrum IP packet accelerator (IPA).
endmenu

View File

@@ -0,0 +1,6 @@
EXTRA_CFLAGS += -Wno-error -Wno-packed-bitfield-compat
ccflags-y += -DCONFIG_SPRD_SIPA
obj-y += sipa_core.o sipa_skb_send.o sipa_skb_recv.o \
sipa_nic.o sipa_debugfs.o sipa_dele_cmn.o \
sipa_eth.o sipa_dummy.o
obj-y += sipa_phy_v0/

View File

@@ -0,0 +1,333 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note.
*
* UNISOC 'virt sipa' driver
*
* Qingsheng.Li <qingsheng.li@unisoc.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License v2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/of_device.h>
#include <linux/mfd/syscon.h>
#include <linux/io.h>
#include <linux/cdev.h>
#include <linux/pm_wakeup.h>
#include <linux/pm_runtime.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include "../include/sipa.h"
#include "../include/sprd_pcie_ep_device.h"
#include "../include/sipc.h"
#include "sipa_core.h"
#define DRV_NAME "virt_sipa"
struct sipa_core *sipa_ctrl;
struct sipa_core *sipa_get_ctrl_pointer(void)
{
return sipa_ctrl;
}
EXPORT_SYMBOL(sipa_get_ctrl_pointer);
static void sipa_notify_sender_flow_ctrl(struct work_struct *work)
{
struct sipa_core *sipa_ctrl = container_of(work, struct sipa_core,
flow_ctrl_work);
if (sipa_ctrl->sender && sipa_ctrl->sender->free_notify_net)
wake_up(&sipa_ctrl->sender->free_waitq);
}
static int sipa_init_cmn_fifo(struct sipa_core *ipa,
enum sipa_cmn_fifo_index id)
{
size_t size;
dma_addr_t dma_addr;
struct sipa_cmn_fifo_cfg_tag *cmn_fifo;
cmn_fifo = &ipa->cmn_fifo_cfg[id];
cmn_fifo->fifo_id = id;
cmn_fifo->dst = SIPA_TERM_VCP;
cmn_fifo->cur = SIPA_TERM_PCIE0;
size = cmn_fifo->tx_fifo.depth *
sizeof(struct sipa_node_description_tag);
cmn_fifo->tx_fifo.virtual_addr = dma_alloc_coherent(ipa->pci_dev, size,
&dma_addr,
GFP_KERNEL);
if (!cmn_fifo->tx_fifo.virtual_addr)
return -ENOMEM;
cmn_fifo->tx_fifo.dma_ptr = dma_addr;
memset(cmn_fifo->tx_fifo.virtual_addr, 0, size);
pr_info("comfifo%d tx_fifo addr-0x%lx\n", id, (long unsigned int)cmn_fifo->tx_fifo.virtual_addr);
cmn_fifo->tx_fifo.fifo_base_addr_l = lower_32_bits(dma_addr);
cmn_fifo->tx_fifo.fifo_base_addr_h = 0x2;
size = cmn_fifo->rx_fifo.depth *
sizeof(struct sipa_node_description_tag);
cmn_fifo->rx_fifo.virtual_addr = dma_alloc_coherent(ipa->pci_dev, size,
&dma_addr,
GFP_KERNEL);
if (!cmn_fifo->rx_fifo.virtual_addr)
return -ENOMEM;
cmn_fifo->rx_fifo.dma_ptr = dma_addr;
memset(cmn_fifo->rx_fifo.virtual_addr, 0, size);
pr_info("comfifo%d rx_fifo addr-0x%lx\n", id, (long unsigned int)cmn_fifo->rx_fifo.virtual_addr);
cmn_fifo->rx_fifo.fifo_base_addr_l = lower_32_bits(dma_addr);
cmn_fifo->rx_fifo.fifo_base_addr_h = 0x2;
return 0;
}
static void sipa_free_cmn_fifo(struct sipa_core *ipa, enum sipa_cmn_fifo_index id)
{
size_t size;
struct sipa_cmn_fifo_cfg_tag *cmn_fifo;
cmn_fifo = &ipa->cmn_fifo_cfg[id];
size = cmn_fifo->tx_fifo.depth * sizeof(struct sipa_node_description_tag);
dma_free_coherent(ipa->dev, size, cmn_fifo->tx_fifo.virtual_addr, cmn_fifo->tx_fifo.dma_ptr);
size = cmn_fifo->rx_fifo.depth * sizeof(struct sipa_node_description_tag);
dma_free_coherent(ipa->dev, size, cmn_fifo->rx_fifo.virtual_addr, cmn_fifo->rx_fifo.dma_ptr);
}
static void sipa_init_ep(struct sipa_core *ipa)
{
struct sipa_endpoint *ep = &ipa->ep;
ep->send_fifo = &ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_UL];
ep->recv_fifo = &ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_DL];
}
#ifdef SPRD_PCIE_USE_DTS
static int sipa_parse_dts_configuration(struct platform_device *pdev,
struct sipa_core *ipa)
{
int ret;
struct sipa_cmn_fifo_cfg_tag *cmn_fifo;
ipa->reg_res = platform_get_resource_byname(pdev,
IORESOURCE_MEM, "ipa-base");
if (!ipa->reg_res) {
dev_err(&pdev->dev, "get ipa-base res fail\n");
return -EINVAL;
}
cmn_fifo = &ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_DL];
ret = of_property_read_u32(pdev->dev.of_node, "pcie-dl-tx-fifo-depth",
&cmn_fifo->tx_fifo.depth);
if (ret) {
dev_err(&pdev->dev,
"get pcie-dl-tx-fifo-depth ret = %d\n", ret);
return ret;
}
ret = of_property_read_u32(pdev->dev.of_node, "pcie-dl-rx-fifo-depth",
&cmn_fifo->rx_fifo.depth);
if (ret) {
dev_err(&pdev->dev,
"get pcie-dl-rx-fifo-depth ret = %d\n", ret);
return ret;
}
cmn_fifo = &ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_UL];
ret = of_property_read_u32(pdev->dev.of_node, "pcie-ul-tx-fifo-depth",
&cmn_fifo->tx_fifo.depth);
if (ret) {
dev_err(&pdev->dev,
"get pcie-ul-tx-fifo-depth ret = %d\n", ret);
return ret;
}
ret = of_property_read_u32(pdev->dev.of_node, "pcie-ul-rx-fifo-depth",
&cmn_fifo->rx_fifo.depth);
if (ret) {
dev_err(&pdev->dev,
"get pcie-ul-rx-fifo-depth ret = %d\n", ret);
return ret;
}
return 0;
}
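/*
 * A hedged device-tree sketch matching the properties parsed above
 * (node name, reg value and depths are illustrative only; the reg values
 * mirror the non-DTS fallback below):
 *
 *   virt-sipa {
 *           compatible = "sprd,virt-sipa";
 *           reg = <0x2e000000 0x2000>;
 *           reg-names = "ipa-base";
 *           pcie-dl-tx-fifo-depth = <4096>;
 *           pcie-dl-rx-fifo-depth = <4096>;
 *           pcie-ul-tx-fifo-depth = <4096>;
 *           pcie-ul-rx-fifo-depth = <4096>;
 *   };
 */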
#else
static struct resource ipa_res = {
.start = 0x2e000000,
.end = 0x2e000000 + 0x2000 -1,
.flags = IORESOURCE_MEM,
};
static int sipa_parse_dts_configuration(struct platform_device *pdev,
struct sipa_core *ipa)
{
struct sipa_cmn_fifo_cfg_tag *cmn_fifo;
ipa->reg_res = &ipa_res;
cmn_fifo = &ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_DL];
cmn_fifo->tx_fifo.depth = 4096;
cmn_fifo->rx_fifo.depth = 4096;
cmn_fifo = &ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_UL];
cmn_fifo->tx_fifo.depth = 4096;
cmn_fifo->rx_fifo.depth = 4096;
return 0;
}
#endif
static int sipa_plat_drv_probe(struct platform_device *pdev)
{
int ret;
struct sipa_core *ipa;
struct device *dev = &pdev->dev;
struct device *pci_dev;
pci_dev = (struct device *)dev_get_drvdata(dev);
if(!pci_dev)
return -1;
ipa = devm_kzalloc(dev, sizeof(*ipa), GFP_KERNEL);
if (!ipa)
return -ENOMEM;
sipa_ctrl = ipa;
ipa->dev = dev;
ipa->pci_dev = pci_dev;
ipa->pcie_mem_offset = SIPA_PCIE_MEM_OFFSET;
dev_set_drvdata(dev, ipa);
ret = sipa_parse_dts_configuration(pdev, ipa);
if (ret)
return ret;
ret = sipa_init_cmn_fifo(ipa, SIPA_FIFO_PCIE_DL);
if (ret)
return ret;
ret = sipa_init_cmn_fifo(ipa, SIPA_FIFO_PCIE_UL);
if (ret)
return ret;
sipa_init_ep(ipa);
sipa_fifo_ops_init(&ipa->hal_ops);
INIT_WORK(&ipa->flow_ctrl_work, sipa_notify_sender_flow_ctrl);
create_sipa_skb_receiver(&ipa->ep, &ipa->receiver);
create_sipa_skb_sender(&ipa->ep, &ipa->sender);
device_init_wakeup(dev, true);
sipa_create_smsg_channel(ipa);
sprd_ep_dev_register_irq_handler(PCIE_EP_MODEM, PCIE_MSI_IPA,
(irq_handler_t)sipa_int_callback_func,
(void *)ipa);
sipa_init_debugfs(ipa);
return 0;
}
extern void destroy_sipa_skb_receiver(struct sipa_skb_receiver *receiver);
extern void destroy_sipa_skb_sender(struct sipa_skb_sender *sender);
static int sipa_plat_drv_remove(struct platform_device *pdev)
{
struct sipa_core *ipa;
ipa = dev_get_drvdata(&pdev->dev);
smsg_ch_close(SIPC_ID_MINIAP, SMSG_CH_COMM_SIPA, 1000);
if(ipa->smsg_thread){
kthread_stop(ipa->smsg_thread);
ipa->smsg_thread = NULL;
}
destroy_sipa_skb_sender(ipa->sender);
destroy_sipa_skb_receiver(ipa->receiver);
cancel_work_sync(&ipa->flow_ctrl_work);
mdelay(1000);
sipa_free_cmn_fifo(ipa, SIPA_FIFO_PCIE_UL);
sipa_free_cmn_fifo(ipa, SIPA_FIFO_PCIE_DL);
if (!IS_ERR_OR_NULL(ipa->dentry))
debugfs_remove_recursive(ipa->dentry);
devm_kfree(&pdev->dev, ipa);
platform_set_drvdata(pdev, NULL);
return 0;
}
#ifdef SPRD_PCIE_USE_DTS
static const struct of_device_id sipa_plat_drv_match[] = {
{ .compatible = "sprd,virt-sipa"},
};
#endif
static struct platform_driver sipa_plat_drv = {
.probe = sipa_plat_drv_probe,
.remove = sipa_plat_drv_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
#ifdef SPRD_PCIE_USE_DTS
.of_match_table = sipa_plat_drv_match,
#endif
},
};
#ifndef SPRD_PCIE_USE_DTS
static struct platform_device *sipa_plat_dev;
static int sipa_core_platform_device_reigster(struct device *dev)
{
int retval = -ENOMEM;
sipa_plat_dev = platform_device_alloc("virt_sipa", -1);
if (!sipa_plat_dev)
return retval;
sipa_plat_dev->dev.dma_mask = dev->dma_mask;
sipa_plat_dev->dev.coherent_dma_mask = dev->coherent_dma_mask;
sipa_plat_dev->dev.archdata = dev->archdata;
dev_set_drvdata(&sipa_plat_dev->dev, dev);
retval = platform_device_add(sipa_plat_dev);
if (retval < 0)
platform_device_put(sipa_plat_dev);
return retval;
}
#endif
int sipa_module_init(struct device *dev)
{
#ifndef SPRD_PCIE_USE_DTS
sipa_core_platform_device_reigster(dev);
#endif
return platform_driver_register(&sipa_plat_drv);
}
EXPORT_SYMBOL(sipa_module_init);
void sipa_module_exit(void)
{
platform_driver_unregister(&sipa_plat_drv);
#ifndef SPRD_PCIE_USE_DTS
platform_device_unregister(sipa_plat_dev);
#endif
}
EXPORT_SYMBOL(sipa_module_exit);

View File

@@ -0,0 +1,519 @@
/*
* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _SIPA_CORE_H_
#define _SIPA_CORE_H_
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>
enum sipa_cmn_fifo_index {
SIPA_FIFO_PCIE_DL,
SIPA_FIFO_PCIE_UL,
SIPA_FIFO_MAX,
};
enum sipa_irq_evt_type {
SIPA_IRQ_TX_FIFO_THRESHOLD_SW = BIT(22),
SIPA_IRQ_EXIT_FLOW_CTRL = BIT(20),
SIPA_IRQ_ENTER_FLOW_CTRL = BIT(19),
SIPA_IRQ_TXFIFO_FULL_INT = BIT(18),
SIPA_IRQ_TXFIFO_OVERFLOW = BIT(17),
SIPA_IRQ_ERRORCODE_IN_TX_FIFO = BIT(16),
SIPA_IRQ_INTR_BIT = BIT(15),
SIPA_IRQ_THRESHOLD = BIT(14),
SIPA_IRQ_DELAY_TIMER = BIT(13),
SIPA_IRQ_DROP_PACKT_OCCUR = BIT(12),
SIPA_IRQ_ERROR = 0x0,
};
#define SIPA_FIFO_THRESHOLD_IRQ_EN BIT(1)
#define SIPA_FIFO_DELAY_TIMER_IRQ_EN BIT(0)
#define SIPA_PCIE_MEM_OFFSET 0x200000000ULL
enum sipa_nic_status_e {
NIC_OPEN,
NIC_CLOSE
};
#define SIPA_RECV_EVT (SIPA_IRQ_INTR_BIT | SIPA_IRQ_THRESHOLD | \
SIPA_IRQ_DELAY_TIMER | SIPA_IRQ_TX_FIFO_THRESHOLD_SW)
#define SIPA_RECV_WARN_EVT (SIPA_IRQ_TXFIFO_FULL_INT | SIPA_IRQ_TXFIFO_OVERFLOW)
#define SMSG_FLG_DELE_REQUEST 0x1
#define SMSG_FLG_DELE_RELEASE 0x2
typedef void (*sipa_irq_notify_cb)(void *priv,
enum sipa_irq_evt_type evt,
u32 data);
struct sipa_node_description_tag {
/*soft need to set*/
u64 address : 40;
/*soft need to set*/
u32 length : 20;
/*soft need to set*/
u16 offset : 12;
/*soft need to set*/
u8 net_id;
/*soft need to set*/
u8 src : 5;
/*soft need to set*/
u8 dst : 5;
u8 prio : 3;
u8 bear_id : 7;
/*soft need to set*/
u8 intr : 1;
/*soft need to set*/
u8 indx : 1;
u8 err_code : 4;
u32 reserved : 22;
} __attribute__((__packed__));
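/*
 * Note: the bitfields above pack into a single 128-bit (16-byte) descriptor:
 * 40-bit address, 20-bit length, 12-bit offset, 8-bit net_id, 5-bit src,
 * 5-bit dst, 3-bit prio, 7-bit bear_id, 1-bit intr, 1-bit indx, 4-bit
 * err_code and 22 reserved bits. On big-endian hosts the layout is
 * converted by sipa_get_node_desc()/sipa_set_node_desc() below.
 */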
struct sipa_cmn_fifo_params {
u32 tx_intr_delay_us;
u32 tx_intr_threshold;
bool flowctrl_in_tx_full;
u32 flow_ctrl_cfg;
u32 flow_ctrl_irq_mode;
u32 tx_enter_flowctrl_watermark;
u32 tx_leave_flowctrl_watermark;
u32 rx_enter_flowctrl_watermark;
u32 rx_leave_flowctrl_watermark;
u32 data_ptr_cnt;
u32 buf_size;
dma_addr_t data_ptr;
};
struct sipa_skb_dma_addr_node {
struct sk_buff *skb;
u64 dma_addr;
struct list_head list;
};
struct sipa_cmn_fifo_tag {
u32 depth;
u32 wr;
u32 rd;
u32 fifo_base_addr_l;
u32 fifo_base_addr_h;
void *virtual_addr;
dma_addr_t dma_ptr;
};
struct sipa_cmn_fifo_cfg_tag {
const char *fifo_name;
void *priv;
enum sipa_cmn_fifo_index fifo_id;
bool state;
u32 dst;
u32 cur;
void __iomem *fifo_reg_base;
struct sipa_cmn_fifo_tag rx_fifo;
struct sipa_cmn_fifo_tag tx_fifo;
u32 enter_flow_ctrl_cnt;
u32 exit_flow_ctrl_cnt;
sipa_irq_notify_cb irq_cb;
};
struct sipa_endpoint {
/* Centered on CPU/PAM */
struct sipa_cmn_fifo_cfg_tag *send_fifo;
struct sipa_cmn_fifo_cfg_tag *recv_fifo;
struct sipa_cmn_fifo_params send_fifo_param;
struct sipa_cmn_fifo_params recv_fifo_param;
bool inited;
bool connected;
bool suspended;
};
struct sipa_nic {
enum sipa_nic_id nic_id;
struct sipa_endpoint *send_ep;
struct sk_buff_head rx_skb_q;
int need_notify;
u32 src_mask;
int netid;
struct list_head list;
sipa_notify_cb cb;
void *cb_priv;
atomic_t status;
bool flow_ctrl_status;
bool continue_notify;
bool rm_flow_ctrl;
};
struct sipa_skb_array {
struct sipa_skb_dma_addr_node *array;
u32 rp;
u32 wp;
u32 depth;
};
struct sipa_skb_sender {
struct device *dev;
struct sipa_endpoint *ep;
atomic_t left_cnt;
/* To be used for add/remove nic device */
spinlock_t nic_lock;
/* To be used for send skb process */
spinlock_t send_lock;
spinlock_t exit_lock;
struct list_head nic_list;
struct list_head sending_list;
struct list_head pair_free_list;
struct sipa_skb_dma_addr_node *pair_cache;
bool free_notify_net;
bool ep_cover_net;
bool send_notify_net;
wait_queue_head_t free_waitq;
struct task_struct *free_thread;
struct task_struct *send_thread;
bool init_flag;
u32 no_mem_cnt;
u32 no_free_cnt;
u32 enter_flow_ctrl_cnt;
u32 exit_flow_ctrl_cnt;
u32 run;
};
struct sipa_skb_receiver {
struct sipa_endpoint *ep;
u32 rsvd;
struct sipa_skb_array recv_array;
wait_queue_head_t recv_waitq;
wait_queue_head_t fill_recv_waitq;
spinlock_t lock;
spinlock_t exit_lock;
u32 nic_cnt;
atomic_t need_fill_cnt;
struct sipa_nic *nic_array[SIPA_NIC_MAX];
struct task_struct *fill_thread;
u32 tx_danger_cnt;
u32 rx_danger_cnt;
u32 run;
};
struct sipa_fifo_hal_ops {
int (*open)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base, void *cookie);
int (*close)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base);
int (*set_rx_depth)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base, u32 depth);
int (*set_tx_depth)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base, u32 depth);
u32 (*get_rx_depth)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base);
int (*hal_set_tx_depth)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base,
u32 depth);
u32 (*get_tx_depth)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base);
int (*set_intr_drop_packet)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base,
u32 enable, sipa_irq_notify_cb cb);
int (*set_intr_error_code)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base,
u32 enable, sipa_irq_notify_cb cb);
int (*set_intr_timeout)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base,
u32 enable, u32 time, sipa_irq_notify_cb cb);
int (*set_hw_intr_timeout)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base,
u32 enable, u32 time, sipa_irq_notify_cb cb);
int (*set_intr_threshold)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base,
u32 enable, u32 cnt, sipa_irq_notify_cb cb);
int (*set_hw_intr_thres)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base,
u32 enable, u32 cnt, sipa_irq_notify_cb cb);
int (*set_src_dst_term)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base,
u32 src, u32 dst);
int (*enable_local_flowctrl_intr)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *
cfg_base, u32 enable, u32 irq_mode,
sipa_irq_notify_cb cb);
int (*enable_remote_flowctrl_intr)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *
cfg_base, u32 work_mode,
u32 tx_entry_watermark,
u32 tx_exit_watermark,
u32 rx_entry_watermark,
u32 rx_exit_watermark);
int (*set_interrupt_intr)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base,
u32 enable, sipa_irq_notify_cb cb);
int (*set_intr_txfifo_overflow)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base,
u32 enable, sipa_irq_notify_cb cb);
int (*set_intr_txfifo_full)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base,
u32 enable, sipa_irq_notify_cb cb);
int (*put_node_to_rx_fifo)(struct device *dev,
enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base,
struct sipa_node_description_tag *node,
u32 force_intr, u32 num);
u32 (*get_left_cnt)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base);
u32 (*recv_node_from_tx_fifo)(struct device *dev,
enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base,
u32 num);
void (*get_rx_ptr)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base,
u32 *wr, u32 *rd);
void (*get_tx_ptr)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base,
u32 *wr, u32 *rd);
void (*get_filled_depth)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base,
u32 *rx_filled, u32 *tx_filled);
u32 (*get_tx_full_status)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base);
u32 (*get_tx_empty_status)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base);
u32 (*get_rx_full_status)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base);
u32 (*get_rx_empty_status)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base);
bool (*set_rx_fifo_wptr)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base,
u32 wptr);
bool (*set_tx_fifo_wptr)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base,
u32 wptr);
int (*set_rx_tx_fifo_ptr)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base,
u32 rx_rd, u32 rx_wr, u32 tx_rd, u32 tx_wr);
int (*ctrl_receive)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base,
bool stop);
struct sipa_node_description_tag *
(*get_tx_fifo_rp)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base,
u32 index);
struct sipa_node_description_tag *
(*get_rx_fifo_wr)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base,
u32 index);
int (*set_tx_fifo_rp)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base,
u32 tx_rd);
int (*set_rx_fifo_wr)(struct device *dev, enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base,
u32 num);
int (*set_intr_eb)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base,
bool eb, u32 type);
void (*clr_tout_th_intr)(enum sipa_cmn_fifo_index id,
struct sipa_cmn_fifo_cfg_tag *cfg_base);
};
struct sipa_core {
const char *name;
struct device *dev;
struct device *pci_dev;
struct dentry *dentry;
struct sipa_endpoint ep;
struct sipa_cmn_fifo_cfg_tag cmn_fifo_cfg[SIPA_FIFO_MAX];
struct work_struct flow_ctrl_work;
/* ipa low power*/
bool remote_ready;
struct resource *reg_res;
phys_addr_t reg_mapped;
void __iomem *virt_reg_addr;
/* IPA NIC interface */
struct sipa_nic *nic[SIPA_NIC_MAX];
/* sender & receiver */
struct sipa_skb_sender *sender;
struct sipa_skb_receiver *receiver;
atomic_t recv_cnt;
u64 pcie_mem_offset;
struct sipa_fifo_hal_ops hal_ops;
struct task_struct *smsg_thread;
struct dentry *debugfs_root;
const void *debugfs_data;
};
void sipa_fifo_ops_init(struct sipa_fifo_hal_ops *ops);
struct sipa_core *sipa_get_ctrl_pointer(void);
void sipa_receiver_add_nic(struct sipa_skb_receiver *receiver,
struct sipa_nic *nic);
void sipa_receiver_open_cmn_fifo(struct sipa_skb_receiver *receiver);
void sipa_sender_open_cmn_fifo(struct sipa_skb_sender *sender);
int create_sipa_skb_sender(struct sipa_endpoint *ep,
struct sipa_skb_sender **sender_pp);
void destroy_sipa_skb_sender(struct sipa_skb_sender *sender);
void sipa_skb_sender_add_nic(struct sipa_skb_sender *sender,
struct sipa_nic *nic);
void sipa_skb_sender_remove_nic(struct sipa_skb_sender *sender,
struct sipa_nic *nic);
int sipa_skb_sender_send_data(struct sipa_skb_sender *sender,
struct sk_buff *skb,
enum sipa_term_type dst,
u8 netid);
int create_sipa_skb_receiver(struct sipa_endpoint *ep,
struct sipa_skb_receiver **receiver_pp);
void sipa_nic_notify_evt(struct sipa_nic *nic, enum sipa_evt_type evt);
void sipa_nic_try_notify_recv(struct sipa_nic *nic);
void sipa_nic_push_skb(struct sipa_nic *nic, struct sk_buff *skb);
void sipa_nic_check_flow_ctrl(void);
int sipa_create_smsg_channel(struct sipa_core *ipa);
int sipa_init_debugfs(struct sipa_core *ipa);
int sipa_int_callback_func(int evt, void *cookie);
#if defined (__BIG_ENDIAN_BITFIELD)
static inline int sipa_get_node_desc(u8 *node_addr,
struct sipa_node_description_tag *node)
{
if (!node_addr || !node)
return -EINVAL;
node->address = node_addr[0] + ((u32)node_addr[1] << 8) +
((u32)node_addr[2] << 16) + ((u32)node_addr[3] << 24) +
((u64)node_addr[4] << 32);
#if 0
node->length = node_addr[5] + ((u32)node_addr[6] << 8) +
((u32)(node_addr[7] & 0xf) << 16);
node->offset = ((node_addr[7] & 0xf0) >> 4) +
((u16)node_addr[8] << 4);
#endif
node->net_id = node_addr[9];
node->src = node_addr[10] & 0x1f;
#if 0
node->dst = ((node_addr[11] & 0x3) << 3) +
((node_addr[10] & 0xe0) >> 5);
#endif
node->err_code = ((node_addr[12] & 0xc0) >> 6) +
((node_addr[13] & 0x03) << 2);
#if 0
node->prio = (node_addr[11] & 0x1c) >> 2;
node->bear_id = ((node_addr[11] & 0xe0) >> 5) +
((node_addr[12] & 0xf) << 3);
node->intr = !!(node_addr[12] & BIT(4));
node->indx = !!(node_addr[12] & BIT(5));
node->reserved = ((node_addr[13] & 0xfc) >> 2) +
((u32)node_addr[14] << 6) + ((u32)node_addr[15] << 14);
#endif
smp_rmb();
return 0;
}
static inline int sipa_set_node_desc(u8 *dst_addr, u8 *src_addr)
{
if (!dst_addr || !src_addr)
return -EINVAL;
/* address */
dst_addr[0] = src_addr[4];
dst_addr[1] = src_addr[3];
dst_addr[2] = src_addr[2];
dst_addr[3] = src_addr[1];
dst_addr[4] = src_addr[0];
/* length */
dst_addr[5] = (src_addr[7] >> 4) + ((src_addr[6] & 0x0f) << 4);
dst_addr[6] = (src_addr[6] >> 4) + ((src_addr[5] & 0x0f) << 4);
dst_addr[7] = src_addr[5] >> 4;
/* offset */
dst_addr[7] += ((src_addr[8] & 0x0f) << 4);
dst_addr[8] = (src_addr[7] << 4) + (src_addr[8] >> 4);
/* netid */
dst_addr[9] = src_addr[9];
/* src */
dst_addr[10] = ((src_addr[10] & 0xf8) >> 3);
/* dst */
dst_addr[10] +=
((src_addr[11] >> 6) + ((src_addr[10] & 0x01) << 2)) << 5;
dst_addr[11] = (src_addr[10] & 0x6) >> 1;
/* prio */
dst_addr[11] += ((src_addr[11] & 0x38) >> 1);
/* bear_id */
dst_addr[11] += ((src_addr[12] & 0x70) << 1);
dst_addr[12] = ((src_addr[11] & 0x7) << 1) + (src_addr[12] >> 7);
/* intx */
dst_addr[12] += ((src_addr[12] & 0x8) << 1);
/* indx */
dst_addr[12] += ((src_addr[12] & 0x4) << 3);
/* err code */
dst_addr[12] += (src_addr[13] & 0xc0);
dst_addr[13] = src_addr[12] & 0x3;
/* reserved */
dst_addr[13] += src_addr[15] << 2;
dst_addr[14] = (src_addr[15] & 0x3) + (src_addr[14] << 2);
dst_addr[15] = ((src_addr[13] & 0x3f) << 2) +
((src_addr[14] & 0xc0) >> 6);
smp_wmb();
return 0;
}
#endif
#endif

View File

@@ -0,0 +1,590 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>
#include <uapi/linux/swab.h>
#include "../include/sipa.h"
#include "sipa_core.h"
static u32 debug_cmd[5], data_buf[5];
static struct sipa_node_description_tag ipa_node;
static int sipa_params_debug_show(struct seq_file *s, void *unused)
{
int i;
u32 tmp;
struct sipa_core *ipa = (struct sipa_core *)s->private;
struct sipa_cmn_fifo_cfg_tag *fifo_cfg;
seq_printf(s, "dma_mask = 0x%llx coherent_dma_mask = 0x%llx\n",
(u64)*ipa->pci_dev->dma_mask, (u64)ipa->pci_dev->coherent_dma_mask);
seq_printf(s, "remote ready = %d reg_mapped = 0x%llx virt_reg_addr = 0x%p\n",
ipa->remote_ready, (long long unsigned int)ipa->reg_mapped, ipa->virt_reg_addr);
seq_printf(s, "ipa reg start = 0x%llx size = 0x%llx pcie_mem_offset = %llx\n",
(long long unsigned int)ipa->reg_res->start, (long long unsigned int)resource_size(ipa->reg_res),
(long long unsigned int)ipa->pcie_mem_offset);
for (i = 0; i < SIPA_NIC_MAX; i++) {
if (!ipa->nic[i])
continue;
seq_printf(s, "open = %d src_mask = 0x%x netid = %d flow_ctrl_status = %d",
atomic_read(&ipa->nic[i]->status), ipa->nic[i]->src_mask,
ipa->nic[i]->netid, ipa->nic[i]->flow_ctrl_status);
seq_printf(s, " qlen = %d need_notify = %d continue_notify = %d\n",
ipa->nic[i]->rx_skb_q.qlen, ipa->nic[i]->need_notify,
ipa->nic[i]->continue_notify);
}
seq_printf(s, "sender no_mem_cnt = %d no_free_cnt = %d left_cnt = %d\n",
ipa->sender->no_mem_cnt, ipa->sender->no_free_cnt,
atomic_read(&ipa->sender->left_cnt));
seq_printf(s, "sender enter_flow_ctrl_cnt=%d, exit_flow_ctrl_cnt=%d, free_notify_net=%d, ep_cover_net=%d\n",
ipa->sender->enter_flow_ctrl_cnt, ipa->sender->exit_flow_ctrl_cnt,
ipa->sender->free_notify_net, ipa->sender->ep_cover_net);
seq_printf(s, "receiver need_fill_cnt = %d",
atomic_read(&ipa->receiver->need_fill_cnt));
seq_printf(s, " tx_danger_cnt = %d rx_danger_cnt = %d\n",
ipa->receiver->tx_danger_cnt, ipa->receiver->rx_danger_cnt);
fifo_cfg = &ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_DL];
seq_printf(s, "[PCIE_DL]state = %d fifo_reg_base = %p\n",
fifo_cfg->state, fifo_cfg->fifo_reg_base);
seq_printf(s, "[PCIE_DL]rx fifo depth = 0x%x wr = 0x%x rd = 0x%x\n",
fifo_cfg->rx_fifo.depth,
fifo_cfg->rx_fifo.wr,
fifo_cfg->rx_fifo.rd);
seq_printf(s, "[PCIE_DL]rx_fifo fifo_addrl = 0x%x fifo_addrh = 0x%x\n",
fifo_cfg->rx_fifo.fifo_base_addr_l,
fifo_cfg->rx_fifo.fifo_base_addr_h);
seq_printf(s, "[PCIE_DL]rx fifo virt addr = %p\n",
fifo_cfg->rx_fifo.virtual_addr);
seq_printf(s, "[PCIE_DL]tx fifo depth = 0x%x wr = 0x%x rd = 0x%x\n",
fifo_cfg->tx_fifo.depth, fifo_cfg->tx_fifo.wr,
fifo_cfg->tx_fifo.rd);
seq_printf(s, "[PCIE_DL]tx_fifo fifo_addrl = 0x%x fifo_addrh = 0x%x\n",
fifo_cfg->tx_fifo.fifo_base_addr_l,
fifo_cfg->tx_fifo.fifo_base_addr_h);
seq_printf(s, "[PCIE_DL]tx fifo virt addr = %p\n",
fifo_cfg->tx_fifo.virtual_addr);
fifo_cfg = &ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_UL];
seq_printf(s, "[PCIE_UL]state = %d fifo_reg_base = %p\n",
fifo_cfg->state, fifo_cfg->fifo_reg_base);
seq_printf(s, "[PCIE_UL]rx fifo depth = 0x%x wr = 0x%x rd = 0x%x\n",
fifo_cfg->rx_fifo.depth,
fifo_cfg->rx_fifo.wr,
fifo_cfg->rx_fifo.rd);
seq_printf(s, "[PCIE_UL]rx_fifo fifo_addrl = 0x%x fifo_addrh = 0x%x\n",
fifo_cfg->rx_fifo.fifo_base_addr_l,
fifo_cfg->rx_fifo.fifo_base_addr_h);
seq_printf(s, "[PCIE_UL]rx fifo virt addr = %p\n",
fifo_cfg->rx_fifo.virtual_addr);
seq_printf(s, "[PCIE_UL]tx fifo depth = 0x%x wr = 0x%x rd = 0x%x\n",
fifo_cfg->tx_fifo.depth, fifo_cfg->tx_fifo.wr,
fifo_cfg->tx_fifo.rd);
seq_printf(s, "[PCIE_UL]tx_fifo fifo_addrl = 0x%x fifo_addrh = 0x%x\n",
fifo_cfg->tx_fifo.fifo_base_addr_l,
fifo_cfg->tx_fifo.fifo_base_addr_h);
seq_printf(s, "[PCIE_UL]tx fifo virt addr = %p\n",
fifo_cfg->tx_fifo.virtual_addr);
//ep: IPA_COMMON_TX_FIFO_DEPTH 0x0Cl
tmp = readl_relaxed(ipa->virt_reg_addr + 0xc00 + 0x0C);
seq_printf(s, "neil: read IPA_COMMON_TX_FIFO_DEPTH, value = %x\n", (tmp >> 16));
//ep: IPA_COMMON_TX_FIFO_WR 0x10l
tmp = readl_relaxed(ipa->virt_reg_addr + 0xc00 + 0x10);
seq_printf(s, "neil: read IPA_COMMON_TX_FIFO_WR, value = %x\n", (tmp >> 16));
//ep: IPA_COMMON_TX_FIFO_RD 0x14l
tmp = readl_relaxed(ipa->virt_reg_addr + 0xc00 + 0x14);
seq_printf(s, "neil: read IPA_COMMON_TX_FIFO_RD, value = %x\n", (tmp >> 16));
return 0;
}
static int sipa_params_debug_open(struct inode *inode,
struct file *file)
{
return single_open(file, sipa_params_debug_show,
inode->i_private);
}
static ssize_t sipa_endian_debug_write(struct file *f, const char __user *buf,
size_t size, loff_t *l)
{
ssize_t len;
u32 debug_cmd[24], data_buf[24];
len = min(size, sizeof(data_buf) - 1);
if (copy_from_user((char *)data_buf, buf, len))
return -EFAULT;
len = sscanf((char *)data_buf, "%x %x %x %x %x %x %x %x %x %x %x %x\n",
&debug_cmd[0], &debug_cmd[1], &debug_cmd[2],
&debug_cmd[3], &debug_cmd[4], &debug_cmd[5],
&debug_cmd[6], &debug_cmd[7], &debug_cmd[8],
&debug_cmd[9], &debug_cmd[10], &debug_cmd[11]);
ipa_node.address = debug_cmd[0];
ipa_node.length = debug_cmd[1];
ipa_node.offset = debug_cmd[2];
ipa_node.net_id = debug_cmd[3];
ipa_node.src = debug_cmd[4];
ipa_node.dst = debug_cmd[5];
ipa_node.prio = debug_cmd[6];
ipa_node.bear_id = debug_cmd[7];
ipa_node.intr = debug_cmd[8];
ipa_node.indx = debug_cmd[9];
ipa_node.err_code = debug_cmd[10];
ipa_node.reserved = debug_cmd[11];
return size;
}
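/*
 * The writer above expects 12 hex fields in the order: address, length,
 * offset, net_id, src, dst, prio, bear_id, intr, indx, err_code, reserved.
 * Example, run as root since the file is created with mode 0444 (the
 * debugfs directory name is the device name, shown here as a placeholder):
 *
 *   echo "5a000000 80 0 1 13 19 0 0 1 0 0 0" > \
 *           /sys/kernel/debug/<ipa-dev>/endian
 */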
static int sipa_endian_debug_show(struct seq_file *s, void *unused)
{
int i;
u8 *byte;
seq_printf(s, "address = 0x%llx length = 0x%x offset = 0x%x net_id = 0x%x\n",
(u64)ipa_node.address, ipa_node.length, ipa_node.offset,
ipa_node.net_id);
seq_printf(s, "src = 0x%x dst = 0x%x prio = 0x%x bear_id = 0x%x\n",
ipa_node.src, ipa_node.dst, ipa_node.prio, ipa_node.bear_id);
seq_printf(s, "intr = 0x%x indx = 0x%x err_code = 0x%x reserved = 0x%x\n",
ipa_node.intr, ipa_node.indx,
ipa_node.err_code, ipa_node.reserved);
byte = (u8 *)&ipa_node;
for (i = 0; i < sizeof(ipa_node); i++)
seq_printf(s, "0x%x ", *(byte + i));
seq_puts(s, "\n");
return 0;
}
static const struct file_operations sipa_params_fops = {
.open = sipa_params_debug_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int sipa_endian_debug_open(struct inode *inode,
struct file *file)
{
return single_open(file, sipa_endian_debug_show,
inode->i_private);
}
static const struct file_operations sipa_endian_fops = {
.open = sipa_endian_debug_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = sipa_endian_debug_write,
};
static ssize_t sipa_get_node_debug_write(struct file *f, const char __user *buf,
size_t size, loff_t *l)
{
int i;
ssize_t len;
u8 debug_cmd[16], data_buf[128];
len = min(size, sizeof(data_buf) - 1);
if (copy_from_user((char *)data_buf, buf, len))
return -EFAULT;
len = sscanf((char *)data_buf, "%4hhx %4hhx %4hhx %4hhx %4hhx %4hhx %4hhx %4hhx %4hhx %4hhx %4hhx %4hhx %4hhx %4hhx %4hhx %4hhx\n",
&debug_cmd[0], &debug_cmd[1], &debug_cmd[2],
&debug_cmd[3], &debug_cmd[4], &debug_cmd[5],
&debug_cmd[6], &debug_cmd[7], &debug_cmd[8],
&debug_cmd[9], &debug_cmd[10], &debug_cmd[11],
&debug_cmd[12], &debug_cmd[13], &debug_cmd[14],
&debug_cmd[15]);
for (i = 0; i < 16; i++)
pr_err("0x%x ", debug_cmd[i]);
pr_err("\n");
#if defined (__BIG_ENDIAN_BITFIELD)
sipa_get_node_desc(debug_cmd, &ipa_node);
#else
ipa_node.address = debug_cmd[4] + ((u32)debug_cmd[3] << 8) +
((u32)debug_cmd[2] << 16) + ((u32)debug_cmd[1] << 24) +
((u64)debug_cmd[0] << 32);
ipa_node.net_id = debug_cmd[9];
ipa_node.src = debug_cmd[10] & 0x1f;
ipa_node.err_code = ((debug_cmd[13] & 0xc0) >> 6) +
((debug_cmd[12] & 0x03) << 2);
#endif
return size;
}
static int sipa_get_node_debug_show(struct seq_file *s, void *unused)
{
int i;
u8 *byte;
seq_printf(s, "address = 0x%llx length = 0x%x offset = 0x%x net_id = 0x%x\n",
(u64)ipa_node.address, ipa_node.length, ipa_node.offset,
ipa_node.net_id);
seq_printf(s, "src = 0x%x dst = 0x%x prio = 0x%x bear_id = 0x%x\n",
ipa_node.src, ipa_node.dst, ipa_node.prio, ipa_node.bear_id);
seq_printf(s, "intr = 0x%x indx = 0x%x err_code = 0x%x reserved = 0x%x\n",
ipa_node.intr, ipa_node.indx,
ipa_node.err_code, ipa_node.reserved);
byte = (u8 *)&ipa_node;
for (i = 0; i < sizeof(ipa_node); i++)
seq_printf(s, "0x%x ", *(byte + i));
seq_puts(s, "\n");
return 0;
}
static int sipa_get_node_debug_open(struct inode *inode,
struct file *file)
{
return single_open(file, sipa_get_node_debug_show,
inode->i_private);
}
static const struct file_operations sipa_get_node_fops = {
.open = sipa_get_node_debug_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = sipa_get_node_debug_write,
};
static ssize_t sipa_set_node_debug_write(struct file *f, const char __user *buf,
size_t size, loff_t *l)
{
ssize_t len;
u32 debug_cmd[24], data_buf[24];
len = min(size, sizeof(data_buf) - 1);
if (copy_from_user((char *)data_buf, buf, len))
return -EFAULT;
len = sscanf((char *)data_buf, "%x %x %x %x %x %x %x %x %x %x %x %x\n",
&debug_cmd[0], &debug_cmd[1], &debug_cmd[2],
&debug_cmd[3], &debug_cmd[4], &debug_cmd[5],
&debug_cmd[6], &debug_cmd[7], &debug_cmd[8],
&debug_cmd[9], &debug_cmd[10], &debug_cmd[11]);
ipa_node.address = debug_cmd[0];
ipa_node.length = debug_cmd[1];
ipa_node.offset = debug_cmd[2];
ipa_node.net_id = debug_cmd[3];
ipa_node.src = debug_cmd[4];
ipa_node.dst = debug_cmd[5];
ipa_node.prio = debug_cmd[6];
ipa_node.bear_id = debug_cmd[7];
ipa_node.intr = debug_cmd[8];
ipa_node.indx = debug_cmd[9];
ipa_node.err_code = debug_cmd[10];
ipa_node.reserved = debug_cmd[11];
return size;
}
static int sipa_set_node_debug_show(struct seq_file *s, void *unused)
{
#if defined (__BIG_ENDIAN_BITFIELD)
int i;
u8 node_buf[16];
#endif
seq_printf(s, "address = 0x%llx length = 0x%x offset = 0x%x net_id = 0x%x\n",
(u64)ipa_node.address, ipa_node.length, ipa_node.offset,
ipa_node.net_id);
seq_printf(s, "src = 0x%x dst = 0x%x prio = 0x%x bear_id = 0x%x\n",
ipa_node.src, ipa_node.dst, ipa_node.prio, ipa_node.bear_id);
seq_printf(s, "intr = 0x%x indx = 0x%x err_code = 0x%x reserved = 0x%x\n",
ipa_node.intr, ipa_node.indx,
ipa_node.err_code, ipa_node.reserved);
#if defined (__BIG_ENDIAN_BITFIELD)
sipa_set_node_desc(node_buf, (u8 *)&ipa_node);
for (i = 0; i < sizeof(node_buf); i++)
seq_printf(s, "0x%x ", node_buf[i]);
#endif
seq_puts(s, "\n");
return 0;
}
static int sipa_set_node_debug_open(struct inode *inode,
struct file *file)
{
return single_open(file, sipa_set_node_debug_show,
inode->i_private);
}
static const struct file_operations sipa_set_node_fops = {
.open = sipa_set_node_debug_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = sipa_set_node_debug_write,
};
static ssize_t sipa_reg_debug_write(struct file *f, const char __user *buf,
size_t size, loff_t *l)
{
ssize_t len;
struct sipa_core *ipa = f->f_inode->i_private;
len = min(size, sizeof(data_buf) - 1);
if (copy_from_user((char *)data_buf, buf, len))
return -EFAULT;
len = sscanf((char *)data_buf, "%x %x %x %x %x\n",
&debug_cmd[0], &debug_cmd[1], &debug_cmd[2],
&debug_cmd[3], &debug_cmd[4]);
if (debug_cmd[2])
writel_relaxed(debug_cmd[1], ipa->virt_reg_addr + debug_cmd[0]);
return size;
}
static int sipa_reg_debug_show(struct seq_file *s, void *unused)
{
u32 tx_filled, rx_filled;
u32 tx_wr, tx_rd, rx_wr, rx_rd;
struct sipa_core *ipa = (struct sipa_core *)s->private;
seq_printf(s, "0x%x\n",
readl_relaxed(ipa->virt_reg_addr + debug_cmd[0]));
seq_printf(s, "pcie dl tx fifo empty = %d full = %d rx fifo empty = %d full = %d\n",
ipa->hal_ops.get_tx_empty_status(SIPA_FIFO_PCIE_DL,
ipa->cmn_fifo_cfg),
ipa->hal_ops.get_tx_full_status(SIPA_FIFO_PCIE_DL,
ipa->cmn_fifo_cfg),
ipa->hal_ops.get_rx_empty_status(SIPA_FIFO_PCIE_DL,
ipa->cmn_fifo_cfg),
ipa->hal_ops.get_rx_full_status(SIPA_FIFO_PCIE_DL,
ipa->cmn_fifo_cfg));
seq_printf(s, "pcie ul tx fifo empty = %d full = %d rx fifo empty = %d full = %d\n",
ipa->hal_ops.get_tx_empty_status(SIPA_FIFO_PCIE_UL,
ipa->cmn_fifo_cfg),
ipa->hal_ops.get_tx_full_status(SIPA_FIFO_PCIE_UL,
ipa->cmn_fifo_cfg),
ipa->hal_ops.get_rx_empty_status(SIPA_FIFO_PCIE_UL,
ipa->cmn_fifo_cfg),
ipa->hal_ops.get_rx_full_status(SIPA_FIFO_PCIE_UL,
ipa->cmn_fifo_cfg));
ipa->hal_ops.get_filled_depth(SIPA_FIFO_PCIE_DL, ipa->cmn_fifo_cfg,
&rx_filled, &tx_filled);
seq_printf(s, "pcie dl tx filled = 0x%x rx filled = 0x%x\n",
tx_filled, rx_filled);
ipa->hal_ops.get_filled_depth(SIPA_FIFO_PCIE_UL, ipa->cmn_fifo_cfg,
&rx_filled, &tx_filled);
seq_printf(s, "pcie ul tx filled = 0x%x rx filled = 0x%x\n",
tx_filled, rx_filled);
ipa->hal_ops.get_rx_ptr(SIPA_FIFO_PCIE_UL, ipa->cmn_fifo_cfg, &rx_wr, &rx_rd);
ipa->hal_ops.get_tx_ptr(SIPA_FIFO_PCIE_UL, ipa->cmn_fifo_cfg, &tx_wr, &tx_rd);
seq_printf(s, "pcie ul rx_wr = 0x%x, rx_rd = 0x%x, tx_wr = 0x%x, tx_rd = 0x%x\n",
rx_wr, rx_rd, tx_wr, tx_rd);
ipa->hal_ops.get_rx_ptr(SIPA_FIFO_PCIE_DL, ipa->cmn_fifo_cfg, &rx_wr, &rx_rd);
ipa->hal_ops.get_tx_ptr(SIPA_FIFO_PCIE_DL, ipa->cmn_fifo_cfg, &tx_wr, &tx_rd);
seq_printf(s, "pcie dl rx_wr = 0x%x, rx_rd = 0x%x, tx_wr = 0x%x, tx_rd = 0x%x\n",
rx_wr, rx_rd, tx_wr, tx_rd);
sipa_int_callback_func(0, NULL);
return 0;
}
static int sipa_reg_debug_open(struct inode *inode,
struct file *file)
{
return single_open(file, sipa_reg_debug_show,
inode->i_private);
}
static const struct file_operations sipa_reg_debug_fops = {
.open = sipa_reg_debug_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = sipa_reg_debug_write,
};
static int sipa_send_test_show(struct seq_file *s, void *unused)
{
struct sk_buff *skb = NULL;
struct sipa_core *ipa = (struct sipa_core *)s->private;
if (!skb) {
skb = __dev_alloc_skb(256, GFP_KERNEL | GFP_NOWAIT);
if (!skb) {
dev_err(ipa->dev, "failed to alloc skb!\n");
return 0;
}
skb_put(skb, 128);
memset(skb->data, 0xE7, skb->len);
sipa_skb_sender_send_data(ipa->sender, skb, 0x19, 0);
}
return 0;
}
static int sipa_send_test_open(struct inode *inode, struct file *file)
{
return single_open(file, sipa_send_test_show, inode->i_private);
}
static const struct file_operations sipa_send_test_fops = {
.open = sipa_send_test_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static ssize_t sipa_nic_debug_write(struct file *f, const char __user *buf,
size_t size, loff_t *l)
{
ssize_t len;
u8 debug_cmd[24], data_buf[24];
len = min(size, sizeof(data_buf) - 1);
if (copy_from_user((char *)data_buf, buf, len))
return -EFAULT;
len = sscanf((char *)data_buf, "%4hhx %4hhx\n",
&debug_cmd[0], &debug_cmd[1]);
if (debug_cmd[1])
sipa_nic_open(debug_cmd[0], 0, NULL, NULL);
else
sipa_nic_close(debug_cmd[0]);
return size;
}
static int sipa_nic_debug_show(struct seq_file *s, void *unused)
{
//struct sk_buff *skb = NULL;
struct sipa_core *ipa = (struct sipa_core *)s->private;
struct sipa_cmn_fifo_cfg_tag *pcie_dl = &ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_DL];
//struct sipa_cmn_fifo_cfg_tag *pcie_ul = &ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_UL];
//struct sipa_cmn_fifo_tag *dl_tx_fifo = &pcie_dl->tx_fifo;
struct sipa_cmn_fifo_tag *dl_rx_fifo = &pcie_dl->rx_fifo;
//struct sipa_cmn_fifo_tag *ul_tx_fifo = &pcie_ul->tx_fifo;
//struct sipa_cmn_fifo_tag *ul_rx_fifo = &pcie_ul->rx_fifo;
struct sipa_node_description_tag *node;
int i = 0;
pr_info("dl rx_fifo addr: 0x%lx wp-%d rp-%d\n", (long unsigned int)dl_rx_fifo->virtual_addr,
dl_rx_fifo->wr, dl_rx_fifo->rd);
node = (struct sipa_node_description_tag *)dl_rx_fifo->virtual_addr;
for (i = 0; i < dl_rx_fifo->depth; i++, node++) {
pr_info("node addr 0x%lx\n", (long unsigned int)node);
pr_info("node info i-%d, addr-0x%llx len-%u off-%u netid-%u src-%u dst-%u pro-%u bearid-%u intr-%u indx-%u err-%u resd-%u\n",
i, (long long unsigned int)node->address, node->length, node->offset, node->net_id,
node->src, node->dst, node->prio, node->bear_id, node->intr,
node->indx, node->err_code, node->reserved);
}
return 0;
}
static int sipa_nic_debug_open(struct inode *inode, struct file *file)
{
return single_open(file, sipa_nic_debug_show, inode->i_private);
}
static const struct file_operations sipa_nic_debug_fops = {
.open = sipa_nic_debug_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = sipa_nic_debug_write,
};
int sipa_init_debugfs(struct sipa_core *ipa)
{
struct dentry *root;
struct dentry *file;
root = debugfs_create_dir(dev_name(ipa->dev), NULL);
if (!root) {
dev_err(ipa->dev, "sipa create debugfs fail\n");
return -ENOMEM;
}
file = debugfs_create_file("params", 0444, root, ipa,
&sipa_params_fops);
if (!file) {
dev_err(ipa->dev, "sipa create params file debugfs fail\n");
debugfs_remove_recursive(root);
return -ENOMEM;
}
file = debugfs_create_file("endian", 0444, root, ipa,
&sipa_endian_fops);
if (!file) {
dev_err(ipa->dev, "sipa create endian file debugfs fail\n");
debugfs_remove_recursive(root);
return -ENOMEM;
}
file = debugfs_create_file("get_node", 0444, root, ipa,
&sipa_get_node_fops);
if (!file) {
dev_err(ipa->dev, "sipa create endian file debugfs fail\n");
debugfs_remove_recursive(root);
return -ENOMEM;
}
file = debugfs_create_file("set_node", 0444, root, ipa,
&sipa_set_node_fops);
if (!file) {
dev_err(ipa->dev, "sipa create set node file debugfs fail\n");
debugfs_remove_recursive(root);
return -ENOMEM;
}
file = debugfs_create_file("reg", 0444, root, ipa,
&sipa_reg_debug_fops);
if (!file) {
dev_err(ipa->dev, "sipa create reg debug file debugfs fail\n");
debugfs_remove_recursive(root);
return -ENOMEM;
}
file = debugfs_create_file("send_test", 0444, root, ipa,
&sipa_send_test_fops);
if (!file) {
dev_err(ipa->dev, "sipa create send_test debug file debugfs fail\n");
debugfs_remove_recursive(root);
return -ENOMEM;
}
file = debugfs_create_file("nic", 0444, root, ipa,
&sipa_nic_debug_fops);
if (!file) {
dev_err(ipa->dev, "sipa create nic debug file debugfs fail\n");
debugfs_remove_recursive(root);
return -ENOMEM;
}
ipa->dentry = root;
return 0;
}
EXPORT_SYMBOL(sipa_init_debugfs);

View File

@@ -0,0 +1,156 @@
/*
* Copyright (C) 2018-2019 Unisoc Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/device.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include "../include/sipa.h"
#include "../include/sipc.h"
#include "../include/sprd_pcie_ep_device.h"
#include "sipa_core.h"
#define SIPA_PCIE_DL_CMN_FIFO_REG_OFFSET 0x980
#define SIPA_PCIE_UL_CMN_FIFO_REG_OFFSET 0x200
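/*
 * Ask the modem side (SIPC_ID_MINIAP) to start the IPA delegator
 * handshake by sending SMSG_FLG_DELE_REQUEST on the SIPA channel.
 */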
static int sipa_dele_start_req_work(void)
{
struct smsg msg;
msg.channel = SMSG_CH_COMM_SIPA;
msg.type = SMSG_TYPE_CMD;
msg.flag = SMSG_FLG_DELE_REQUEST;
msg.value = 0;
return smsg_send(SIPC_ID_MINIAP, &msg, -1);
}
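/*
 * Map the remote IPA register window exposed through the PCIe endpoint
 * (sprd_ep_ipa_map) and record the PCIE DL/UL common-FIFO register
 * bases inside that mapping.
 */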
static int sipa_init_cmn_fifo_reg_addr(struct sipa_core *ipa)
{
ipa->reg_mapped = sprd_ep_ipa_map(PCIE_IPA_TYPE_REG,
ipa->reg_res->start,
resource_size(ipa->reg_res));
#ifndef devm_ioremap_nocache
#define devm_ioremap_nocache devm_ioremap
#endif
ipa->virt_reg_addr = devm_ioremap_nocache(ipa->dev,
(resource_size_t)ipa->reg_mapped,
(resource_size_t)(resource_size(ipa->reg_res)));
if (!ipa->virt_reg_addr) {
dev_err(ipa->dev, "ipa reg base remap fail\n");
return -ENOMEM;
}
ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_DL].fifo_reg_base =
ipa->virt_reg_addr + SIPA_PCIE_DL_CMN_FIFO_REG_OFFSET;
ipa->cmn_fifo_cfg[SIPA_FIFO_PCIE_UL].fifo_reg_base =
ipa->virt_reg_addr + SIPA_PCIE_UL_CMN_FIFO_REG_OFFSET;
return 0;
}
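/*
 * Delegator thread: open the SIPA smsg channel, retry the start request
 * until smsg_send() succeeds or the retry budget runs out, then service
 * incoming smsg events.  On SMSG_TYPE_DONE the remote IPA is ready: the
 * register window is mapped, the common FIFOs are opened and
 * remote_ready is set.
 */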
static int conn_thread(void *data)
{
struct smsg mrecv;
int ret, timeout = 500;
struct sipa_core *ipa = data;
/* since the channel open may hang, we call it in the thread context */
ret = smsg_ch_open(SIPC_ID_MINIAP, SMSG_CH_COMM_SIPA, -1);
if (ret != 0) {
dev_err(ipa->dev, "sipa_delegator failed to open dst %d channel %d\n",
SIPC_ID_MINIAP, SMSG_CH_COMM_SIPA);
/* channel open failed, so bail out of the delegator thread */
return ret;
}
while (sipa_dele_start_req_work() && timeout--)
usleep_range(5000, 10000);
/* start listening for smsg events */
while (!kthread_should_stop()) {
/* block waiting for the next smsg on the channel */
smsg_set(&mrecv, SMSG_CH_COMM_SIPA, 0, 0, 0);
ret = smsg_recv(SIPC_ID_MINIAP, &mrecv, -1);
if (ret == -EIO || ret == -ENODEV) {
/* channel state is FREE */
usleep_range(5000, 10000);
continue;
}
dev_dbg(ipa->dev, "sipa type=%d, flag=0x%x, value=0x%08x\n",
mrecv.type, mrecv.flag, mrecv.value);
switch (mrecv.type) {
case SMSG_TYPE_OPEN:
/* just ack open */
smsg_open_ack(SIPC_ID_AP, SMSG_CH_COMM_SIPA);
break;
case SMSG_TYPE_CLOSE:
/* handle channel close */
smsg_close_ack(SIPC_ID_AP, SMSG_CH_COMM_SIPA);
break;
case SMSG_TYPE_CMD:
/* handle commands */
break;
case SMSG_TYPE_DONE:
sipa_init_cmn_fifo_reg_addr(ipa);
dev_info(ipa->dev, "remote ipa ready reg_mapped = 0x%llx\n", (long long unsigned int)ipa->reg_mapped);
sipa_receiver_open_cmn_fifo(ipa->receiver);
sipa_sender_open_cmn_fifo(ipa->sender);
sipa_nic_check_flow_ctrl();
ipa->remote_ready = true;
/* handle cmd done */
break;
case SMSG_TYPE_EVENT:
/* handle events */
break;
default:
ret = 1;
break;
}
if (ret) {
dev_info(ipa->dev, "unknown msg in conn_thrd: %d, %d, %d\n",
mrecv.type, mrecv.flag, mrecv.value);
ret = 0;
}
}
return ret;
}
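/*
 * Spawn the "sipa-dele" kthread that runs conn_thread() above.
 */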
int sipa_create_smsg_channel(struct sipa_core *ipa)
{
/* create channel thread for this seblock channel */
ipa->smsg_thread = kthread_create(conn_thread, ipa, "sipa-dele");
if (IS_ERR(ipa->smsg_thread)) {
dev_err(ipa->dev, "Failed to create monitor smsg kthread\n");
return PTR_ERR(ipa->smsg_thread);
}
wake_up_process(ipa->smsg_thread);
return 0;
}
EXPORT_SYMBOL(sipa_create_smsg_channel);

View File

@@ -0,0 +1,583 @@
/*
* Copyright (C) 2020 Spreadtrum Communications Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "sipa_dummy: " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/of_device.h>
#include <linux/interrupt.h>
#include <linux/netdev_features.h>
#include <linux/mutex.h>
#include <net/arp.h>
#include "sipa_eth.h"
#include "sipa_core.h"
#include "../include/sipa.h"
/* Device status */
#define DEV_ON 1
#define DEV_OFF 0
#define SIPA_DUMMY_NAPI_WEIGHT 64
extern struct sipa_eth_netid_device *dev_list[];
static struct net_device *dummy_dev;
static struct dentry *dummy_root;
static int sipa_dummy_debugfs_mknod(void *data);
#ifndef CONFIG_SPRD_ETHERNET
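/*
 * Answer ARP requests locally: reply with this netdev's MAC address for
 * the requested target IP and queue the reply back toward the sender,
 * so the peer can resolve addresses without a real ARP path to the host.
 */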
static int sipa_arp_reply(struct net_device *net, struct sk_buff *skb)
{
struct arphdr *parp;
u8 *arpptr, *sha;
u8 sip[4], tip[4];
struct sk_buff *reply = NULL;
parp = arp_hdr(skb);
if (parp->ar_hrd == htons(ARPHRD_ETHER) && parp->ar_pro == htons(ETH_P_IP)
&& parp->ar_op == htons(ARPOP_REQUEST) && parp->ar_hln == 6 && parp->ar_pln == 4) {
arpptr = (u8 *)parp + sizeof(struct arphdr);
sha = arpptr;
arpptr += net->addr_len; /* sha */
memcpy(sip, arpptr, sizeof(sip));
arpptr += sizeof(sip);
arpptr += net->addr_len; /* tha */
memcpy(tip, arpptr, sizeof(tip));
pr_info("%s sip = %d.%d.%d.%d, tip=%d.%d.%d.%d\n", netdev_name(net), sip[0], sip[1], sip[2], sip[3], tip[0], tip[1], tip[2], tip[3]);
reply = arp_create(ARPOP_REPLY, ETH_P_ARP, *((__be32 *)sip), skb->dev, *((__be32 *)tip), sha, net->dev_addr, sha);
if (reply) {
dev_queue_xmit(reply);
}
return 1;
}
return 0;
}
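/*
 * Learn the modem's MAC address by snooping broadcast DHCP offer/ACK
 * frames (UDP destination port 68) and caching their Ethernet source
 * address; ARP requests are answered directly by sipa_arp_reply().
 */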
static void sipa_get_modem_mac(struct sk_buff *skb, struct SIPA_ETH *sipa_eth)
{
struct ethhdr *ehdr;
struct iphdr *iph;
struct udphdr *udph;
struct sipa_eth_init_data *pdata = sipa_eth->pdata;
ehdr = (struct ethhdr *)(skb->data - ETH_HLEN);
iph = ip_hdr(skb);
udph = (struct udphdr *)(skb->data + iph->ihl*4);
if (ehdr->h_proto == htons(ETH_P_ARP)) {
sipa_arp_reply(skb->dev, skb);
return;
}
//printk("%s skb=%p, h_proto=%x, protocol=%x, saddr=%x, daddr=%x dest=%x\n", __func__, skb, ehdr->h_proto, iph->protocol, iph->saddr, iph->daddr, udph->dest);
if (ehdr->h_proto == htons(ETH_P_IP) && iph->protocol == IPPROTO_UDP && iph->saddr != 0x00000000 && iph->daddr == 0xFFFFFFFF) {
if (udph->dest == htons(68)) { /* DHCP offer/ACK */
memcpy(pdata->modem_mac, ehdr->h_source, ETH_ALEN);
pr_info("Modem Mac Address: %02x:%02x:%02x:%02x:%02x:%02x\n",
pdata->modem_mac[0], pdata->modem_mac[1], pdata->modem_mac[2], pdata->modem_mac[3], pdata->modem_mac[4], pdata->modem_mac[5]);
}
}
}
#endif
/* Ethernet-framed mode: let eth_type_trans() set skb->protocol and
 * skb->pkt_type, then trim the skb to the real IP/ARP length so any
 * padding appended by the hardware is dropped.
 */
static void sipa_dummy_prepare_skb(struct sk_buff *skb)
{
struct iphdr *iph;
struct ipv6hdr *ipv6h;
struct net_device *dev;
unsigned int real_len = 0, payload_len = 0;
bool ip_arp = true;
dev = skb->dev;
skb->protocol = eth_type_trans(skb, dev);
skb_reset_network_header(skb);
switch (ntohs(skb->protocol)) {
case ETH_P_IP:
iph = ip_hdr(skb);
real_len = ntohs(iph->tot_len);
break;
case ETH_P_IPV6:
ipv6h = ipv6_hdr(skb);
payload_len = ntohs(ipv6h->payload_len);
real_len = payload_len + sizeof(struct ipv6hdr);
break;
case ETH_P_ARP:
real_len = arp_hdr_len(dev);
break;
default:
ip_arp = false;
break;
}
if (ip_arp)
skb_trim(skb, real_len);
/* TODO: checksum ... */
skb->ip_summed = CHECKSUM_NONE;
skb->pkt_type = PACKET_HOST;
}
/* Term type 0x6 means we are in direct mode: packets arrive with a
 * dummy MAC header, so eth_type_trans() cannot be used to derive
 * skb->pkt_type and skb->protocol.  Strip the dummy header and
 * classify by IP version instead.
 */
static void sipa_dummy_direct_mode_prepare_skb(struct sk_buff *skb)
{
struct iphdr *iph;
struct ipv6hdr *ipv6h;
unsigned int real_len = 0, payload_len = 0;
skb_pull_inline(skb, ETH_HLEN);
skb_reset_network_header(skb);
iph = ip_hdr(skb);
if (iph->version == 4) {
skb->protocol = htons(ETH_P_IP);
iph = ip_hdr(skb);
real_len = ntohs(iph->tot_len);
skb_trim(skb, real_len);
} else if (iph->version == 6) {
skb->protocol = htons(ETH_P_IPV6);
ipv6h = ipv6_hdr(skb);
payload_len = ntohs(ipv6h->payload_len);
real_len = payload_len + sizeof(struct ipv6hdr);
skb_trim(skb, real_len);
} else {
pr_err("unrecognized ip version %d\n", iph->version);
}
skb->ip_summed = CHECKSUM_NONE;
skb->pkt_type = PACKET_HOST;
}
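/*
 * Pull up to 'budget' packets out of the SIPA NIC layer, steer each one
 * to the real per-netid netdev from dev_list[], fix up the header and
 * feed it to GRO.  Returns the number of packets consumed.
 */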
static int sipa_dummy_rx(struct SIPA_DUMMY *sipa_dummy, int budget)
{
struct sk_buff *skb;
struct sipa_eth_netid_device *netid_dev_info;
struct SIPA_ETH *sipa_eth;
int real_netid = 0;
int skb_cnt = 0;
int ret;
if (!sipa_dummy) {
pr_err("no sipa_dummy device\n");
return -EINVAL;
}
atomic_set(&sipa_dummy->rx_evt, 0);
while (skb_cnt < budget) {
ret = sipa_nic_rx(&real_netid, &skb, skb_cnt);
if (ret) {
switch (ret) {
case -ENODEV:
pr_err("sipa fail to find dev\n");
sipa_dummy->stats.rx_errors++;
sipa_dummy->netdev->stats.rx_errors++;
break;
case -ENODATA:
pr_err("sipa no data\n");
atomic_set(&sipa_dummy->rx_busy, 0);
break;
}
break;
}
skb_cnt++;
sipa_dummy->stats.rx_packets++;
sipa_dummy->stats.rx_bytes += skb->len;
/*
 * Determine the real per-netid device before eth_type_trans() is
 * applied; an out-of-range netid means the descriptor is bogus.
 */
if (real_netid < 0 || real_netid >= SIPA_DUMMY_IFACE_NUM) {
pr_err("illegal real_netid %d\n", real_netid);
dev_kfree_skb_any(skb);
break;
}
netid_dev_info = dev_list[real_netid];
if (!netid_dev_info || netid_dev_info->state == DEV_OFF) {
pr_info("netid= %d net is not DEV_ON\n", real_netid);
dev_kfree_skb_any(skb);
break;
}
skb->dev = netid_dev_info->ndev;
sipa_eth = netdev_priv(skb->dev);
sipa_eth->stats.rx_packets++;
sipa_eth->stats.rx_bytes += skb->len;
if (sipa_eth->pdata->term_type == 0x6) {
sipa_dummy_direct_mode_prepare_skb(skb);
} else {
sipa_dummy_prepare_skb(skb);
#ifndef CONFIG_SPRD_ETHERNET
sipa_get_modem_mac(skb, sipa_eth);
#endif
}
napi_gro_receive(&sipa_dummy->napi, skb);
}
return skb_cnt;
}
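/*
 * NAPI poll callback: drain the filled descriptors in batches and move
 * the tx FIFO read pointer forward for what was consumed.  NAPI is only
 * completed (and the IPA interrupt restored) once the FIFO is empty and
 * no rx event is pending; if an event races in, poll is rescheduled.
 */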
static int sipa_dummy_rx_poll_handler(struct napi_struct *napi, int budget)
{
int pkts = 0, num, tmp = 0;
struct SIPA_DUMMY *sipa_dummy = container_of(napi, struct SIPA_DUMMY, napi);
READ_AGAIN:
num = sipa_nic_get_filled_num();
if (!num)
goto check;
if (num > budget)
num = budget;
pkts = sipa_dummy_rx(sipa_dummy, num);
if (pkts > 0)
sipa_nic_set_tx_fifo_rp(pkts);
tmp += pkts;
budget -= pkts;
if (!budget)
goto out;
check:
if (!sipa_check_recv_tx_fifo_empty() ||
atomic_read(&sipa_dummy->rx_evt)) {
atomic_set(&sipa_dummy->rx_evt, 0);
goto READ_AGAIN;
}
atomic_set(&sipa_dummy->rx_busy, 0);
napi_complete(napi);
sipa_nic_restore_irq();
if (atomic_read(&sipa_dummy->rx_evt) ||
atomic_read(&sipa_dummy->rx_busy) ||
!sipa_check_recv_tx_fifo_empty()) {
atomic_set(&sipa_dummy->rx_evt, 0);
napi_schedule(&sipa_dummy->napi);
}
out:
return tmp;
}
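/* Interrupt-context hook: schedule NAPI unless a poll is already running. */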
static void sipa_dummy_rx_handler(void *priv)
{
struct SIPA_DUMMY *sipa_dummy = (struct SIPA_DUMMY *)priv;
if (!sipa_dummy) {
pr_err("data is NULL\n");
return;
}
if (!atomic_cmpxchg(&sipa_dummy->rx_busy, 0, 1)) {
atomic_set(&sipa_dummy->rx_evt, 0);
napi_schedule(&sipa_dummy->napi);
}
}
/* for sipa to invoke */
void sipa_dummy_recv_trigger(void)
{
struct SIPA_DUMMY *sipa_dummy;
if (!dummy_dev)
return;
sipa_dummy = netdev_priv(dummy_dev);
atomic_set(&sipa_dummy->rx_evt, 1);
sipa_dummy_rx_handler(sipa_dummy);
}
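/*
 * The dummy netdev never really transmits; anything the stack hands us
 * is counted and dropped.
 */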
static int sipa_dummy_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct SIPA_DUMMY *sipa_dummy = netdev_priv(dev);
/* update netdev statistics */
sipa_dummy->stats.tx_packets++;
sipa_dummy->stats.tx_bytes += skb->len;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
/* Open interface */
static int sipa_dummy_open(struct net_device *dev)
{
struct SIPA_DUMMY *sipa_dummy = netdev_priv(dev);
struct sipa_core *ctrl = sipa_get_ctrl_pointer();
if (!ctrl) {
return -EINVAL;
}
if (!ctrl->remote_ready)
return -EINVAL;
pr_info("dummy open\n");
if (!netif_carrier_ok(sipa_dummy->netdev)) {
netif_carrier_on(sipa_dummy->netdev);
}
netif_start_queue(dev);
//napi_enable(&sipa_dummy->napi);
napi_schedule(&sipa_dummy->napi);
return 0;
}
/* Close interface */
static int sipa_dummy_close(struct net_device *dev)
{
//struct SIPA_DUMMY *sipa_dummy = netdev_priv(dev);
pr_info("close dummy!\n");
//napi_disable(&sipa_dummy->napi);
netif_stop_queue(dev);
netif_carrier_off(dev);
return 0;
}
static struct net_device_stats *sipa_dummy_get_stats(struct net_device *dev)
{
struct SIPA_DUMMY *sipa_dummy = netdev_priv(dev);
return &sipa_dummy->stats;
}
static const struct net_device_ops sipa_dummy_ops = {
.ndo_open = sipa_dummy_open,
.ndo_stop = sipa_dummy_close,
.ndo_start_xmit = sipa_dummy_start_xmit,
.ndo_get_stats = sipa_dummy_get_stats,
};
static void s_setup(struct net_device *dev)
{
ether_setup(dev);
}
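/*
 * Allocate and register the single "sipa_dummy0" netdev; it owns the
 * shared NAPI context that services receive traffic for all per-netid
 * SIPA ethernet interfaces.
 */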
static int sipa_dummy_probe(struct platform_device *pdev)
{
struct SIPA_DUMMY *sipa_dummy;
struct net_device *netdev;
int ret;
#ifdef NET_NAME_PREDICTABLE
netdev = alloc_netdev(
sizeof(struct SIPA_DUMMY),
"sipa_dummy0",
NET_NAME_PREDICTABLE,
s_setup);
#else
netdev = alloc_netdev(
sizeof(struct SIPA_DUMMY),
"sipa_dummy0",
s_setup);
#endif
if (!netdev) {
pr_err("alloc_netdev() failed.\n");
return -ENOMEM;
}
dummy_dev = netdev;
netdev->type = ARPHRD_ETHER;
sipa_dummy = netdev_priv(netdev);
sipa_dummy->netdev = netdev;
netdev->netdev_ops = &sipa_dummy_ops;
netdev->watchdog_timeo = 1 * HZ;
netdev->irq = 0;
netdev->dma = 0;
netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM);
random_ether_addr(netdev->dev_addr);
netif_napi_add(netdev,
&sipa_dummy->napi,
sipa_dummy_rx_poll_handler,
SIPA_DUMMY_NAPI_WEIGHT);
/* Register new Ethernet interface */
ret = register_netdev(netdev);
if (ret) {
pr_err("register_netdev() failed (%d)\n", ret);
netif_napi_del(&sipa_dummy->napi);
free_netdev(netdev);
return ret;
}
/* Set link as disconnected */
netif_carrier_off(netdev);
platform_set_drvdata(pdev, sipa_dummy);
sipa_dummy_debugfs_mknod((void *)sipa_dummy);
napi_enable(&sipa_dummy->napi);
return 0;
}
/* Cleanup Ethernet device driver. */
static int sipa_dummy_remove(struct platform_device *pdev)
{
struct SIPA_DUMMY *sipa_dummy = platform_get_drvdata(pdev);
netif_stop_queue(sipa_dummy->netdev);
napi_disable(&sipa_dummy->napi);
netif_napi_del(&sipa_dummy->napi);
unregister_netdev(sipa_dummy->netdev);
free_netdev(sipa_dummy->netdev);
platform_set_drvdata(pdev, NULL);
if (!IS_ERR_OR_NULL(dummy_root))
debugfs_remove_recursive(dummy_root);
return 0;
}
#ifdef SPRD_PCIE_USE_DTS
static const struct of_device_id sipa_dummy_match_table[] = {
{ .compatible = "sprd,sipa_dummy"},
{ }
};
#endif
static struct platform_driver sipa_dummy_driver = {
.probe = sipa_dummy_probe,
.remove = sipa_dummy_remove,
.driver = {
.owner = THIS_MODULE,
.name = "sipa_dummy",
#ifdef SPRD_PCIE_USE_DTS
.of_match_table = sipa_dummy_match_table
#endif
}
};
#ifndef SPRD_PCIE_USE_DTS
static struct platform_device *sipa_dummy_device;
static int sipa_dummy_platform_device_reigster(void)
{
int retval = -ENOMEM;
sipa_dummy_device = platform_device_alloc("sipa_dummy", -1);
if (!sipa_dummy_device)
return retval;
retval = platform_device_add(sipa_dummy_device);
if (retval < 0)
platform_device_put(sipa_dummy_device);
return retval;
}
#endif
static int sipa_dummy_debug_show(struct seq_file *m, void *v)
{
struct SIPA_DUMMY *sipa_dummy = (struct SIPA_DUMMY *)(m->private);
if (!sipa_dummy) {
pr_err("invalid data, sipa_dummy is NULL\n");
return -EINVAL;
}
seq_puts(m, "*************************************************\n");
seq_printf(m, "DEVICE: %s rx_busy=%d rx_evt=%d\n",
sipa_dummy->netdev->name, atomic_read(&sipa_dummy->rx_busy),
atomic_read(&sipa_dummy->rx_evt));
seq_puts(m, "*************************************************\n");
return 0;
}
static int sipa_dummy_debug_open(struct inode *inode, struct file *file)
{
return single_open(file, sipa_dummy_debug_show, inode->i_private);
}
static const struct file_operations sipa_dummy_debug_fops = {
.open = sipa_dummy_debug_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int sipa_dummy_debugfs_mknod(void *data)
{
if (!dummy_root) {
pr_err("dummy dir is NULL\n");
return -ENXIO;
}
debugfs_create_file("stats",
0444,
dummy_root,
data,
&sipa_dummy_debug_fops);
return 0;
}
static void __init sipa_dummy_debugfs_init(void)
{
dummy_root = debugfs_create_dir("sipa_dummy", NULL);
if (!dummy_root)
pr_err("failed to create sipa_dummy debugfs dir\n");
}
int sipa_dummy_init(void)
{
sipa_dummy_debugfs_init();
#ifndef SPRD_PCIE_USE_DTS
sipa_dummy_platform_device_reigster();
#endif
return platform_driver_register(&sipa_dummy_driver);
}
EXPORT_SYMBOL(sipa_dummy_init);
void sipa_dummy_exit(void)
{
platform_driver_unregister(&sipa_dummy_driver);
#ifndef SPRD_PCIE_USE_DTS
platform_device_unregister(sipa_dummy_device);
#endif
}
EXPORT_SYMBOL(sipa_dummy_exit);

Some files were not shown because too many files have changed in this diff