qca-nss-dp: update to ath11.5-cs

Signed-off-by: John Crispin <john@phrozen.org>
Commit b68affdf6a by John Crispin, 2022-05-12 14:31:33 +02:00 (parent 7a95f9ac2d)
37 changed files with 71 additions and 11548 deletions

@@ -5,6 +5,13 @@ PKG_NAME:=qca-nss-dp
PKG_SOURCE_PROTO:=git
PKG_BRANCH:=master
PKG_RELEASE:=1
PKG_SOURCE_URL:=https://git.codelinaro.org/clo/qsdk/oss/lklm/nss-dp
PKG_MIRROR_HASH:=dc5e870bf781d052399e8bbc0aa3d6593abeeff29304b64c685584f09fd29519
PKG_VERSION:=480f036cc96d4e5faa426cfcf90fa7e64dff87e8
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION)
PKG_SOURCE_VERSION:=$(PKG_VERSION)
include $(INCLUDE_DIR)/package.mk
@@ -12,7 +19,8 @@ define KernelPackage/qca-nss-dp
SECTION:=kernel
CATEGORY:=Kernel modules
SUBMENU:=Network Devices
-DEPENDS:=@TARGET_ipq807x +kmod-qca-ssdk
+DEPENDS:=@TARGET_ipq807x||TARGET_ipq_ipq807x_64||TARGET_ipq807x||TARGET_ipq_ipq60xx||TARGET_ipq_ipq60xx_64||TARGET_ipq60xx||TARGET_ipq_ipq50xx||TARGET_ipq_ipq50xx_64||TARGET_ipq50xx\
+	+kmod-qca-ssdk
TITLE:=Kernel driver for NSS data plane
FILES:=$(PKG_BUILD_DIR)/qca-nss-dp.ko
AUTOLOAD:=$(call AutoLoad,31,qca-nss-dp)
@@ -36,7 +44,7 @@ NSS_DP_HAL_DIR:=$(PKG_BUILD_DIR)/hal
hal_arch:=$(subtarget)
define Build/Configure
-$(LN) $(NSS_DP_HAL_DIR)/arch/$(hal_arch)/nss_$(hal_arch).h \
+$(LN) $(NSS_DP_HAL_DIR)/soc_ops/$(hal_arch)/nss_$(hal_arch).h \
$(PKG_BUILD_DIR)/exports/nss_dp_arch.h
endef
@@ -45,7 +53,8 @@ define Build/Compile
CROSS_COMPILE="$(TARGET_CROSS)" \
ARCH="$(LINUX_KARCH)" \
M="$(PKG_BUILD_DIR)" \
-EXTRA_CFLAGS="$(EXTRA_CFLAGS)" SoC="$(subtarget)" \
+EXTRA_CFLAGS="$(EXTRA_CFLAGS)" SoC="$(hal_arch)" \
+KBUILD_MODPOST_WARN=1 \
modules
endef

@@ -0,0 +1,59 @@
include $(TOPDIR)/rules.mk
include $(INCLUDE_DIR)/kernel.mk
PKG_NAME:=qca-nss-dp
PKG_SOURCE_PROTO:=git
PKG_BRANCH:=master
PKG_RELEASE:=1
PKG_SOURCE_URL:=https://git.codelinaro.org/clo/qsdk/oss/lklm/nss-dp
PKG_MIRROR_HASH:=dc5e870bf781d052399e8bbc0aa3d6593abeeff29304b64c685584f09fd29519
PKG_VERSION:=480f036cc96d4e5faa426cfcf90fa7e64dff87e8
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION)
PKG_SOURCE_VERSION:=$(PKG_VERSION)
include $(INCLUDE_DIR)/package.mk
define KernelPackage/qca-nss-dp
SECTION:=kernel
CATEGORY:=Kernel modules
SUBMENU:=Network Devices
DEPENDS:=@TARGET_ipq807x +kmod-qca-ssdk
TITLE:=Kernel driver for NSS data plane
FILES:=$(PKG_BUILD_DIR)/qca-nss-dp.ko
AUTOLOAD:=$(call AutoLoad,31,qca-nss-dp)
endef
define KernelPackage/qca-nss-dp/Description
This package contains a NSS data plane driver for QCA chipset
endef
define Build/InstallDev
mkdir -p $(1)/usr/include/qca-nss-dp
$(CP) $(PKG_BUILD_DIR)/exports/* $(1)/usr/include/qca-nss-dp/
endef
EXTRA_CFLAGS+= \
-I$(STAGING_DIR)/usr/include/qca-ssdk
subtarget:=$(SUBTARGET)
NSS_DP_HAL_DIR:=$(PKG_BUILD_DIR)/hal
hal_arch:=$(subtarget)
define Build/Configure
$(LN) $(NSS_DP_HAL_DIR)/soc_ops/$(hal_arch)/nss_$(hal_arch).h \
$(PKG_BUILD_DIR)/exports/nss_dp_arch.h
endef
define Build/Compile
$(MAKE) -C "$(LINUX_DIR)" \
CROSS_COMPILE="$(TARGET_CROSS)" \
ARCH="$(LINUX_KARCH)" \
M="$(PKG_BUILD_DIR)" \
EXTRA_CFLAGS="$(EXTRA_CFLAGS)" SoC="$(subtarget)" \
modules
endef
$(eval $(call KernelPackage,qca-nss-dp))

@@ -1,56 +0,0 @@
###################################################
# Makefile for the NSS data plane driver
###################################################
obj ?= .
obj-m += qca-nss-dp.o
qca-nss-dp-objs += nss_dp_attach.o \
nss_dp_ethtools.o \
nss_dp_main.o
ifneq ($(CONFIG_NET_SWITCHDEV),)
qca-nss-dp-objs += nss_dp_switchdev.o
endif
ifeq ($(SoC),$(filter $(SoC),ipq807x ipq807x_64 ipq60xx ipq60xx_64))
qca-nss-dp-objs += hal/edma/edma_cfg.o \
hal/edma/edma_data_plane.o \
hal/edma/edma_tx_rx.o \
hal/gmac_hal_ops/qcom/qcom_if.o \
hal/gmac_hal_ops/syn/xgmac/syn_if.o
endif
NSS_DP_INCLUDE = -I$(obj)/include -I$(obj)/exports -I$(obj)/gmac_hal_ops/include \
-I$(obj)/hal/include
ifeq ($(SoC),$(filter $(SoC),ipq50xx ipq50xx_64))
NSS_DP_INCLUDE += -I$(obj)/hal/gmac_hal_ops/syn/gmac
endif
ccflags-y += $(NSS_DP_INCLUDE)
ccflags-y += -Wall -Werror
ifeq ($(SoC),$(filter $(SoC),ipq807x ipq807x_64 ipq60xx ipq60xx_64))
ccflags-y += -DNSS_DP_PPE_SUPPORT
endif
ifeq ($(SoC),$(filter $(SoC),ipq60xx ipq60xx_64))
qca-nss-dp-objs += hal/arch/ipq60xx/nss_ipq60xx.o
ccflags-y += -DNSS_DP_IPQ60XX
endif
ifeq ($(SoC),$(filter $(SoC),ipq807x ipq807x_64))
qca-nss-dp-objs += hal/arch/ipq807x/nss_ipq807x.o
ccflags-y += -DNSS_DP_IPQ807X -DNSS_DP_EDMA_TX_SMALL_PKT_WAR
endif
ifeq ($(SoC),$(filter $(SoC),ipq50xx ipq50xx_64))
qca-nss-dp-objs += hal/arch/ipq50xx/nss_ipq50xx.o \
hal/gmac_hal_ops/syn/gmac/syn_if.o \
hal/syn_gmac_dp/syn_data_plane.o \
hal/syn_gmac_dp/syn_dp_tx_rx.o \
hal/syn_gmac_dp/syn_dp_cfg.o
ccflags-y += -DNSS_DP_IPQ50XX
endif

@@ -1,219 +0,0 @@
/*
**************************************************************************
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
/**
* @file nss_dp_api_if.h
* nss-dp exported structures/apis.
*
* This file declares all the public interfaces
* for NSS data-plane driver.
*/
#ifndef __NSS_DP_API_IF_H
#define __NSS_DP_API_IF_H
#include "nss_dp_arch.h"
/**
* @addtogroup nss_dp_subsystem
* @{
*/
/*
* NSS DP status
*/
#define NSS_DP_SUCCESS 0
#define NSS_DP_FAILURE -1
/*
* NSS DP platform specific defines
*/
#define NSS_DP_START_IFNUM NSS_DP_HAL_START_IFNUM
/**< First GMAC interface number (0/1) depending on SoC. */
#define NSS_DP_MAX_MTU_SIZE NSS_DP_HAL_MAX_MTU_SIZE
#define NSS_DP_MAX_PACKET_LEN NSS_DP_HAL_MAX_PACKET_LEN
#define NSS_DP_MAX_INTERFACES (NSS_DP_HAL_MAX_PORTS + NSS_DP_HAL_START_IFNUM)
/**< Last interface index for the SoC, to be used by qca-nss-drv. */
/*
* NSS PTP service code
*/
#define NSS_PTP_EVENT_SERVICE_CODE 0x9
/**
* nss_dp_data_plane_ctx
* Data plane context base class.
*/
struct nss_dp_data_plane_ctx {
struct net_device *dev;
};
/**
* nss_dp_gmac_stats
* The per-GMAC statistics structure.
*/
struct nss_dp_gmac_stats {
struct nss_dp_hal_gmac_stats stats;
};
/**
* nss_dp_data_plane_ops
* Per data-plane ops structure.
*
* Default would be slowpath and can be overridden by nss-drv
*/
struct nss_dp_data_plane_ops {
int (*init)(struct nss_dp_data_plane_ctx *dpc);
int (*open)(struct nss_dp_data_plane_ctx *dpc, uint32_t tx_desc_ring,
uint32_t rx_desc_ring, uint32_t mode);
int (*close)(struct nss_dp_data_plane_ctx *dpc);
int (*link_state)(struct nss_dp_data_plane_ctx *dpc,
uint32_t link_state);
int (*mac_addr)(struct nss_dp_data_plane_ctx *dpc, uint8_t *addr);
int (*change_mtu)(struct nss_dp_data_plane_ctx *dpc, uint32_t mtu);
netdev_tx_t (*xmit)(struct nss_dp_data_plane_ctx *dpc, struct sk_buff *os_buf);
void (*set_features)(struct nss_dp_data_plane_ctx *dpc);
int (*pause_on_off)(struct nss_dp_data_plane_ctx *dpc,
uint32_t pause_on);
int (*vsi_assign)(struct nss_dp_data_plane_ctx *dpc, uint32_t vsi);
int (*vsi_unassign)(struct nss_dp_data_plane_ctx *dpc, uint32_t vsi);
int (*rx_flow_steer)(struct nss_dp_data_plane_ctx *dpc, struct sk_buff *skb,
uint32_t cpu, bool is_add);
void (*get_stats)(struct nss_dp_data_plane_ctx *dpc, struct nss_dp_gmac_stats *stats);
int (*deinit)(struct nss_dp_data_plane_ctx *dpc);
};
/**
* nss_dp_receive
* Called by overlay drivers to deliver packets to nss-dp.
*
* @datatypes
* net_device
* sk_buff
* napi_struct
*
* @param[in] netdev Pointer to netdev structure on which packet is received.
* @param[in] skb Pointer to the received packet.
* @param[in] napi Pointer to napi context.
*/
void nss_dp_receive(struct net_device *netdev, struct sk_buff *skb,
struct napi_struct *napi);
/**
* nss_dp_is_in_open_state
* Returns whether the data plane is open.
*
* @datatypes
* net_device
*
* @param[in] netdev Pointer to netdev structure.
*
* @return
* bool
*/
bool nss_dp_is_in_open_state(struct net_device *netdev);
/**
* nss_dp_override_data_plane
* API to allow overlay drivers to override the data plane.
*
* @datatypes
* net_device
* nss_dp_data_plane_ops
* nss_dp_data_plane_ctx
*
* @param[in] netdev Pointer to netdev structure.
* @param[in] dp_ops Pointer to respective data plane ops structure.
* @param[in] dpc Pointer to data plane context.
*
* @return
* int
*/
int nss_dp_override_data_plane(struct net_device *netdev,
struct nss_dp_data_plane_ops *dp_ops,
struct nss_dp_data_plane_ctx *dpc);
/**
* nss_dp_start_data_plane
* Dataplane API to inform netdev when it is ready to start.
*
* @datatypes
* net_device
* nss_dp_data_plane_ctx
*
* @param[in] netdev Pointer to netdev structure.
* @param[in] dpc Pointer to data plane context.
*/
void nss_dp_start_data_plane(struct net_device *netdev,
struct nss_dp_data_plane_ctx *dpc);
/**
* nss_dp_restore_data_plane
* Called by overlay drivers to detach themselves from nss-dp.
*
* @datatypes
* net_device
*
* @param[in] netdev Pointer to netdev structure.
*/
void nss_dp_restore_data_plane(struct net_device *netdev);
/**
* nss_dp_get_netdev_by_nss_if_num
* Returns the net device of the corresponding id if it exists.
*
* @datatypes
* int
*
* @param[in] if_num Interface ID of the physical MAC port.
*
* @return
* Pointer to netdev structure.
*/
struct net_device *nss_dp_get_netdev_by_nss_if_num(int if_num);
/**
* nss_phy_tstamp_rx_buf
* Receive timestamp packet.
*
* @datatypes
* sk_buff
*
* @param[in] app_data Pointer to the application context of the message.
* @param[in] skb Pointer to the packet.
*/
void nss_phy_tstamp_rx_buf(void *app_data, struct sk_buff *skb);
/**
* nss_phy_tstamp_tx_buf
* Transmit timestamp packet
*
* @datatypes
* net_device
* sk_buff
*
* @param[in] ndev Pointer to netdev structure.
* @param[in] skb Pointer to the packet.
*/
void nss_phy_tstamp_tx_buf(struct net_device *ndev, struct sk_buff *skb);
/**
*@}
*/
#endif /** __NSS_DP_API_IF_H */
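For context, here is a minimal sketch of how an overlay driver (such as qca-nss-drv) could take over the data plane through the interfaces declared in the header above. The my_* names are hypothetical, only the callbacks relevant to the example are filled in, and this is an illustration rather than the actual overlay implementation.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include "nss_dp_api_if.h"

static int my_dp_init(struct nss_dp_data_plane_ctx *dpc)
{
	/* Prepare offload state for dpc->dev here. */
	return NSS_DP_SUCCESS;
}

static netdev_tx_t my_dp_xmit(struct nss_dp_data_plane_ctx *dpc,
			      struct sk_buff *skb)
{
	/* Hand the skb to the offload engine; this sketch just drops it. */
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static struct nss_dp_data_plane_ops my_dp_ops = {
	.init = my_dp_init,
	.xmit = my_dp_xmit,
	/* The remaining callbacks are left NULL for brevity. */
};

static struct nss_dp_data_plane_ctx my_dpc;

static int my_attach(struct net_device *netdev)
{
	my_dpc.dev = netdev;
	if (nss_dp_override_data_plane(netdev, &my_dp_ops, &my_dpc) != NSS_DP_SUCCESS)
		return NSS_DP_FAILURE;
	nss_dp_start_data_plane(netdev, &my_dpc);
	return NSS_DP_SUCCESS;
}

static void my_detach(struct net_device *netdev)
{
	nss_dp_restore_data_plane(netdev);
}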

@@ -1,153 +0,0 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/ioport.h>
#include <linux/qcom_scm.h>
#include "nss_dp_hal.h"
/*
* nss_dp_hal_tcsr_base_get
* Reads TCSR base address from DTS
*/
static uint32_t nss_dp_hal_tcsr_base_get(void)
{
uint32_t tcsr_base_addr = 0;
struct device_node *dp_cmn;
/*
* Get reference to NSS dp common device node
*/
dp_cmn = of_find_node_by_name(NULL, "nss-dp-common");
if (!dp_cmn) {
pr_info("%s: NSS DP common node not found\n", __func__);
return 0;
}
if (of_property_read_u32(dp_cmn, "qcom,tcsr-base", &tcsr_base_addr)) {
pr_err("%s: error reading TCSR base\n", __func__);
}
of_node_put(dp_cmn);
return tcsr_base_addr;
}
/*
* nss_dp_hal_tcsr_set
* Sets the TCSR axi cache override register
*/
static void nss_dp_hal_tcsr_set(void)
{
void __iomem *tcsr_addr = NULL;
uint32_t tcsr_base;
int err;
tcsr_base = nss_dp_hal_tcsr_base_get();
if (!tcsr_base) {
pr_err("%s: Unable to get TCSR base address\n", __func__);
return;
}
/*
* Check if Trust Zone is enabled in the system.
* If yes, we need to go through SCM API call to program TCSR register.
* If TZ is not enabled, we can write to the register directly.
*/
if (qcom_scm_is_available()) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
err = qcom_scm_tcsr_reg_write((tcsr_base + TCSR_GMAC_AXI_CACHE_OVERRIDE_OFFSET),
TCSR_GMAC_AXI_CACHE_OVERRIDE_VALUE);
#else
err = qti_scm_tcsr_reg_write((tcsr_base + TCSR_GMAC_AXI_CACHE_OVERRIDE_OFFSET),
TCSR_GMAC_AXI_CACHE_OVERRIDE_VALUE);
#endif
if (err) {
pr_err("%s: SCM TCSR write error: %d\n", __func__, err);
}
} else {
tcsr_addr = ioremap_nocache((tcsr_base + TCSR_GMAC_AXI_CACHE_OVERRIDE_OFFSET),
TCSR_GMAC_AXI_CACHE_OVERRIDE_REG_SIZE);
if (!tcsr_addr) {
pr_err("%s: ioremap failed\n", __func__);
return;
}
writel(TCSR_GMAC_AXI_CACHE_OVERRIDE_VALUE, tcsr_addr);
iounmap(tcsr_addr);
}
}
/*
* nss_dp_hal_get_data_plane_ops
* Return the data plane ops for GMAC data plane.
*/
struct nss_dp_data_plane_ops *nss_dp_hal_get_data_plane_ops(void)
{
return &nss_dp_gmac_ops;
}
/*
* nss_dp_hal_clk_enable
* Function to enable GCC_SNOC_GMAC_AXI_CLK.
*
* This clock is required for GMAC operations.
*/
void nss_dp_hal_clk_enable(struct nss_dp_dev *dp_priv)
{
struct platform_device *pdev = dp_priv->pdev;
struct device *dev = &pdev->dev;
struct clk *gmac_clk = NULL;
int err;
gmac_clk = devm_clk_get(dev, NSS_SNOC_GMAC_AXI_CLK);
if (IS_ERR(gmac_clk)) {
pr_err("%s: cannot get clock: %s\n", __func__,
NSS_SNOC_GMAC_AXI_CLK);
return;
}
err = clk_prepare_enable(gmac_clk);
if (err) {
pr_err("%s: cannot enable clock: %s, err: %d\n", __func__,
NSS_SNOC_GMAC_AXI_CLK, err);
return;
}
}
/*
* nss_dp_hal_init
* Sets the gmac ops based on the GMAC type.
*/
bool nss_dp_hal_init(void)
{
nss_dp_hal_set_gmac_ops(&syn_hal_ops, GMAC_HAL_TYPE_SYN_GMAC);
/*
* Program the global GMAC AXI Cache override register
* for optimized AXI DMA operation.
*/
nss_dp_hal_tcsr_set();
return true;
}
/*
* nss_dp_hal_cleanup
* Sets the gmac ops to NULL.
*/
void nss_dp_hal_cleanup(void)
{
nss_dp_hal_set_gmac_ops(NULL, GMAC_HAL_TYPE_SYN_GMAC);
}

@@ -1,130 +0,0 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __NSS_DP_ARCH_H__
#define __NSS_DP_ARCH_H__
#define NSS_DP_HAL_MAX_PORTS 2
#define NSS_DP_HAL_CPU_NUM 2
#define NSS_DP_HAL_START_IFNUM 0
#define NSS_DP_GMAC_NORMAL_FRAME_MTU 1500
#define NSS_DP_GMAC_MINI_JUMBO_FRAME_MTU 1978
#define NSS_DP_GMAC_FULL_JUMBO_FRAME_MTU 9000
#define NSS_DP_HAL_MAX_MTU_SIZE NSS_DP_GMAC_FULL_JUMBO_FRAME_MTU
#define NSS_DP_HAL_MAX_PACKET_LEN 65535
/*
* TCSR_GMAC_AXI_CACHE_OVERRIDE register size
*/
#define TCSR_GMAC_AXI_CACHE_OVERRIDE_REG_SIZE 4
/*
* TCSR_GMAC_AXI_CACHE_OVERRIDE Register offset
*/
#define TCSR_GMAC_AXI_CACHE_OVERRIDE_OFFSET 0x6224
/*
* Value for TCSR_GMAC_AXI_CACHE_OVERRIDE register
*/
#define TCSR_GMAC_AXI_CACHE_OVERRIDE_VALUE 0x05050505
/*
* GCC_SNOC_GMAC_AXI_CLOCK
*/
#define NSS_SNOC_GMAC_AXI_CLK "nss-snoc-gmac-axi-clk"
/**
* nss_dp_hal_gmac_stats
* The per-GMAC statistics structure.
*/
struct nss_dp_hal_gmac_stats {
uint64_t rx_bytes; /**< Number of RX bytes */
uint64_t rx_packets; /**< Number of RX packets */
uint64_t rx_errors; /**< Number of RX errors */
uint64_t rx_receive_errors; /**< Number of RX receive errors */
uint64_t rx_descriptor_errors; /**< Number of RX descriptor errors */
uint64_t rx_late_collision_errors;
/**< Number of RX late collision errors */
uint64_t rx_dribble_bit_errors; /**< Number of RX dribble bit errors */
uint64_t rx_length_errors; /**< Number of RX length errors */
uint64_t rx_ip_header_errors; /**< Number of RX IP header errors read from the Rx descriptor */
uint64_t rx_ip_payload_errors; /**< Number of RX IP payload errors */
uint64_t rx_no_buffer_errors; /**< Number of RX no-buffer errors */
uint64_t rx_transport_csum_bypassed;
/**< Number of RX packets where the transport checksum was bypassed */
uint64_t tx_bytes; /**< Number of TX bytes */
uint64_t tx_packets; /**< Number of TX packets */
uint64_t tx_collisions; /**< Number of TX collisions */
uint64_t tx_errors; /**< Number of TX errors */
uint64_t tx_jabber_timeout_errors;
/**< Number of TX jabber timeout errors */
uint64_t tx_frame_flushed_errors;
/**< Number of TX frame flushed errors */
uint64_t tx_loss_of_carrier_errors;
/**< Number of TX loss of carrier errors */
uint64_t tx_no_carrier_errors; /**< Number of TX no carrier errors */
uint64_t tx_late_collision_errors;
/**< Number of TX late collision errors */
uint64_t tx_excessive_collision_errors;
/**< Number of TX excessive collision errors */
uint64_t tx_excessive_deferral_errors;
/**< Number of TX excessive deferral errors */
uint64_t tx_underflow_errors; /**< Number of TX underflow errors */
uint64_t tx_ip_header_errors; /**< Number of TX IP header errors */
uint64_t tx_ip_payload_errors; /**< Number of TX IP payload errors */
uint64_t tx_dropped; /**< Number of TX dropped packets */
uint64_t hw_errs[10]; /**< GMAC DMA error counters */
uint64_t rx_missed; /**< Number of RX packets missed by the DMA */
uint64_t fifo_overflows; /**< Number of RX FIFO overflows signalled by the DMA */
uint64_t rx_scatter_errors; /**< Number of scattered frames received by the DMA */
uint64_t tx_ts_create_errors; /**< Number of tx timestamp creation errors */
uint64_t gmac_total_ticks; /**< Total clock ticks spent inside the GMAC */
uint64_t gmac_worst_case_ticks; /**< Worst case iteration of the GMAC in ticks */
uint64_t gmac_iterations; /**< Number of iterations around the GMAC */
uint64_t tx_pause_frames; /**< Number of pause frames sent by the GMAC */
uint64_t mmc_rx_overflow_errors;
/**< Number of RX overflow errors */
uint64_t mmc_rx_watchdog_timeout_errors;
/**< Number of RX watchdog timeout errors */
uint64_t mmc_rx_crc_errors; /**< Number of RX CRC errors */
uint64_t mmc_rx_ip_header_errors;
/**< Number of RX IP header errors read from MMC counter*/
uint64_t mmc_rx_octets_g;
/**< Number of good octets received */
uint64_t mmc_rx_ucast_frames; /**< Number of Unicast frames received */
uint64_t mmc_rx_bcast_frames; /**< Number of Bcast frames received */
uint64_t mmc_rx_mcast_frames; /**< Number of Mcast frames received */
uint64_t mmc_rx_undersize;
/**< Number of RX undersize frames */
uint64_t mmc_rx_oversize;
/**< Number of RX oversize frames */
uint64_t mmc_rx_jabber; /**< Number of jabber frames */
uint64_t mmc_rx_octets_gb;
/**< Number of good/bad octets */
uint64_t mmc_rx_frag_frames_g; /**< Number of good ipv4 frag frames */
uint64_t mmc_tx_octets_g; /**< Number of good octets sent */
uint64_t mmc_tx_ucast_frames; /**< Number of Unicast frames sent*/
uint64_t mmc_tx_bcast_frames; /**< Number of Broadcast frames sent */
uint64_t mmc_tx_mcast_frames; /**< Number of Multicast frames sent */
uint64_t mmc_tx_deferred; /**< Number of Deferred frames sent */
uint64_t mmc_tx_single_col; /**< Number of single collisions */
uint64_t mmc_tx_multiple_col; /**< Number of multiple collisions */
uint64_t mmc_tx_octets_gb; /**< Number of good/bad octets sent*/
};
extern struct nss_dp_data_plane_ops nss_dp_gmac_ops;
#endif /* __NSS_DP_ARCH_H__ */

@@ -1,53 +0,0 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "nss_dp_hal.h"
#include "edma.h"
/*
* nss_dp_hal_get_data_plane_ops()
* Return the data plane ops for edma data plane.
*/
struct nss_dp_data_plane_ops *nss_dp_hal_get_data_plane_ops(void)
{
return &nss_dp_edma_ops;
}
/*
* nss_dp_hal_init()
* Initialize EDMA and set gmac ops.
*/
bool nss_dp_hal_init(void)
{
nss_dp_hal_set_gmac_ops(&qcom_hal_ops, GMAC_HAL_TYPE_QCOM);
nss_dp_hal_set_gmac_ops(&syn_hal_ops, GMAC_HAL_TYPE_SYN_XGMAC);
if (edma_init()) {
return false;
}
return true;
}
/*
* nss_dp_hal_cleanup()
* Cleanup EDMA and set gmac ops to NULL.
*/
void nss_dp_hal_cleanup(void)
{
nss_dp_hal_set_gmac_ops(NULL, GMAC_HAL_TYPE_QCOM);
nss_dp_hal_set_gmac_ops(NULL, GMAC_HAL_TYPE_SYN_XGMAC);
edma_cleanup(false);
}

@@ -1,34 +0,0 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __NSS_DP_ARCH_H__
#define __NSS_DP_ARCH_H__
#define NSS_DP_HAL_MAX_PORTS 5
#define NSS_DP_HAL_CPU_NUM 4
#define NSS_DP_HAL_START_IFNUM 1
#define NSS_DP_HAL_MAX_MTU_SIZE 9216
#define NSS_DP_HAL_MAX_PACKET_LEN 65535
#define NSS_DP_PREHEADER_SIZE 32
/**
* nss_dp_hal_gmac_stats
* The per-GMAC statistics structure.
*/
struct nss_dp_hal_gmac_stats {
};
#endif /* __NSS_DP_ARCH_H__ */

@@ -1,53 +0,0 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "nss_dp_hal.h"
#include "edma.h"
/*
* nss_dp_hal_get_data_plane_ops()
* Return the data plane ops for edma data plane.
*/
struct nss_dp_data_plane_ops *nss_dp_hal_get_data_plane_ops(void)
{
return &nss_dp_edma_ops;
}
/*
* nss_dp_hal_init()
* Initialize EDMA and set gmac ops.
*/
bool nss_dp_hal_init(void)
{
nss_dp_hal_set_gmac_ops(&qcom_hal_ops, GMAC_HAL_TYPE_QCOM);
nss_dp_hal_set_gmac_ops(&syn_hal_ops, GMAC_HAL_TYPE_SYN_XGMAC);
if (edma_init()) {
return false;
}
return true;
}
/*
* nss_dp_hal_cleanup()
* Cleanup EDMA and set gmac ops to NULL.
*/
void nss_dp_hal_cleanup(void)
{
nss_dp_hal_set_gmac_ops(NULL, GMAC_HAL_TYPE_QCOM);
nss_dp_hal_set_gmac_ops(NULL, GMAC_HAL_TYPE_SYN_XGMAC);
edma_cleanup(false);
}

@@ -1,34 +0,0 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __NSS_DP_ARCH_H__
#define __NSS_DP_ARCH_H__
#define NSS_DP_HAL_MAX_PORTS 6
#define NSS_DP_HAL_CPU_NUM 4
#define NSS_DP_HAL_START_IFNUM 1
#define NSS_DP_HAL_MAX_MTU_SIZE 9216
#define NSS_DP_HAL_MAX_PACKET_LEN 65535
#define NSS_DP_PREHEADER_SIZE 32
/**
* nss_dp_hal_gmac_stats
* The per-GMAC statistics structure.
*/
struct nss_dp_hal_gmac_stats {
};
#endif /* __NSS_DP_ARCH_H__ */

@@ -1,967 +0,0 @@
/*
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
* RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT
* NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE
* USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/debugfs.h>
#include <linux/reset.h>
#include "nss_dp_dev.h"
#include "edma_regs.h"
#include "edma_data_plane.h"
#define EDMA_HW_RESET_ID "edma_rst"
/*
* edma_cleanup_rxfill_ring_res()
* Cleanup resources for one RxFill ring
*/
static void edma_cleanup_rxfill_ring_res(struct edma_hw *ehw,
struct edma_rxfill_ring *rxfill_ring)
{
struct platform_device *pdev = ehw->pdev;
struct sk_buff *skb;
uint16_t cons_idx, curr_idx;
struct edma_rxfill_desc *rxfill_desc;
uint32_t reg_data = 0;
struct edma_rx_preheader *rxph = NULL;
int store_idx;
/*
* Read RXFILL ring producer index
*/
reg_data = edma_reg_read(EDMA_REG_RXFILL_PROD_IDX(rxfill_ring->id));
curr_idx = reg_data & EDMA_RXFILL_PROD_IDX_MASK;
/*
* Read RXFILL ring consumer index
*/
reg_data = edma_reg_read(EDMA_REG_RXFILL_CONS_IDX(rxfill_ring->id));
cons_idx = reg_data & EDMA_RXFILL_CONS_IDX_MASK;
while (curr_idx != cons_idx) {
/*
* Get RXFILL descriptor
*/
rxfill_desc = EDMA_RXFILL_DESC(rxfill_ring, cons_idx);
/*
* Get Rx preheader
*/
rxph = (struct edma_rx_preheader *)
phys_to_virt(rxfill_desc->buffer_addr);
dma_unmap_single(&pdev->dev, rxfill_desc->buffer_addr,
EDMA_RX_BUFF_SIZE, DMA_FROM_DEVICE);
/*
* Get sk_buff and free it
*/
store_idx = rxph->opaque;
skb = ehw->rx_skb_store[store_idx];
ehw->rx_skb_store[store_idx] = NULL;
dev_kfree_skb_any(skb);
cons_idx++;
if (cons_idx == rxfill_ring->count)
cons_idx = 0;
}
/*
* Free RXFILL ring descriptors
*/
dma_free_coherent(&pdev->dev,
(sizeof(struct edma_rxfill_desc)
* rxfill_ring->count),
rxfill_ring->desc, rxfill_ring->dma);
}
/*
* edma_setup_rxfill_ring_res()
* Setup resources for one RxFill ring
*/
static int edma_setup_rxfill_ring_res(struct edma_hw *ehw,
struct edma_rxfill_ring *rxfill_ring)
{
struct platform_device *pdev = ehw->pdev;
/*
* Allocate RxFill ring descriptors
*/
rxfill_ring->desc = dma_alloc_coherent(&pdev->dev,
(sizeof(struct edma_rxfill_desc)
* rxfill_ring->count),
&rxfill_ring->dma, GFP_KERNEL);
if (!rxfill_ring->desc) {
pr_warn("Descriptor alloc for RXFILL ring %u failed\n",
rxfill_ring->id);
return -ENOMEM;
}
spin_lock_init(&rxfill_ring->lock);
return 0;
}
/*
* edma_setup_rxdesc_ring_res()
* Setup resources for one RxDesc ring
*/
static int edma_setup_rxdesc_ring_res(struct edma_hw *ehw,
struct edma_rxdesc_ring *rxdesc_ring)
{
struct platform_device *pdev = ehw->pdev;
/*
* Allocate RxDesc ring descriptors
*/
rxdesc_ring->desc = dma_alloc_coherent(&pdev->dev,
(sizeof(struct edma_rxdesc_desc)
* rxdesc_ring->count),
&rxdesc_ring->dma, GFP_KERNEL);
if (!rxdesc_ring->desc) {
pr_warn("Descriptor alloc for RXDESC ring %u failed\n",
rxdesc_ring->id);
return -ENOMEM;
}
return 0;
}
/*
* edma_cleanup_rxdesc_ring_res()
* Cleanup resources for RxDesc ring
*/
static void edma_cleanup_rxdesc_ring_res(struct edma_hw *ehw,
struct edma_rxdesc_ring *rxdesc_ring)
{
struct platform_device *pdev = ehw->pdev;
struct sk_buff *skb;
struct edma_rxdesc_desc *rxdesc_desc;
struct edma_rx_preheader *rxph = NULL;
uint16_t prod_idx = 0;
uint16_t cons_idx = 0;
int store_idx;
cons_idx = edma_reg_read(EDMA_REG_RXDESC_CONS_IDX(rxdesc_ring->id))
& EDMA_RXDESC_CONS_IDX_MASK;
prod_idx = edma_reg_read(EDMA_REG_RXDESC_PROD_IDX(rxdesc_ring->id))
& EDMA_RXDESC_PROD_IDX_MASK;
/*
* Free any buffers assigned to any descriptors
*/
while (cons_idx != prod_idx) {
rxdesc_desc = EDMA_RXDESC_DESC(rxdesc_ring, cons_idx);
rxph = (struct edma_rx_preheader *)
phys_to_virt(rxdesc_desc->buffer_addr);
dma_unmap_single(&pdev->dev, rxdesc_desc->buffer_addr,
EDMA_RX_BUFF_SIZE, DMA_FROM_DEVICE);
store_idx = rxph->opaque;
skb = ehw->rx_skb_store[store_idx];
ehw->rx_skb_store[store_idx] = NULL;
dev_kfree_skb_any(skb);
/*
* Update consumer index
*/
if (++cons_idx == rxdesc_ring->count)
cons_idx = 0;
}
/*
* Free RXDESC ring descriptors
*/
dma_free_coherent(&pdev->dev,
(sizeof(struct edma_rxdesc_desc)
* rxdesc_ring->count),
rxdesc_ring->desc, rxdesc_ring->dma);
}
/*
* edma_cleanup_txcmpl_ring_res()
* Cleanup resources for one TxCmpl ring
*/
static void edma_cleanup_txcmpl_ring_res(struct edma_hw *ehw,
struct edma_txcmpl_ring *txcmpl_ring)
{
struct platform_device *pdev = ehw->pdev;
/*
* Free any buffers assigned to any descriptors
*/
edma_clean_tx(ehw, txcmpl_ring);
/*
* Free TxCmpl ring descriptors
*/
dma_free_coherent(&pdev->dev,
(sizeof(struct edma_txcmpl_desc)
* txcmpl_ring->count),
txcmpl_ring->desc, txcmpl_ring->dma);
}
/*
* edma_setup_txcmpl_ring_res()
* Setup resources for one TxCmpl ring
*/
static int edma_setup_txcmpl_ring_res(struct edma_hw *ehw,
struct edma_txcmpl_ring *txcmpl_ring)
{
struct platform_device *pdev = ehw->pdev;
/*
* Allocate TxCmpl ring descriptors
*/
txcmpl_ring->desc = dma_alloc_coherent(&pdev->dev,
(sizeof(struct edma_txcmpl_desc)
* txcmpl_ring->count),
&txcmpl_ring->dma, GFP_KERNEL);
if (!txcmpl_ring->desc) {
pr_warn("Descriptor alloc for TXCMPL ring %u failed\n",
txcmpl_ring->id);
return -ENOMEM;
}
return 0;
}
/*
* edma_cleanup_txdesc_ring_res()
* Cleanup resources for one TxDesc ring
*/
static void edma_cleanup_txdesc_ring_res(struct edma_hw *ehw,
struct edma_txdesc_ring *txdesc_ring)
{
struct platform_device *pdev = ehw->pdev;
struct sk_buff *skb = NULL;
struct edma_txdesc_desc *txdesc = NULL;
uint16_t prod_idx, cons_idx;
size_t buf_len;
uint32_t data;
int store_idx;
/*
* Free any buffers assigned to any descriptors
*/
data = edma_reg_read(EDMA_REG_TXDESC_PROD_IDX(txdesc_ring->id));
prod_idx = data & EDMA_TXDESC_PROD_IDX_MASK;
data = edma_reg_read(EDMA_REG_TXDESC_CONS_IDX(txdesc_ring->id));
cons_idx = data & EDMA_TXDESC_CONS_IDX_MASK;
while (cons_idx != prod_idx) {
txdesc = EDMA_TXDESC_DESC(txdesc_ring, cons_idx);
store_idx = txdesc->buffer_addr;
skb = ehw->tx_skb_store[store_idx];
ehw->tx_skb_store[store_idx] = NULL;
buf_len = (txdesc->word1 & EDMA_TXDESC_DATA_LENGTH_MASK) >>
EDMA_TXDESC_DATA_LENGTH_SHIFT;
dma_unmap_single(&pdev->dev, (dma_addr_t)skb->data,
buf_len + EDMA_TX_PREHDR_SIZE, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
cons_idx = (cons_idx + 1) & (txdesc_ring->count - 1);
}
/*
* Free Tx ring descriptors
*/
dma_free_coherent(&pdev->dev,
(sizeof(struct edma_txdesc_desc)
* txdesc_ring->count),
txdesc_ring->desc, txdesc_ring->dma);
}
/*
* edma_setup_txdesc_ring_res()
* Setup resources for one TxDesc ring
*/
static int edma_setup_txdesc_ring_res(struct edma_hw *ehw,
struct edma_txdesc_ring *txdesc_ring)
{
struct platform_device *pdev = ehw->pdev;
/*
* Allocate Tx ring descriptors
*/
txdesc_ring->desc = dma_alloc_coherent(&pdev->dev,
(sizeof(struct edma_txdesc_desc)
* txdesc_ring->count),
&txdesc_ring->dma, GFP_KERNEL);
if (!txdesc_ring->desc) {
pr_warn("Descriptor alloc for TXDESC ring %u failed\n",
txdesc_ring->id);
return -ENOMEM;
}
spin_lock_init(&txdesc_ring->tx_lock);
return 0;
}
/*
* edma_setup_ring_resources()
* Allocate/setup resources for EDMA rings
*/
static int edma_setup_ring_resources(struct edma_hw *ehw)
{
struct edma_txcmpl_ring *txcmpl_ring = NULL;
struct edma_txdesc_ring *txdesc_ring = NULL;
struct edma_rxfill_ring *rxfill_ring = NULL;
struct edma_rxdesc_ring *rxdesc_ring = NULL;
int i;
int ret;
int index;
/*
* Allocate TxDesc ring descriptors
*/
for (i = 0; i < ehw->txdesc_rings; i++) {
txdesc_ring = &ehw->txdesc_ring[i];
txdesc_ring->count = EDMA_RING_SIZE;
txdesc_ring->id = ehw->txdesc_ring_start + i;
ret = edma_setup_txdesc_ring_res(ehw, txdesc_ring);
if (ret != 0) {
while (--i >= 0)
edma_cleanup_txdesc_ring_res(ehw,
&ehw->txdesc_ring[i]);
return -ENOMEM;
}
}
/*
* Allocate TxCmpl ring descriptors
*/
for (i = 0; i < ehw->txcmpl_rings; i++) {
txcmpl_ring = &ehw->txcmpl_ring[i];
txcmpl_ring->count = EDMA_RING_SIZE;
txcmpl_ring->id = ehw->txcmpl_ring_start + i;
ret = edma_setup_txcmpl_ring_res(ehw, txcmpl_ring);
if (ret != 0) {
while (--i >= 0)
edma_cleanup_txcmpl_ring_res(ehw,
&ehw->txcmpl_ring[i]);
goto txcmpl_mem_alloc_fail;
}
}
/*
* Allocate Rx fill ring descriptors
*/
for (i = 0; i < ehw->rxfill_rings; i++) {
rxfill_ring = &ehw->rxfill_ring[i];
rxfill_ring->count = EDMA_RING_SIZE;
rxfill_ring->id = ehw->rxfill_ring_start + i;
ret = edma_setup_rxfill_ring_res(ehw, rxfill_ring);
if (ret != 0) {
while (--i >= 0)
edma_cleanup_rxfill_ring_res(ehw,
&ehw->rxfill_ring[i]);
goto rxfill_mem_alloc_fail;
}
}
/*
* Allocate RxDesc ring descriptors
*/
for (i = 0; i < ehw->rxdesc_rings; i++) {
rxdesc_ring = &ehw->rxdesc_ring[i];
rxdesc_ring->count = EDMA_RING_SIZE;
rxdesc_ring->id = ehw->rxdesc_ring_start + i;
/*
* Create a mapping between RX Desc ring and Rx fill ring.
* The number of fill rings is less than the number of descriptor
* rings, so the fill rings are shared across the descriptor rings.
*/
index = ehw->rxfill_ring_start + (i % ehw->rxfill_rings);
rxdesc_ring->rxfill =
&ehw->rxfill_ring[index - ehw->rxfill_ring_start];
ret = edma_setup_rxdesc_ring_res(ehw, rxdesc_ring);
if (ret != 0) {
while (--i >= 0)
edma_cleanup_rxdesc_ring_res(ehw,
&ehw->rxdesc_ring[i]);
goto rxdesc_mem_alloc_fail;
}
}
return 0;
rxdesc_mem_alloc_fail:
for (i = 0; i < ehw->rxfill_rings; i++)
edma_cleanup_rxfill_ring_res(ehw, &ehw->rxfill_ring[i]);
rxfill_mem_alloc_fail:
for (i = 0; i < ehw->txcmpl_rings; i++)
edma_cleanup_txcmpl_ring_res(ehw, &ehw->txcmpl_ring[i]);
txcmpl_mem_alloc_fail:
for (i = 0; i < ehw->txdesc_rings; i++)
edma_cleanup_txdesc_ring_res(ehw, &ehw->txdesc_ring[i]);
return -ENOMEM;
}
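A standalone sketch of the RxDesc-to-RxFill sharing computed above. The ring counts used here (4 RxDesc rings, 2 RxFill rings starting at ring 4) are assumed example values purely for illustration, not the hardware defaults.

#include <stdio.h>

int main(void)
{
	int rxdesc_rings = 4, rxfill_rings = 2, rxfill_ring_start = 4;
	int i;

	for (i = 0; i < rxdesc_rings; i++) {
		/* Same arithmetic as edma_setup_ring_resources() above. */
		int index = rxfill_ring_start + (i % rxfill_rings);
		printf("rxdesc ring %d -> rxfill_ring[%d]\n",
		       i, index - rxfill_ring_start);
	}
	return 0;
}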
/*
* edma_free_rings()
* Free EDMA software rings
*/
static void edma_free_rings(struct edma_hw *ehw)
{
kfree(ehw->rxfill_ring);
kfree(ehw->rxdesc_ring);
kfree(ehw->txdesc_ring);
kfree(ehw->txcmpl_ring);
}
/*
* edma_alloc_rings()
* Allocate EDMA software rings
*/
static int edma_alloc_rings(struct edma_hw *ehw)
{
ehw->rxfill_ring = kzalloc((sizeof(struct edma_rxfill_ring) *
ehw->rxfill_rings), GFP_KERNEL);
if (!ehw->rxfill_ring)
return -ENOMEM;
ehw->rxdesc_ring = kzalloc((sizeof(struct edma_rxdesc_ring) *
ehw->rxdesc_rings), GFP_KERNEL);
if (!ehw->rxdesc_ring)
goto rxdesc_ring_alloc_fail;
ehw->txdesc_ring = kzalloc((sizeof(struct edma_txdesc_ring) *
ehw->txdesc_rings), GFP_KERNEL);
if (!ehw->txdesc_ring)
goto txdesc_ring_alloc_fail;
ehw->txcmpl_ring = kzalloc((sizeof(struct edma_txcmpl_ring) *
ehw->txcmpl_rings), GFP_KERNEL);
if (!ehw->txcmpl_ring)
goto txcmpl_ring_alloc_fail;
pr_info("Num rings - TxDesc:%u (%u-%u) TxCmpl:%u (%u-%u)\n",
ehw->txdesc_rings, ehw->txdesc_ring_start,
(ehw->txdesc_ring_start + ehw->txdesc_rings - 1),
ehw->txcmpl_rings, ehw->txcmpl_ring_start,
(ehw->txcmpl_ring_start + ehw->txcmpl_rings - 1));
pr_info("RxDesc:%u (%u-%u) RxFill:%u (%u-%u)\n",
ehw->rxdesc_rings, ehw->rxdesc_ring_start,
(ehw->rxdesc_ring_start + ehw->rxdesc_rings - 1),
ehw->rxfill_rings, ehw->rxfill_ring_start,
(ehw->rxfill_ring_start + ehw->rxfill_rings - 1));
return 0;
txcmpl_ring_alloc_fail:
kfree(ehw->txdesc_ring);
txdesc_ring_alloc_fail:
kfree(ehw->rxdesc_ring);
rxdesc_ring_alloc_fail:
kfree(ehw->rxfill_ring);
return -ENOMEM;
}
/*
* edma_cleanup_rings()
* Cleanup EDMA rings
*/
void edma_cleanup_rings(struct edma_hw *ehw)
{
int i;
/*
* Free any buffers assigned to any descriptors
*/
for (i = 0; i < ehw->txdesc_rings; i++)
edma_cleanup_txdesc_ring_res(ehw, &ehw->txdesc_ring[i]);
/*
* Free Tx completion descriptors
*/
for (i = 0; i < ehw->txcmpl_rings; i++)
edma_cleanup_txcmpl_ring_res(ehw, &ehw->txcmpl_ring[i]);
/*
* Free Rx fill ring descriptors
*/
for (i = 0; i < ehw->rxfill_rings; i++)
edma_cleanup_rxfill_ring_res(ehw, &ehw->rxfill_ring[i]);
/*
* Free Rx completion ring descriptors
*/
for (i = 0; i < ehw->rxdesc_rings; i++)
edma_cleanup_rxdesc_ring_res(ehw, &ehw->rxdesc_ring[i]);
edma_free_rings(ehw);
}
/*
* edma_init_rings()
* Initialize EDMA rings
*/
static int edma_init_rings(struct edma_hw *ehw)
{
int ret = 0;
ret = edma_alloc_rings(ehw);
if (ret)
return ret;
ret = edma_setup_ring_resources(ehw);
if (ret)
return ret;
return 0;
}
/*
* edma_configure_txdesc_ring()
* Configure one TxDesc ring
*/
static void edma_configure_txdesc_ring(struct edma_hw *ehw,
struct edma_txdesc_ring *txdesc_ring)
{
uint32_t data = 0;
uint16_t hw_cons_idx = 0;
/*
* Configure TXDESC ring
*/
edma_reg_write(EDMA_REG_TXDESC_BA(txdesc_ring->id),
(uint32_t)(txdesc_ring->dma &
EDMA_RING_DMA_MASK));
edma_reg_write(EDMA_REG_TXDESC_RING_SIZE(txdesc_ring->id),
(uint32_t)(txdesc_ring->count &
EDMA_TXDESC_RING_SIZE_MASK));
data = edma_reg_read(EDMA_REG_TXDESC_CONS_IDX(txdesc_ring->id));
data &= ~(EDMA_TXDESC_CONS_IDX_MASK);
hw_cons_idx = data;
data = edma_reg_read(EDMA_REG_TXDESC_PROD_IDX(txdesc_ring->id));
data &= ~(EDMA_TXDESC_PROD_IDX_MASK);
data |= hw_cons_idx & EDMA_TXDESC_PROD_IDX_MASK;
edma_reg_write(EDMA_REG_TXDESC_PROD_IDX(txdesc_ring->id), data);
}
/*
* edma_configure_txcmpl_ring()
* Configure one TxCmpl ring
*/
static void edma_configure_txcmpl_ring(struct edma_hw *ehw,
struct edma_txcmpl_ring *txcmpl_ring)
{
uint32_t tx_mod_timer;
/*
* Configure TxCmpl ring base address
*/
edma_reg_write(EDMA_REG_TXCMPL_BA(txcmpl_ring->id),
(uint32_t)(txcmpl_ring->dma & EDMA_RING_DMA_MASK));
edma_reg_write(EDMA_REG_TXCMPL_RING_SIZE(txcmpl_ring->id),
(uint32_t)(txcmpl_ring->count
& EDMA_TXDESC_RING_SIZE_MASK));
/*
* Set TxCmpl ret mode to opaque
*/
edma_reg_write(EDMA_REG_TXCMPL_CTRL(txcmpl_ring->id),
EDMA_TXCMPL_RETMODE_OPAQUE);
tx_mod_timer = (EDMA_TX_MOD_TIMER & EDMA_TX_MOD_TIMER_INIT_MASK)
<< EDMA_TX_MOD_TIMER_INIT_SHIFT;
edma_reg_write(EDMA_REG_TX_MOD_TIMER(txcmpl_ring->id),
tx_mod_timer);
edma_reg_write(EDMA_REG_TX_INT_CTRL(txcmpl_ring->id), 0x2);
}
/*
* edma_configure_rxdesc_ring()
* Configure one RxDesc ring
*/
static void edma_configure_rxdesc_ring(struct edma_hw *ehw,
struct edma_rxdesc_ring *rxdesc_ring)
{
uint32_t data;
edma_reg_write(EDMA_REG_RXDESC_BA(rxdesc_ring->id),
(uint32_t)(rxdesc_ring->dma & 0xffffffff));
data = rxdesc_ring->count & EDMA_RXDESC_RING_SIZE_MASK;
data |= (ehw->rx_payload_offset & EDMA_RXDESC_PL_OFFSET_MASK)
<< EDMA_RXDESC_PL_OFFSET_SHIFT;
edma_reg_write(EDMA_REG_RXDESC_RING_SIZE(rxdesc_ring->id), data);
data = (EDMA_RX_MOD_TIMER_INIT & EDMA_RX_MOD_TIMER_INIT_MASK)
<< EDMA_RX_MOD_TIMER_INIT_SHIFT;
edma_reg_write(EDMA_REG_RX_MOD_TIMER(rxdesc_ring->id), data);
/*
* Enable ring. Set ret mode to 'opaque'.
*/
edma_reg_write(EDMA_REG_RX_INT_CTRL(rxdesc_ring->id), 0x2);
}
/*
* edma_configure_rxfill_ring()
* Configure one RxFill ring
*/
static void edma_configure_rxfill_ring(struct edma_hw *ehw,
struct edma_rxfill_ring *rxfill_ring)
{
uint32_t data = 0;
edma_reg_write(EDMA_REG_RXFILL_BA(rxfill_ring->id),
(uint32_t)(rxfill_ring->dma & EDMA_RING_DMA_MASK));
data = rxfill_ring->count & EDMA_RXFILL_RING_SIZE_MASK;
edma_reg_write(EDMA_REG_RXFILL_RING_SIZE(rxfill_ring->id), data);
/*
* Alloc Rx buffers
*/
edma_alloc_rx_buffer(ehw, rxfill_ring);
}
/*
* edma_configure_rings()
* Configure EDMA rings
*/
static void edma_configure_rings(struct edma_hw *ehw)
{
int i = 0;
/*
* Initialize the store
*/
for (i = 0; i < EDMA_RING_SIZE; i++) {
ehw->tx_skb_store[i] = NULL;
ehw->rx_skb_store[i] = NULL;
}
/*
* Configure TXDESC ring
*/
for (i = 0; i < ehw->txdesc_rings; i++)
edma_configure_txdesc_ring(ehw, &ehw->txdesc_ring[i]);
/*
* Configure TXCMPL ring
*/
for (i = 0; i < ehw->txcmpl_rings; i++)
edma_configure_txcmpl_ring(ehw, &ehw->txcmpl_ring[i]);
/*
* Configure RXFILL rings
*/
for (i = 0; i < ehw->rxfill_rings; i++)
edma_configure_rxfill_ring(ehw, &ehw->rxfill_ring[i]);
/*
* Configure RXDESC ring
*/
for (i = 0; i < ehw->rxdesc_rings; i++)
edma_configure_rxdesc_ring(ehw, &ehw->rxdesc_ring[i]);
}
/*
* edma_hw_reset()
* Reset EDMA Hardware during initialization
*/
int edma_hw_reset(struct edma_hw *ehw)
{
struct reset_control *rst;
struct platform_device *pdev = ehw->pdev;
rst = devm_reset_control_get(&pdev->dev, EDMA_HW_RESET_ID);
if (IS_ERR(rst)) {
pr_warn("DTS Node: %s does not exist\n", EDMA_HW_RESET_ID);
return -EINVAL;
}
reset_control_assert(rst);
udelay(100);
reset_control_deassert(rst);
udelay(100);
pr_info("EDMA HW Reset completed successfully\n");
return 0;
}
/*
* edma_hw_init()
* EDMA hw init
*/
int edma_hw_init(struct edma_hw *ehw)
{
int ret = 0;
int desc_index;
uint32_t i, data, reg = 0;
struct edma_rxdesc_ring *rxdesc_ring = NULL;
data = edma_reg_read(EDMA_REG_MAS_CTRL);
pr_info("EDMA ver %d hw init\n", data);
/*
* Setup private data structure
*/
ehw->misc_intr_mask = 0x0;
ehw->rxfill_intr_mask = EDMA_RXFILL_INT_MASK;
ehw->rxdesc_intr_mask = EDMA_RXDESC_INT_MASK_PKT_INT;
ehw->txcmpl_intr_mask = EDMA_TX_INT_MASK_PKT_INT |
EDMA_TX_INT_MASK_UGT_INT;
ehw->rx_payload_offset = EDMA_RX_PREHDR_SIZE;
ehw->active = 0;
ehw->edma_initialized = false;
/* Reset EDMA */
ret = edma_hw_reset(ehw);
if (ret)
return ret;
/*
* Disable interrupts
*/
for (i = 0; i < EDMA_MAX_TXCMPL_RINGS; i++)
edma_reg_write(EDMA_REG_TX_INT_MASK(i), 0);
for (i = 0; i < EDMA_MAX_RXFILL_RINGS; i++)
edma_reg_write(EDMA_REG_RXFILL_INT_MASK(i), 0);
for (i = 0; i < EDMA_MAX_RXDESC_RINGS; i++)
edma_reg_write(EDMA_REG_RX_INT_CTRL(i), 0);
/*
* Disable Rx rings
*/
for (i = 0; i < EDMA_MAX_RXDESC_RINGS; i++) {
data = edma_reg_read(EDMA_REG_RXDESC_CTRL(i));
data &= ~EDMA_RXDESC_RX_EN;
edma_reg_write(EDMA_REG_RXDESC_CTRL(i), data);
}
/*
* Disable RxFill Rings
*/
for (i = 0; i < EDMA_MAX_RXFILL_RINGS; i++) {
data = edma_reg_read(EDMA_REG_RXFILL_RING_EN(i));
data &= ~EDMA_RXFILL_RING_EN;
edma_reg_write(EDMA_REG_RXFILL_RING_EN(i), data);
}
/*
* Disable Tx rings
*/
for (desc_index = 0; desc_index < EDMA_MAX_TXDESC_RINGS; desc_index++) {
data = edma_reg_read(EDMA_REG_TXDESC_CTRL(desc_index));
data &= ~EDMA_TXDESC_TX_EN;
edma_reg_write(EDMA_REG_TXDESC_CTRL(desc_index), data);
}
#if defined(NSS_DP_IPQ807X)
/*
* Clear the TXDESC2CMPL_MAP_xx reg before setting up
* the mapping. This register holds TXDESC to TXFILL ring
* mapping.
*/
edma_reg_write(EDMA_REG_TXDESC2CMPL_MAP_0, 0);
edma_reg_write(EDMA_REG_TXDESC2CMPL_MAP_1, 0);
edma_reg_write(EDMA_REG_TXDESC2CMPL_MAP_2, 0);
desc_index = ehw->txcmpl_ring_start;
/*
* Three registers hold the completion mapping for a total of 24
* TX desc rings (0-9, 10-19 and the rest). In each entry, 3 bits hold
* the mapping for a particular TX desc ring.
*/
for (i = ehw->txdesc_ring_start;
i < ehw->txdesc_ring_end; i++) {
if (i >= 0 && i <= 9)
reg = EDMA_REG_TXDESC2CMPL_MAP_0;
else if (i >= 10 && i <= 19)
reg = EDMA_REG_TXDESC2CMPL_MAP_1;
else
reg = EDMA_REG_TXDESC2CMPL_MAP_2;
pr_debug("Configure TXDESC:%u to use TXCMPL:%u\n",
i, desc_index);
data = edma_reg_read(reg);
data |= (desc_index & 0x7) << ((i % 10) * 3);
edma_reg_write(reg, data);
desc_index++;
if (desc_index == ehw->txcmpl_ring_end)
desc_index = ehw->txcmpl_ring_start;
}
#endif
/*
* Set PPE QID to EDMA Rx ring mapping.
* When coming up, only queue 0 is used for the HOST EDMA rings;
* the FW EDMA comes up later and overwrites the mapping as required.
* Each entry can hold the mapping for 8 PPE queues and the entry
* size is 4 bytes.
*/
desc_index = ehw->rxdesc_ring_start;
data = 0;
data |= (desc_index & 0xF);
edma_reg_write(EDMA_QID2RID_TABLE_MEM(0), data);
pr_debug("Configure QID2RID reg:0x%x to 0x%x\n", reg, data);
ret = edma_init_rings(ehw);
if (ret)
return ret;
edma_configure_rings(ehw);
/*
* Set RXDESC2FILL_MAP_xx reg.
* There are two registers RXDESC2FILL_0 and RXDESC2FILL_1
* 3 bits holds the rx fill ring mapping for each of the
* rx descriptor ring.
*/
edma_reg_write(EDMA_REG_RXDESC2FILL_MAP_0, 0);
edma_reg_write(EDMA_REG_RXDESC2FILL_MAP_1, 0);
for (i = ehw->rxdesc_ring_start;
i < ehw->rxdesc_ring_end; i++) {
if ((i >= 0) && (i <= 9))
reg = EDMA_REG_RXDESC2FILL_MAP_0;
else
reg = EDMA_REG_RXDESC2FILL_MAP_1;
rxdesc_ring = &ehw->rxdesc_ring[i - ehw->rxdesc_ring_start];
pr_debug("Configure RXDESC:%u to use RXFILL:%u\n",
rxdesc_ring->id, rxdesc_ring->rxfill->id);
data = edma_reg_read(reg);
data |= (rxdesc_ring->rxfill->id & 0x7) << ((i % 10) * 3);
edma_reg_write(reg, data);
}
reg = EDMA_REG_RXDESC2FILL_MAP_0;
pr_debug("EDMA_REG_RXDESC2FILL_MAP_0: 0x%x\n", edma_reg_read(reg));
reg = EDMA_REG_RXDESC2FILL_MAP_1;
pr_debug("EDMA_REG_RXDESC2FILL_MAP_1: 0x%x\n", edma_reg_read(reg));
#if defined(NSS_DP_IPQ807X)
reg = EDMA_REG_TXDESC2CMPL_MAP_0;
pr_debug("EDMA_REG_TXDESC2CMPL_MAP_0: 0x%x\n", edma_reg_read(reg));
reg = EDMA_REG_TXDESC2CMPL_MAP_1;
pr_debug("EDMA_REG_TXDESC2CMPL_MAP_1: 0x%x\n", edma_reg_read(reg));
reg = EDMA_REG_TXDESC2CMPL_MAP_2;
pr_debug("EDMA_REG_TXDESC2CMPL_MAP_2: 0x%x\n", edma_reg_read(reg));
#endif
/*
* Configure DMA request priority, DMA read burst length,
* and AXI write size.
*/
data = EDMA_DMAR_BURST_LEN_SET(EDMA_BURST_LEN_ENABLE)
| EDMA_DMAR_REQ_PRI_SET(0)
| EDMA_DMAR_TXDATA_OUTSTANDING_NUM_SET(31)
| EDMA_DMAR_TXDESC_OUTSTANDING_NUM_SET(7)
| EDMA_DMAR_RXFILL_OUTSTANDING_NUM_SET(7);
edma_reg_write(EDMA_REG_DMAR_CTRL, data);
#if defined(NSS_DP_IPQ60XX)
data = edma_reg_read(EDMA_REG_AXIW_CTRL);
data |= EDMA_AXIW_MAX_WR_SIZE_EN;
edma_reg_write(EDMA_REG_AXIW_CTRL, data);
#endif
/*
* Misc error mask
*/
data = EDMA_MISC_AXI_RD_ERR_MASK_EN |
EDMA_MISC_AXI_WR_ERR_MASK_EN |
EDMA_MISC_RX_DESC_FIFO_FULL_MASK_EN |
EDMA_MISC_RX_ERR_BUF_SIZE_MASK_EN |
EDMA_MISC_TX_SRAM_FULL_MASK_EN |
EDMA_MISC_TX_CMPL_BUF_FULL_MASK_EN |
EDMA_MISC_DATA_LEN_ERR_MASK_EN;
#if defined(NSS_DP_IPQ807X)
data |= EDMA_MISC_PKT_LEN_LA_64K_MASK_EN |
EDMA_MISC_PKT_LEN_LE_40_MASK_EN;
#else
data |= EDMA_MISC_TX_TIMEOUT_MASK_EN;
#endif
edma_reg_write(EDMA_REG_MISC_INT_MASK, data);
/*
* Global EDMA enable and padding enable
*/
data = EDMA_PORT_PAD_EN | EDMA_PORT_EDMA_EN;
edma_reg_write(EDMA_REG_PORT_CTRL, data);
/*
* Enable Rx rings
*/
for (i = ehw->rxdesc_ring_start; i < ehw->rxdesc_ring_end; i++) {
data = edma_reg_read(EDMA_REG_RXDESC_CTRL(i));
data |= EDMA_RXDESC_RX_EN;
edma_reg_write(EDMA_REG_RXDESC_CTRL(i), data);
}
for (i = ehw->rxfill_ring_start; i < ehw->rxfill_ring_end; i++) {
data = edma_reg_read(EDMA_REG_RXFILL_RING_EN(i));
data |= EDMA_RXFILL_RING_EN;
edma_reg_write(EDMA_REG_RXFILL_RING_EN(i), data);
}
/*
* Enable Tx rings
*/
for (i = ehw->txdesc_ring_start; i < ehw->txdesc_ring_end; i++) {
data = edma_reg_read(EDMA_REG_TXDESC_CTRL(i));
data |= EDMA_TXDESC_TX_EN;
edma_reg_write(EDMA_REG_TXDESC_CTRL(i), data);
}
ehw->edma_initialized = true;
return 0;
}
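A standalone sketch of the 3-bits-per-ring packing used for the TXDESC2CMPL_MAP registers above. The ring ranges chosen here (10 TxDesc rings 0-9 mapped onto 4 TxCmpl rings 0-3) are assumed example values, not the hardware defaults.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int txdesc_ring_start = 0, txdesc_ring_end = 10;
	int txcmpl_ring_start = 0, txcmpl_ring_end = 4;
	int desc_index = txcmpl_ring_start, i;
	uint32_t map0 = 0;

	for (i = txdesc_ring_start; i < txdesc_ring_end; i++) {
		/* 3 bits per TXDESC ring, ten rings per 32-bit register,
		 * matching the packing done in edma_hw_init() above. */
		map0 |= (uint32_t)(desc_index & 0x7) << ((i % 10) * 3);
		if (++desc_index == txcmpl_ring_end)
			desc_index = txcmpl_ring_start;
	}
	printf("TXDESC2CMPL_MAP_0 = 0x%08x\n", map0);
	return 0;
}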

@@ -1,962 +0,0 @@
/*
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
* RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT
* NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE
* USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/debugfs.h>
#include <fal/fal_vsi.h>
#include "nss_dp_dev.h"
#include "edma_regs.h"
#include "edma_data_plane.h"
/*
* EDMA hardware instance
*/
struct edma_hw edma_hw;
/*
* edma_get_port_num_from_netdev()
* Get port number from net device
*/
static int edma_get_port_num_from_netdev(struct net_device *netdev)
{
int i;
for (i = 0; i < EDMA_MAX_GMACS; i++) {
/* In the port-id to netdev mapping table, port-id
* starts from 1 and table index starts from 0.
* So we return index + 1 for port-id
*/
if (edma_hw.netdev_arr[i] == netdev)
return i+1;
}
return -1;
}
/*
* edma_reg_read()
* Read EDMA register
*/
uint32_t edma_reg_read(uint32_t reg_off)
{
return (uint32_t)readl(edma_hw.reg_base + reg_off);
}
/*
* edma_reg_write()
* Write EDMA register
*/
void edma_reg_write(uint32_t reg_off, uint32_t val)
{
writel(val, edma_hw.reg_base + reg_off);
}
/*
* edma_disable_interrupts()
* Disable EDMA RX/TX interrupt masks.
*/
static void edma_disable_interrupts(void)
{
struct edma_rxdesc_ring *rxdesc_ring = NULL;
struct edma_rxfill_ring *rxfill_ring = NULL;
struct edma_txcmpl_ring *txcmpl_ring = NULL;
int i;
for (i = 0; i < edma_hw.rxdesc_rings; i++) {
rxdesc_ring = &edma_hw.rxdesc_ring[i];
edma_reg_write(EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id),
EDMA_MASK_INT_CLEAR);
}
for (i = 0; i < edma_hw.txcmpl_rings; i++) {
txcmpl_ring = &edma_hw.txcmpl_ring[i];
edma_reg_write(EDMA_REG_TX_INT_MASK(txcmpl_ring->id),
EDMA_MASK_INT_CLEAR);
}
for (i = 0; i < edma_hw.rxfill_rings; i++) {
rxfill_ring = &edma_hw.rxfill_ring[i];
edma_reg_write(EDMA_REG_RXFILL_INT_MASK(rxfill_ring->id),
EDMA_MASK_INT_CLEAR);
}
/*
* Clear MISC interrupt mask.
*/
edma_reg_write(EDMA_REG_MISC_INT_MASK, EDMA_MASK_INT_CLEAR);
}
/*
* edma_enable_interrupts()
* Enable RX/TX EDMA interrupt masks.
*/
static void edma_enable_interrupts(void)
{
struct edma_rxdesc_ring *rxdesc_ring = NULL;
struct edma_rxfill_ring *rxfill_ring = NULL;
struct edma_txcmpl_ring *txcmpl_ring = NULL;
int i;
for (i = 0; i < edma_hw.rxfill_rings; i++) {
rxfill_ring = &edma_hw.rxfill_ring[i];
edma_reg_write(EDMA_REG_RXFILL_INT_MASK(rxfill_ring->id),
edma_hw.rxfill_intr_mask);
}
for (i = 0; i < edma_hw.txcmpl_rings; i++) {
txcmpl_ring = &edma_hw.txcmpl_ring[i];
edma_reg_write(EDMA_REG_TX_INT_MASK(txcmpl_ring->id),
edma_hw.txcmpl_intr_mask);
}
for (i = 0; i < edma_hw.rxdesc_rings; i++) {
rxdesc_ring = &edma_hw.rxdesc_ring[i];
edma_reg_write(EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id),
edma_hw.rxdesc_intr_mask);
}
/*
* Enable MISC interrupt mask.
*/
edma_reg_write(EDMA_REG_MISC_INT_MASK, edma_hw.misc_intr_mask);
}
/*
* nss_dp_edma_if_open()
* Do slow path data plane open
*/
static int edma_if_open(struct nss_dp_data_plane_ctx *dpc,
uint32_t tx_desc_ring, uint32_t rx_desc_ring,
uint32_t mode)
{
if (!dpc->dev)
return NSS_DP_FAILURE;
/*
* Enable NAPI
*/
if (edma_hw.active++ != 0)
return NSS_DP_SUCCESS;
napi_enable(&edma_hw.napi);
/*
* Enable the interrupt masks.
*/
edma_enable_interrupts();
return NSS_DP_SUCCESS;
}
/*
* edma_if_close()
* Do slow path data plane close
*/
static int edma_if_close(struct nss_dp_data_plane_ctx *dpc)
{
if (--edma_hw.active != 0)
return NSS_DP_SUCCESS;
/*
* Disable the interrupt masks.
*/
edma_disable_interrupts();
/*
* Disable NAPI
*/
napi_disable(&edma_hw.napi);
return NSS_DP_SUCCESS;
}
/*
* edma_if_link_state()
*/
static int edma_if_link_state(struct nss_dp_data_plane_ctx *dpc,
uint32_t link_state)
{
return NSS_DP_SUCCESS;
}
/*
* edma_if_mac_addr()
*/
static int edma_if_mac_addr(struct nss_dp_data_plane_ctx *dpc, uint8_t *addr)
{
return NSS_DP_SUCCESS;
}
/*
* edma_if_change_mtu()
*/
static int edma_if_change_mtu(struct nss_dp_data_plane_ctx *dpc, uint32_t mtu)
{
return NSS_DP_SUCCESS;
}
/*
* edma_if_xmit()
* Transmit a packet using EDMA
*/
static netdev_tx_t edma_if_xmit(struct nss_dp_data_plane_ctx *dpc,
struct sk_buff *skb)
{
struct net_device *netdev = dpc->dev;
int ret;
uint32_t tx_ring, skbq, nhead, ntail;
bool expand_skb = false;
if (skb->len < ETH_HLEN) {
netdev_dbg(netdev, "skb->len < ETH_HLEN\n");
goto drop;
}
/*
* Select a Tx ring
*/
skbq = skb_get_queue_mapping(skb);
tx_ring = 0;
if ((edma_hw.txdesc_rings > 1) && (skbq > 0))
tx_ring = edma_hw.txdesc_rings % skbq;
/*
* Check for non-linear skb
*/
if (skb_is_nonlinear(skb)) {
netdev_dbg(netdev, "cannot Tx non-linear skb:%px\n", skb);
goto drop;
}
/*
* Check for headroom/tailroom and clone
*/
nhead = netdev->needed_headroom;
ntail = netdev->needed_tailroom;
if (skb_cloned(skb) ||
(skb_headroom(skb) < nhead) ||
(skb_tailroom(skb) < ntail)) {
expand_skb = true;
}
/*
* Expand the skb. This also unclones a cloned skb.
*/
if (expand_skb && pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC)) {
netdev_dbg(netdev, "cannot expand skb:%px\n", skb);
goto drop;
}
/*
* Transmit the packet
*/
ret = edma_ring_xmit(&edma_hw, netdev, skb,
&edma_hw.txdesc_ring[tx_ring]);
if (ret == EDMA_TX_OK)
return NETDEV_TX_OK;
/*
* Not enough descriptors. Stop netdev Tx queue.
*/
if (ret == EDMA_TX_DESC) {
netif_stop_queue(netdev);
return NETDEV_TX_BUSY;
}
drop:
dev_kfree_skb_any(skb);
netdev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
/*
* edma_if_set_features()
* Set the supported net_device features
*/
static void edma_if_set_features(struct nss_dp_data_plane_ctx *dpc)
{
/*
* TODO - add flags to support HIGHMEM/cksum offload/VLAN once
* the features are enabled.
*/
}
/* TODO - check if this is needed */
/*
* edma_if_pause_on_off()
* Set pause frames on or off
*
* No need to send a message if we defaulted to slow path.
*/
static int edma_if_pause_on_off(struct nss_dp_data_plane_ctx *dpc,
uint32_t pause_on)
{
return NSS_DP_SUCCESS;
}
/*
* edma_if_vsi_assign()
* assign vsi of the data plane
*
*/
static int edma_if_vsi_assign(struct nss_dp_data_plane_ctx *dpc, uint32_t vsi)
{
struct net_device *netdev = dpc->dev;
int32_t port_num;
port_num = edma_get_port_num_from_netdev(netdev);
if (port_num < 0)
return NSS_DP_FAILURE;
if (fal_port_vsi_set(0, port_num, vsi) < 0)
return NSS_DP_FAILURE;
return NSS_DP_SUCCESS;
}
/*
* edma_if_vsi_unassign()
* unassign vsi of the data plane
*
*/
static int edma_if_vsi_unassign(struct nss_dp_data_plane_ctx *dpc, uint32_t vsi)
{
struct net_device *netdev = dpc->dev;
int32_t port_num;
port_num = edma_get_port_num_from_netdev(netdev);
if (port_num < 0)
return NSS_DP_FAILURE;
if (fal_port_vsi_set(0, port_num, 0xffff) < 0)
return NSS_DP_FAILURE;
return NSS_DP_SUCCESS;
}
#ifdef CONFIG_RFS_ACCEL
/*
* edma_if_rx_flow_steer()
* Flow steer of the data plane
*
* Initial receive flow steering function for data plane operation.
*/
static int edma_if_rx_flow_steer(struct nss_dp_data_plane_ctx *dpc, struct sk_buff *skb,
uint32_t cpu, bool is_add)
{
return NSS_DP_SUCCESS;
}
#endif
/*
* edma_if_deinit()
* Free edma resources
*/
static int edma_if_deinit(struct nss_dp_data_plane_ctx *dpc)
{
/*
* Free up resources used by EDMA if all the
* interfaces have been overridden
* */
if (edma_hw.dp_override_cnt == EDMA_MAX_GMACS - 1) {
edma_cleanup(true);
} else {
edma_hw.dp_override_cnt++;
}
return NSS_DP_SUCCESS;
}
/*
* edma_irq_init()
* Initialize interrupt handlers for the driver
*/
static int edma_irq_init(void)
{
int err;
uint32_t entry_num, i;
/*
* Get TXCMPL rings IRQ numbers
*/
entry_num = 0;
for (i = 0; i < edma_hw.txcmpl_rings; i++, entry_num++) {
edma_hw.txcmpl_intr[i] =
platform_get_irq(edma_hw.pdev, entry_num);
if (edma_hw.txcmpl_intr[i] < 0) {
pr_warn("%s: txcmpl_intr[%u] irq get failed\n",
(edma_hw.device_node)->name, i);
return -1;
}
pr_debug("%s: txcmpl_intr[%u] = %u\n",
(edma_hw.device_node)->name,
i, edma_hw.txcmpl_intr[i]);
}
/*
* Get RXFILL rings IRQ numbers
*/
for (i = 0; i < edma_hw.rxfill_rings; i++, entry_num++) {
edma_hw.rxfill_intr[i] =
platform_get_irq(edma_hw.pdev, entry_num);
if (edma_hw.rxfill_intr[i] < 0) {
pr_warn("%s: rxfill_intr[%u] irq get failed\n",
(edma_hw.device_node)->name, i);
return -1;
}
pr_debug("%s: rxfill_intr[%u] = %u\n",
(edma_hw.device_node)->name,
i, edma_hw.rxfill_intr[i]);
}
/*
* Get RXDESC rings IRQ numbers
*
*/
for (i = 0; i < edma_hw.rxdesc_rings; i++, entry_num++) {
edma_hw.rxdesc_intr[i] =
platform_get_irq(edma_hw.pdev, entry_num);
if (edma_hw.rxdesc_intr[i] < 0) {
pr_warn("%s: rxdesc_intr[%u] irq get failed\n",
(edma_hw.device_node)->name, i);
return -1;
}
pr_debug("%s: rxdesc_intr[%u] = %u\n",
(edma_hw.device_node)->name,
i, edma_hw.rxdesc_intr[i]);
}
/*
* Get misc IRQ number
*/
edma_hw.misc_intr = platform_get_irq(edma_hw.pdev, entry_num);
pr_debug("%s: misc IRQ:%u\n",
(edma_hw.device_node)->name,
edma_hw.misc_intr);
/*
* Request IRQ for TXCMPL rings
*/
for (i = 0; i < edma_hw.txcmpl_rings; i++) {
err = request_irq(edma_hw.txcmpl_intr[i],
edma_handle_irq, IRQF_SHARED,
"edma_txcmpl", (void *)edma_hw.pdev);
if (err) {
pr_debug("TXCMPL ring IRQ:%d request failed\n",
edma_hw.txcmpl_intr[i]);
return -1;
}
}
/*
* Request IRQ for RXFILL rings
*/
for (i = 0; i < edma_hw.rxfill_rings; i++) {
err = request_irq(edma_hw.rxfill_intr[i],
edma_handle_irq, IRQF_SHARED,
"edma_rxfill", (void *)edma_hw.pdev);
if (err) {
pr_debug("RXFILL ring IRQ:%d request failed\n",
edma_hw.rxfill_intr[i]);
goto rx_fill_ring_intr_req_fail;
}
}
/*
* Request IRQ for RXDESC rings
*/
for (i = 0; i < edma_hw.rxdesc_rings; i++) {
err = request_irq(edma_hw.rxdesc_intr[i],
edma_handle_irq, IRQF_SHARED,
"edma_rxdesc", (void *)edma_hw.pdev);
if (err) {
pr_debug("RXDESC ring IRQ:%d request failed\n",
edma_hw.rxdesc_intr[i]);
goto rx_desc_ring_intr_req_fail;
}
}
/*
* Request Misc IRQ
*/
err = request_irq(edma_hw.misc_intr, edma_handle_misc_irq,
IRQF_SHARED, "edma_misc",
(void *)edma_hw.pdev);
if (err) {
pr_debug("MISC IRQ:%d request failed\n",
edma_hw.misc_intr);
goto misc_intr_req_fail;
}
return 0;
misc_intr_req_fail:
/*
* Free IRQ for RXDESC rings
*/
for (i = 0; i < edma_hw.rxdesc_rings; i++) {
synchronize_irq(edma_hw.rxdesc_intr[i]);
free_irq(edma_hw.rxdesc_intr[i],
(void *)(edma_hw.pdev));
}
rx_desc_ring_intr_req_fail:
/*
* Free IRQ for RXFILL rings
*/
for (i = 0; i < edma_hw.rxfill_rings; i++) {
synchronize_irq(edma_hw.rxfill_intr[i]);
free_irq(edma_hw.rxfill_intr[i],
(void *)(edma_hw.pdev));
}
rx_fill_ring_intr_req_fail:
/*
* Free IRQ for TXCMPL rings
*/
for (i = 0; i < edma_hw.txcmpl_rings; i++) {
synchronize_irq(edma_hw.txcmpl_intr[i]);
free_irq(edma_hw.txcmpl_intr[i],
(void *)(edma_hw.pdev));
}
return -1;
}
/*
* edma_register_netdevice()
* Register netdevice with EDMA
*/
static int edma_register_netdevice(struct net_device *netdev, uint32_t macid)
{
if (!netdev) {
pr_info("nss_dp_edma: Invalid netdev pointer %px\n", netdev);
return -EINVAL;
}
if ((macid < EDMA_START_GMACS) || (macid > EDMA_MAX_GMACS)) {
netdev_dbg(netdev, "nss_dp_edma: Invalid macid(%d) for %s\n",
macid, netdev->name);
return -EINVAL;
}
netdev_info(netdev, "nss_dp_edma: Registering netdev %s(qcom-id:%d) with EDMA\n",
netdev->name, macid);
/*
* We expect 'macid' to correspond to ports numbers on
* IPQ807x. These begin from '1' and hence we subtract
* one when using it as an array index.
*/
edma_hw.netdev_arr[macid - 1] = netdev;
/*
* NAPI add
*/
if (!edma_hw.napi_added) {
netif_napi_add(netdev, &edma_hw.napi, edma_napi,
EDMA_NAPI_WORK);
/*
* Register the interrupt handlers and enable interrupts
*/
if (edma_irq_init() < 0)
return -EINVAL;
edma_hw.napi_added = 1;
}
return 0;
}
/*
* edma_if_init()
*/
static int edma_if_init(struct nss_dp_data_plane_ctx *dpc)
{
struct net_device *netdev = dpc->dev;
struct nss_dp_dev *dp_dev = (struct nss_dp_dev *)netdev_priv(netdev);
int ret = 0;
/*
* Register the netdev
*/
ret = edma_register_netdevice(netdev, dp_dev->macid);
if (ret) {
netdev_dbg(netdev,
"Error registering netdevice with EDMA %s\n",
netdev->name);
return NSS_DP_FAILURE;
}
/*
* Headroom needed for Tx preheader
*/
netdev->needed_headroom += EDMA_TX_PREHDR_SIZE;
return NSS_DP_SUCCESS;
}
/*
* nss_dp_edma_ops
*/
struct nss_dp_data_plane_ops nss_dp_edma_ops = {
.init = edma_if_init,
.open = edma_if_open,
.close = edma_if_close,
.link_state = edma_if_link_state,
.mac_addr = edma_if_mac_addr,
.change_mtu = edma_if_change_mtu,
.xmit = edma_if_xmit,
.set_features = edma_if_set_features,
.pause_on_off = edma_if_pause_on_off,
.vsi_assign = edma_if_vsi_assign,
.vsi_unassign = edma_if_vsi_unassign,
#ifdef CONFIG_RFS_ACCEL
.rx_flow_steer = edma_if_rx_flow_steer,
#endif
.deinit = edma_if_deinit,
};
/*
* edma_of_get_pdata()
* Read the device tree details for EDMA
*/
static int edma_of_get_pdata(struct resource *edma_res)
{
/*
* Find EDMA node in device tree
*/
edma_hw.device_node = of_find_node_by_name(NULL,
EDMA_DEVICE_NODE_NAME);
if (!edma_hw.device_node) {
pr_warn("EDMA device tree node (%s) not found\n",
EDMA_DEVICE_NODE_NAME);
return -EINVAL;
}
/*
* Get EDMA device node
*/
edma_hw.pdev = of_find_device_by_node(edma_hw.device_node);
if (!edma_hw.pdev) {
pr_warn("Platform device for node %px(%s) not found\n",
edma_hw.device_node,
(edma_hw.device_node)->name);
return -EINVAL;
}
/*
* Get EDMA register resource
*/
if (of_address_to_resource(edma_hw.device_node, 0, edma_res) != 0) {
pr_warn("Unable to get register address for edma device: "
EDMA_DEVICE_NODE_NAME"\n");
return -EINVAL;
}
/*
* Get id of first TXDESC ring
*/
if (of_property_read_u32(edma_hw.device_node, "qcom,txdesc-ring-start",
&edma_hw.txdesc_ring_start) != 0) {
pr_warn("Read error 1st TXDESC ring (txdesc_ring_start)\n");
return -EINVAL;
}
/*
* Get number of TXDESC rings
*/
if (of_property_read_u32(edma_hw.device_node, "qcom,txdesc-rings",
&edma_hw.txdesc_rings) != 0) {
pr_warn("Unable to read number of txdesc rings.\n");
return -EINVAL;
}
edma_hw.txdesc_ring_end = edma_hw.txdesc_ring_start +
edma_hw.txdesc_rings;
/*
* Get id of first TXCMPL ring
*/
if (of_property_read_u32(edma_hw.device_node, "qcom,txcmpl-ring-start",
&edma_hw.txcmpl_ring_start) != 0) {
pr_warn("Read error 1st TXCMPL ring (txcmpl_ring_start)\n");
return -EINVAL;
}
/*
* Get number of TXCMPL rings
*/
if (of_property_read_u32(edma_hw.device_node, "qcom,txcmpl-rings",
&edma_hw.txcmpl_rings) != 0) {
pr_warn("Unable to read number of txcmpl rings.\n");
return -EINVAL;
}
edma_hw.txcmpl_ring_end = edma_hw.txcmpl_ring_start +
edma_hw.txcmpl_rings;
/*
* Get id of first RXFILL ring
*/
if (of_property_read_u32(edma_hw.device_node, "qcom,rxfill-ring-start",
&edma_hw.rxfill_ring_start) != 0) {
pr_warn("Read error 1st RXFILL ring (rxfill-ring-start)\n");
return -EINVAL;
}
/*
* Get number of RXFILL rings
*/
if (of_property_read_u32(edma_hw.device_node, "qcom,rxfill-rings",
&edma_hw.rxfill_rings) != 0) {
pr_warn("Unable to read number of rxfill rings.\n");
return -EINVAL;
}
edma_hw.rxfill_ring_end = edma_hw.rxfill_ring_start +
edma_hw.rxfill_rings;
/*
* Get id of first RXDESC ring
*/
if (of_property_read_u32(edma_hw.device_node, "qcom,rxdesc-ring-start",
&edma_hw.rxdesc_ring_start) != 0) {
pr_warn("Read error 1st RXDESC ring (rxdesc-ring-start)\n");
return -EINVAL;
}
/*
* Get number of RXDESC rings
*/
if (of_property_read_u32(edma_hw.device_node, "qcom,rxdesc-rings",
&edma_hw.rxdesc_rings) != 0) {
pr_warn("Unable to read number of rxdesc rings.\n");
return -EINVAL;
}
edma_hw.rxdesc_ring_end = edma_hw.rxdesc_ring_start +
edma_hw.rxdesc_rings;
return 0;
}
/*
* edma_init()
* EDMA init
*/
int edma_init(void)
{
int ret = 0;
struct resource res_edma;
/*
* Get all the DTS data needed
*/
if (edma_of_get_pdata(&res_edma) < 0) {
pr_warn("Unable to get EDMA DTS data.\n");
return -EINVAL;
}
/*
* Request memory region for EDMA registers
*/
edma_hw.reg_resource = request_mem_region(res_edma.start,
resource_size(&res_edma),
EDMA_DEVICE_NODE_NAME);
if (!edma_hw.reg_resource) {
pr_warn("Unable to request EDMA register memory.\n");
return -EFAULT;
}
/*
* Remap register resource
*/
edma_hw.reg_base = ioremap_nocache((edma_hw.reg_resource)->start,
resource_size(edma_hw.reg_resource));
if (!edma_hw.reg_base) {
pr_warn("Unable to remap EDMA register memory.\n");
ret = -EFAULT;
goto edma_init_remap_fail;
}
if (edma_hw_init(&edma_hw) != 0) {
ret = -EFAULT;
goto edma_init_hw_init_fail;
}
platform_set_drvdata(edma_hw.pdev, (void *)&edma_hw);
edma_hw.napi_added = 0;
return 0;
edma_init_hw_init_fail:
iounmap(edma_hw.reg_base);
edma_init_remap_fail:
release_mem_region((edma_hw.reg_resource)->start,
resource_size(edma_hw.reg_resource));
return ret;
}
/*
* edma_disable_port()
* EDMA disable port
*/
static void edma_disable_port(void)
{
edma_reg_write(EDMA_REG_PORT_CTRL, EDMA_DISABLE);
}
/*
* edma_cleanup()
* EDMA cleanup
*/
void edma_cleanup(bool is_dp_override)
{
int i;
struct edma_txcmpl_ring *txcmpl_ring = NULL;
struct edma_rxdesc_ring *rxdesc_ring = NULL;
/*
* The cleanup can happen from data plane override
* or from module_exit; we want to clean up only once
*/
if (!edma_hw.edma_initialized) {
/*
* Disable EDMA only at module exit time, since NSS firmware
* depends on this setting.
*/
if (!is_dp_override) {
edma_disable_port();
}
return;
}
/*
* Disable Rx rings used by this driver
*/
for (i = edma_hw.rxdesc_ring_start; i < edma_hw.rxdesc_ring_end; i++)
edma_reg_write(EDMA_REG_RXDESC_CTRL(i), EDMA_RING_DISABLE);
/*
* Disable Tx rings used by this driver
*/
for (i = edma_hw.txdesc_ring_start; i < edma_hw.txdesc_ring_end; i++)
edma_reg_write(EDMA_REG_TXDESC_CTRL(i), EDMA_RING_DISABLE);
/*
* Disable RxFill Rings used by this driver
*/
for (i = edma_hw.rxfill_ring_start; i < edma_hw.rxfill_ring_end; i++)
edma_reg_write(EDMA_REG_RXFILL_RING_EN(i), EDMA_RING_DISABLE);
/*
* Clear interrupt mask
*/
for (i = 0; i < edma_hw.rxdesc_rings; i++) {
rxdesc_ring = &edma_hw.rxdesc_ring[i];
edma_reg_write(EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id),
EDMA_MASK_INT_CLEAR);
}
for (i = 0; i < edma_hw.txcmpl_rings; i++) {
txcmpl_ring = &edma_hw.txcmpl_ring[i];
edma_reg_write(EDMA_REG_TX_INT_MASK(txcmpl_ring->id),
EDMA_MASK_INT_CLEAR);
}
edma_reg_write(EDMA_REG_MISC_INT_MASK, EDMA_MASK_INT_CLEAR);
/*
* Remove interrupt handlers and NAPI
*/
if (edma_hw.napi_added) {
/*
* Free IRQ for TXCMPL rings
*/
for (i = 0; i < edma_hw.txcmpl_rings; i++) {
synchronize_irq(edma_hw.txcmpl_intr[i]);
free_irq(edma_hw.txcmpl_intr[i],
(void *)(edma_hw.pdev));
}
/*
* Free IRQ for RXFILL rings
*/
for (i = 0; i < edma_hw.rxfill_rings; i++) {
synchronize_irq(edma_hw.rxfill_intr[i]);
free_irq(edma_hw.rxfill_intr[i],
(void *)(edma_hw.pdev));
}
/*
* Free IRQ for RXDESC rings
*/
for (i = 0; i < edma_hw.rxdesc_rings; i++) {
synchronize_irq(edma_hw.rxdesc_intr[i]);
free_irq(edma_hw.rxdesc_intr[i],
(void *)(edma_hw.pdev));
}
/*
* Free Misc IRQ
*/
synchronize_irq(edma_hw.misc_intr);
free_irq(edma_hw.misc_intr, (void *)(edma_hw.pdev));
netif_napi_del(&edma_hw.napi);
edma_hw.napi_added = 0;
}
/*
* Disable EDMA only at module exit time, since NSS firmware
* depends on this setting.
*/
if (!is_dp_override) {
edma_disable_port();
}
/*
* cleanup rings and free
*/
edma_cleanup_rings(&edma_hw);
iounmap(edma_hw.reg_base);
release_mem_region((edma_hw.reg_resource)->start,
resource_size(edma_hw.reg_resource));
/*
* Mark initialize false, so that we do not
* try to cleanup again
*/
edma_hw.edma_initialized = false;
}


@@ -1,287 +0,0 @@
/*
**************************************************************************
* Copyright (c) 2016, 2018-2021, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
#include "nss_dp_dev.h"
#ifndef __NSS_DP_EDMA_DATAPLANE__
#define __NSS_DP_EDMA_DATAPLANE__
#define EDMA_BUF_SIZE 2000
#define EDMA_DEVICE_NODE_NAME "edma"
#define EDMA_RX_BUFF_SIZE (EDMA_BUF_SIZE + EDMA_RX_PREHDR_SIZE)
#define EDMA_RX_PREHDR_SIZE (sizeof(struct edma_rx_preheader))
#define EDMA_TX_PREHDR_SIZE (sizeof(struct edma_tx_preheader))
#define EDMA_RING_SIZE 128
#define EDMA_NAPI_WORK 100
#define EDMA_START_GMACS NSS_DP_START_IFNUM
#define EDMA_MAX_GMACS NSS_DP_HAL_MAX_PORTS
#define EDMA_TX_PKT_MIN_SIZE 33 /* IPQ807x EDMA needs a minimum packet size of 33 bytes */
#if defined(NSS_DP_IPQ60XX)
#define EDMA_MAX_TXCMPL_RINGS 24 /* Max TxCmpl rings */
#else
#define EDMA_MAX_TXCMPL_RINGS 8 /* Max TxCmpl rings */
#endif
#define EDMA_MAX_RXDESC_RINGS 16 /* Max RxDesc rings */
#define EDMA_MAX_RXFILL_RINGS 8 /* Max RxFill rings */
#define EDMA_MAX_TXDESC_RINGS 24 /* Max TxDesc rings */
#define EDMA_GET_DESC(R, i, type) (&(((type *)((R)->desc))[i]))
#define EDMA_RXFILL_DESC(R, i) EDMA_GET_DESC(R, i, struct edma_rxfill_desc)
#define EDMA_RXDESC_DESC(R, i) EDMA_GET_DESC(R, i, struct edma_rxdesc_desc)
#define EDMA_TXDESC_DESC(R, i) EDMA_GET_DESC(R, i, struct edma_txdesc_desc)
#define EDMA_RXPH_SRC_INFO_TYPE_GET(rxph) (((rxph)->src_info >> 8) & 0xf0)
#define EDMA_RXPH_SERVICE_CODE_GET(rxph) (((rxph)->rx_pre4) & 0xff)
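/*
 * Note: as implied by the masks above, EDMA_RXPH_SRC_INFO_TYPE_GET() extracts
 * the type nibble from the upper byte of src_info and
 * EDMA_RXPH_SERVICE_CODE_GET() the low byte of rx_pre4; edma_clean_rx() uses
 * these to validate the source port and to detect PTP event packets.
 */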
/*
* Tx descriptor
*/
struct edma_txdesc_desc {
uint32_t buffer_addr;
/* buffer address */
uint32_t word1;
/* more bit, TSO, preheader, pool, offset and length */
};
/*
* TxCmpl descriptor
*/
struct edma_txcmpl_desc {
uint32_t buffer_addr; /* buffer address/opaque */
uint32_t status; /* status */
};
/*
* Rx descriptor
*/
struct edma_rxdesc_desc {
uint32_t buffer_addr; /* buffer address */
uint32_t status; /* status */
};
/*
* RxFill descriptor
*/
struct edma_rxfill_desc {
uint32_t buffer_addr; /* Buffer address */
uint32_t word1; /* opaque_ind and buffer size */
};
/*
* Tx descriptor ring
*/
struct edma_txdesc_ring {
uint32_t id; /* TXDESC ring number */
void *desc; /* descriptor ring virtual address */
dma_addr_t dma; /* descriptor ring physical address */
spinlock_t tx_lock; /* Tx ring lock */
uint16_t count; /* number of descriptors */
};
/*
* TxCmpl ring
*/
struct edma_txcmpl_ring {
uint32_t id; /* TXCMPL ring number */
void *desc; /* descriptor ring virtual address */
dma_addr_t dma; /* descriptor ring physical address */
uint16_t count; /* number of descriptors in the ring */
};
/*
* RxFill ring
*/
struct edma_rxfill_ring {
uint32_t id; /* RXFILL ring number */
void *desc; /* descriptor ring virtual address */
dma_addr_t dma; /* descriptor ring physical address */
spinlock_t lock; /* Rx ring lock */
uint16_t count; /* number of descriptors in the ring */
};
/*
* RxDesc ring
*/
struct edma_rxdesc_ring {
uint32_t id; /* RXDESC ring number */
struct edma_rxfill_ring *rxfill; /* RXFILL ring used */
void *desc; /* descriptor ring virtual address */
dma_addr_t dma; /* descriptor ring physical address */
uint16_t count; /* number of descriptors in the ring */
};
/*
* EDMA Tx Preheader
*/
struct edma_tx_preheader {
uint32_t opaque; /* Opaque, contains skb pointer */
uint16_t src_info; /* Src information */
uint16_t dst_info; /* Dest information */
uint32_t tx_pre2; /* SVLAN & CVLAN flag, drop prec, hash value */
uint32_t tx_pre3; /* STAG, CTAG */
uint32_t tx_pre4; /* CPU code, L3 & L4 offset, service code */
uint32_t tx_pre5; /* IP addr index, ACL index */
uint32_t tx_pre6; /* IP payload checksum, copy2cpu, timestamp, dscp */
uint32_t tx_pre7; /* Timestamp, QoS TAG */
};
/*
* EDMA Rx Preheader
*/
struct edma_rx_preheader {
uint32_t opaque;
/* Opaque, contains skb pointer*/
uint16_t src_info;
/* Src information */
uint16_t dst_info;
/* Dest information */
uint32_t rx_pre2;
/* SVLAN & CVLAN flag, drop prec, hash value */
uint32_t rx_pre3;
/* STAG, CTAG */
uint32_t rx_pre4;
/* CPU code, L3 & L4 offset, service code */
uint32_t rx_pre5;
/* IP addr index, ACL index */
uint32_t rx_pre6;
/* IP payload checksum, copy2cpu, timestamp, dscp */
uint32_t rx_pre7;
/* Timestamp, QoS TAG */
};
enum edma_tx {
EDMA_TX_OK = 0, /* Tx success */
EDMA_TX_DESC = 1, /* Not enough descriptors */
EDMA_TX_FAIL = 2, /* Tx failure */
};
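/*
 * Note: edma_ring_xmit() returns these codes to edma_if_xmit(), which maps
 * EDMA_TX_OK to NETDEV_TX_OK and EDMA_TX_DESC to a stopped netdev Tx queue
 * plus NETDEV_TX_BUSY.
 */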
/*
* EDMA private data structure
*/
struct edma_hw {
struct napi_struct napi;
/* napi structure */
struct net_device *netdev_arr[EDMA_MAX_GMACS];
/* netdev for each gmac port */
struct device_node *device_node;
/* Device tree node */
struct platform_device *pdev;
/* Platform device */
void __iomem *reg_base;
/* Base register address */
struct resource *reg_resource;
/* Memory resource */
uint16_t rx_payload_offset;
/* start of the payload offset */
uint32_t flags;
/* internal flags */
int active;
/* status */
int napi_added;
/* flag to indicate napi add status */
/*
* Debugfs entries
*/
struct dentry *edma_dentry;
struct dentry *txdesc_dentry;
struct dentry *txcmpl_dentry;
struct dentry *rxdesc_dentry;
/*
* Store for tx and rx skbs
*/
struct sk_buff *rx_skb_store[EDMA_RING_SIZE];
struct sk_buff *tx_skb_store[EDMA_RING_SIZE];
struct edma_rxfill_ring *rxfill_ring;
/* Rx Fill Ring, SW is producer */
struct edma_rxdesc_ring *rxdesc_ring;
/* Rx Descriptor Ring, SW is consumer */
struct edma_txdesc_ring *txdesc_ring;
/* Tx Descriptor Ring, SW is producer */
struct edma_txcmpl_ring *txcmpl_ring;
/* Tx Completion Ring, SW is consumer */
uint32_t txdesc_rings;
/* Number of TxDesc rings */
uint32_t txdesc_ring_start;
/* Id of first TXDESC ring */
uint32_t txdesc_ring_end;
/* Id of the last TXDESC ring */
uint32_t txcmpl_rings;
/* Number of TxCmpl rings */
uint32_t txcmpl_ring_start;
/* Id of first TXCMPL ring */
uint32_t txcmpl_ring_end;
/* Id of last TXCMPL ring */
uint32_t rxfill_rings;
/* Number of RxFill rings */
uint32_t rxfill_ring_start;
/* Id of first RxFill ring */
uint32_t rxfill_ring_end;
/* Id of last RxFill ring */
uint32_t rxdesc_rings;
/* Number of RxDesc rings */
uint32_t rxdesc_ring_start;
/* Id of first RxDesc ring */
uint32_t rxdesc_ring_end;
/* Id of last RxDesc ring */
uint32_t txcmpl_intr[EDMA_MAX_TXCMPL_RINGS];
/* TxCmpl ring IRQ numbers */
uint32_t rxfill_intr[EDMA_MAX_RXFILL_RINGS];
/* Rx fill ring IRQ numbers */
uint32_t rxdesc_intr[EDMA_MAX_RXDESC_RINGS];
/* Rx desc ring IRQ numbers */
uint32_t misc_intr;
/* Misc IRQ number */
uint32_t tx_intr_mask;
/* Tx interrupt mask */
uint32_t rxfill_intr_mask;
/* Rx fill ring interrupt mask */
uint32_t rxdesc_intr_mask;
/* Rx Desc ring interrupt mask */
uint32_t txcmpl_intr_mask;
/* Tx Cmpl ring interrupt mask */
uint32_t misc_intr_mask;
/* misc interrupt mask */
uint32_t dp_override_cnt;
/* number of interfaces overridden */
bool edma_initialized;
/* flag to check initialization status */
};
extern struct edma_hw edma_hw;
uint32_t edma_reg_read(uint32_t reg_off);
void edma_reg_write(uint32_t reg_off, uint32_t val);
int edma_alloc_rx_buffer(struct edma_hw *ehw,
struct edma_rxfill_ring *rxfill_ring);
enum edma_tx edma_ring_xmit(struct edma_hw *ehw,
struct net_device *netdev,
struct sk_buff *skb,
struct edma_txdesc_ring *txdesc_ring);
uint32_t edma_clean_tx(struct edma_hw *ehw,
struct edma_txcmpl_ring *txcmpl_ring);
irqreturn_t edma_handle_irq(int irq, void *ctx);
irqreturn_t edma_handle_misc_irq(int irq, void *ctx);
int edma_napi(struct napi_struct *napi, int budget);
void edma_cleanup_rings(struct edma_hw *ehw);
void edma_cleanup(bool is_dp_override);
int edma_hw_init(struct edma_hw *ehw);
#endif /* __NSS_DP_EDMA_DATAPLANE__ */


@@ -1,454 +0,0 @@
/*
**************************************************************************
* Copyright (c) 2016,2019-2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
#ifndef __EDMA_REGS__
#define __EDMA_REGS__
/*
* IPQ807x EDMA register offsets
*/
#define EDMA_REG_MAS_CTRL 0x0
#define EDMA_REG_PORT_CTRL 0x4
#define EDMA_REG_VLAN_CTRL 0x8
#define EDMA_REG_RXDESC2FILL_MAP_0 0x18
#define EDMA_REG_RXDESC2FILL_MAP_1 0x1c
#define EDMA_REG_TXQ_CTRL 0x20
#define EDMA_REG_TXQ_CTRL_2 0x24
#define EDMA_REG_TXQ_FC_0 0x28
#define EDMA_REG_TXQ_FC_1 0x30
#define EDMA_REG_TXQ_FC_2 0x34
#define EDMA_REG_TXQ_FC_3 0x38
#define EDMA_REG_RXQ_CTRL 0x3c
#define EDMA_REG_RX_TX_FULL_QID 0x40
#define EDMA_REG_RXQ_FC_THRE 0x44
#define EDMA_REG_DMAR_CTRL 0x48
#define EDMA_REG_AXIR_CTRL 0x4c
#define EDMA_REG_AXIW_CTRL 0x50
#define EDMA_REG_MIN_MSS 0x54
#define EDMA_REG_LOOPBACK_CTRL 0x58
#define EDMA_REG_MISC_INT_STAT 0x5c
#define EDMA_REG_MISC_INT_MASK 0x60
#define EDMA_REG_DBG_CTRL 0x64
#define EDMA_REG_DBG_DATA 0x68
#define EDMA_REG_TXDESC_BA(n) (0x1000 + (0x1000 * n))
#define EDMA_REG_TXDESC_PROD_IDX(n) (0x1004 + (0x1000 * n))
#define EDMA_REG_TXDESC_CONS_IDX(n) (0x1008 + (0x1000 * n))
#define EDMA_REG_TXDESC_RING_SIZE(n) (0x100c + (0x1000 * n))
#define EDMA_REG_TXDESC_CTRL(n) (0x1010 + (0x1000 * n))
#if defined(NSS_DP_IPQ807X)
#define EDMA_REG_TXDESC2CMPL_MAP_0 0xc
#define EDMA_REG_TXDESC2CMPL_MAP_1 0x10
#define EDMA_REG_TXDESC2CMPL_MAP_2 0x14
#define EDMA_REG_TXCMPL_BASE 0x19000
#define EDMA_REG_TX_BASE 0x21000
#else
#define EDMA_REG_TXCMPL_BASE 0x79000
#define EDMA_REG_TX_BASE 0x91000
#endif
#define EDMA_REG_TXCMPL_BA_OFFSET 0x00000
#define EDMA_REG_TXCMPL_PROD_IDX_OFFSET 0x00004
#define EDMA_REG_TXCMPL_CONS_IDX_OFFSET 0x00008
#define EDMA_REG_TXCMPL_RING_SIZE_OFFSET 0x0000c
#define EDMA_REG_TXCMPL_UGT_THRE_OFFSET 0x00010
#define EDMA_REG_TXCMPL_CTRL_OFFSET 0x00014
#define EDMA_REG_TXCMPL_BPC_OFFSET 0x00018
#define EDMA_REG_TX_INT_STAT_OFFSET 0x00000
#define EDMA_REG_TX_INT_MASK_OFFSET 0x00004
#define EDMA_REG_TX_MOD_TIMER_OFFSET 0x00008
#define EDMA_REG_TX_INT_CTRL_OFFSET 0x0000c
#define EDMA_REG_TXCMPL_BA(n) (EDMA_REG_TXCMPL_BASE + EDMA_REG_TXCMPL_BA_OFFSET + (0x1000 * n))
#define EDMA_REG_TXCMPL_PROD_IDX(n) (EDMA_REG_TXCMPL_BASE + EDMA_REG_TXCMPL_PROD_IDX_OFFSET + (0x1000 * n))
#define EDMA_REG_TXCMPL_CONS_IDX(n) (EDMA_REG_TXCMPL_BASE + EDMA_REG_TXCMPL_CONS_IDX_OFFSET + (0x1000 * n))
#define EDMA_REG_TXCMPL_RING_SIZE(n) (EDMA_REG_TXCMPL_BASE + EDMA_REG_TXCMPL_RING_SIZE_OFFSET + (0x1000 * n))
#define EDMA_REG_TXCMPL_UGT_THRE(n) (EDMA_REG_TXCMPL_BASE + EDMA_REG_TXCMPL_UGT_THRE_OFFSET + (0x1000 * n))
#define EDMA_REG_TXCMPL_CTRL(n) (EDMA_REG_TXCMPL_BASE + EDMA_REG_TXCMPL_CTRL_OFFSET + (0x1000 * n))
#define EDMA_REG_TXCMPL_BPC(n) (EDMA_REG_TXCMPL_BASE + EDMA_REG_TXCMPL_BPC_OFFSET + (0x1000 * n))
#define EDMA_REG_TX_INT_STAT(n) (EDMA_REG_TX_BASE + EDMA_REG_TX_INT_STAT_OFFSET + (0x1000 * n))
#define EDMA_REG_TX_INT_MASK(n) (EDMA_REG_TX_BASE + EDMA_REG_TX_INT_MASK_OFFSET + (0x1000 * n))
#define EDMA_REG_TX_MOD_TIMER(n) (EDMA_REG_TX_BASE + EDMA_REG_TX_MOD_TIMER_OFFSET + (0x1000 * n))
#define EDMA_REG_TX_INT_CTRL(n) (EDMA_REG_TX_BASE + EDMA_REG_TX_INT_CTRL_OFFSET + (0x1000 * n))
#define EDMA_REG_RXFILL_BA(n) (0x29000 + (0x1000 * n))
#define EDMA_REG_RXFILL_PROD_IDX(n) (0x29004 + (0x1000 * n))
#define EDMA_REG_RXFILL_CONS_IDX(n) (0x29008 + (0x1000 * n))
#define EDMA_REG_RXFILL_RING_SIZE(n) (0x2900c + (0x1000 * n))
#define EDMA_REG_RXFILL_BUFFER1_SIZE(n) (0x29010 + (0x1000 * n))
#define EDMA_REG_RXFILL_FC_THRE(n) (0x29014 + (0x1000 * n))
#define EDMA_REG_RXFILL_UGT_THRE(n) (0x29018 + (0x1000 * n))
#define EDMA_REG_RXFILL_RING_EN(n) (0x2901c + (0x1000 * n))
#define EDMA_REG_RXFILL_DISABLE(n) (0x29020 + (0x1000 * n))
#define EDMA_REG_RXFILL_DISABLE_DONE(n) (0x29024 + (0x1000 * n))
#define EDMA_REG_RXFILL_INT_STAT(n) (0x31000 + (0x1000 * n))
#define EDMA_REG_RXFILL_INT_MASK(n) (0x31004 + (0x1000 * n))
#define EDMA_REG_RXDESC_BA(n) (0x39000 + (0x1000 * n))
#define EDMA_REG_RXDESC_PROD_IDX(n) (0x39004 + (0x1000 * n))
#define EDMA_REG_RXDESC_CONS_IDX(n) (0x39008 + (0x1000 * n))
#define EDMA_REG_RXDESC_RING_SIZE(n) (0x3900c + (0x1000 * n))
#define EDMA_REG_RXDESC_FC_THRE(n) (0x39010 + (0x1000 * n))
#define EDMA_REG_RXDESC_UGT_THRE(n) (0x39014 + (0x1000 * n))
#define EDMA_REG_RXDESC_CTRL(n) (0x39018 + (0x1000 * n))
#define EDMA_REG_RXDESC_BPC(n) (0x3901c + (0x1000 * n))
#define EDMA_REG_RXDESC_INT_STAT(n) (0x49000 + (0x1000 * n))
#define EDMA_REG_RXDESC_INT_MASK(n) (0x49004 + (0x1000 * n))
#define EDMA_REG_RX_MOD_TIMER(n) (0x49008 + (0x1000 * n))
#define EDMA_REG_RX_INT_CTRL(n) (0x4900c + (0x1000 * n))
#define EDMA_QID2RID_TABLE_MEM(q) (0x5a000 + (0x4 * q))
#define EDMA_REG_RXRING_PC(n) (0x5A200 + (0x10 * n))
#define EDMA_REG_RXRING_BC_0(n) (0x5A204 + (0x10 * n))
#define EDMA_REG_RXRING_BC_1(n) (0x5A208 + (0x10 * n))
#define EDMA_REG_TXRING_PC(n) (0x74000 + (0x10 * n))
#define EDMA_REG_TXRING_BC_0(n) (0x74004 + (0x10 * n))
#define EDMA_REG_TXRING_BC_1(n) (0x74008 + (0x10 * n))
/*
* EDMA_REG_PORT_CTRL register
*/
#define EDMA_PORT_PAD_EN 0x1
#define EDMA_PORT_EDMA_EN 0x2
/*
* EDMA_REG_TXQ_CTRL register
*/
#define EDMA_TXDESC_PF_THRE_MASK 0xf
#define EDMA_TXDESC_PF_THRE_SHIFT 0
#define EDMA_TXCMPL_WB_THRE_MASK 0xf
#define EDMA_TXCMPL_WB_THRE_SHIFT 4
#define EDMA_TXDESC_PKT_SRAM_THRE_MASK 0xff
#define EDMA_TXDESC_PKT_SRAM_THRE_SHIFT 8
#define EDMA_TXCMPL_WB_TIMER_MASK 0xffff
#define EDMA_TXCMPL_WB_TIMER_SHIFT 16
/*
* EDMA_REG_RXQ_CTRL register
*/
#define EDMA_RXFILL_PF_THRE_MASK 0xf
#define EDMA_RXFILL_PF_THRE_SHIFT 0
#define EDMA_RXDESC_WB_THRE_MASK 0xf
#define EDMA_RXDESC_WB_THRE_SHIFT 4
#define EDMA_RXDESC_WB_TIMER_MASK 0xffff
#define EDMA_RXDESC_WB_TIMER_SHIFT 16
/*
* EDMA_REG_RX_TX_FULL_QID register
*/
#define EDMA_RX_DESC_FULL_QID_MASK 0xff
#define EDMA_RX_DESC_FULL_QID_SHIFT 0
#define EDMA_TX_CMPL_BUF_FULL_QID_MASK 0xff
#define EDMA_TX_CMPL_BUF_FULL_QID_SHIFT 8
#define EDMA_TX_SRAM_FULL_QID_MASK 0x1f
#define EDMA_TX_SRAM_FULL_QID_SHIFT 16
/*
* EDMA_REG_RXQ_FC_THRE register
*/
#define EDMA_RXFILL_FIFO_XOFF_THRE_MASK 0x1f
#define EDMA_RXFILL_FIFO_XOFF_THRE_SHIFT 0
#define EDMA_DESC_FIFO_XOFF_THRE_MASK 0x3f
#define EDMA_DESC_FIFO_XOFF_THRE_SHIFT 16
/*
* EDMA_REG_DMAR_CTRL register
*/
#define EDMA_DMAR_REQ_PRI_MASK 0x7
#define EDMA_DMAR_REQ_PRI_SHIFT 0
#define EDMA_DMAR_BURST_LEN_MASK 0x1
#define EDMA_DMAR_BURST_LEN_SHIFT 3
#define EDMA_DMAR_TXDATA_OUTSTANDING_NUM_MASK 0x1f
#define EDMA_DMAR_TXDATA_OUTSTANDING_NUM_SHIFT 4
#define EDMA_DMAR_TXDESC_OUTSTANDING_NUM_MASK 0x7
#define EDMA_DMAR_TXDESC_OUTSTANDING_NUM_SHIFT 9
#define EDMA_DMAR_RXFILL_OUTSTANDING_NUM_MASK 0x7
#define EDMA_DMAR_RXFILL_OUTSTANDING_NUM_SHIFT 12
#define EDMA_DMAR_REQ_PRI_SET(x) (((x) & EDMA_DMAR_REQ_PRI_MASK) << EDMA_DMAR_REQ_PRI_SHIFT)
#define EDMA_DMAR_TXDATA_OUTSTANDING_NUM_SET(x) (((x) & EDMA_DMAR_TXDATA_OUTSTANDING_NUM_MASK) << EDMA_DMAR_TXDATA_OUTSTANDING_NUM_SHIFT)
#define EDMA_DMAR_TXDESC_OUTSTANDING_NUM_SET(x) (((x) & EDMA_DMAR_TXDESC_OUTSTANDING_NUM_MASK) << EDMA_DMAR_TXDESC_OUTSTANDING_NUM_SHIFT)
#define EDMA_DMAR_RXFILL_OUTSTANDING_NUM_SET(x) (((x) & EDMA_DMAR_RXFILL_OUTSTANDING_NUM_MASK) << EDMA_DMAR_RXFILL_OUTSTANDING_NUM_SHIFT)
#define EDMA_DMAR_BURST_LEN_SET(x) (((x) & EDMA_DMAR_BURST_LEN_MASK) << EDMA_DMAR_BURST_LEN_SHIFT)
/*
* Enable 128-byte EDMA bursts for IPQ60xx
*/
#if defined(NSS_DP_IPQ60XX)
#define EDMA_BURST_LEN_ENABLE 1
#else
#define EDMA_BURST_LEN_ENABLE 0
#endif
/*
* EDMA_REG_AXIW_CTRL_REG
*/
#define EDMA_AXIW_MAX_WR_SIZE_EN 0x400
/*
* EDMA DISABLE
*/
#define EDMA_DISABLE 0
/*
* EDMA_REG_TXDESC_PROD_IDX register
*/
#define EDMA_TXDESC_PROD_IDX_MASK 0xffff
/*
* EDMA_REG_TXDESC_CONS_IDX register
*/
#define EDMA_TXDESC_CONS_IDX_MASK 0xffff
/*
* EDMA_REG_TXDESC_RING_SIZE register
*/
#define EDMA_TXDESC_RING_SIZE_MASK 0xffff
/*
* EDMA_REG_TXDESC_CTRL register
*/
#define EDMA_TXDESC_ARB_GRP_ID_MASK 0x3
#define EDMA_TXDESC_ARB_GRP_ID_SHIFT 4
#define EDMA_TXDESC_FC_GRP_ID_MASK 0x7
#define EDMA_TXDESC_FC_GRP_ID_SHIFT 1
#define EDMA_TXDESC_TX_EN 0x1
/*
* EDMA_REG_TXCMPL_PROD_IDX register
*/
#define EDMA_TXCMPL_PROD_IDX_MASK 0xffff
/*
* EDMA_REG_TXCMPL_CONS_IDX register
*/
#define EDMA_TXCMPL_CONS_IDX_MASK 0xffff
/*
* EDMA_REG_TXCMPL_RING_SIZE register
*/
#define EDMA_TXCMPL_RING_SIZE_MASK 0xffff
/*
* EDMA_REG_TXCMPL_UGT_THRE register
*/
#define EDMA_TXCMPL_LOW_THRE_MASK 0xffff
#define EDMA_TXCMPL_LOW_THRE_SHIFT 0
#define EDMA_TXCMPL_FC_THRE_MASK 0x3f
#define EDMA_TXCMPL_FC_THRE_SHIFT 16
/*
* EDMA_REG_TXCMPL_CTRL register
*/
#define EDMA_TXCMPL_RET_MODE_BUFF_ADDR 0x0
#define EDMA_TXCMPL_RET_MODE_OPAQUE 0x1
/*
* EDMA_REG_TX_MOD_TIMER register
*/
#define EDMA_TX_MOD_TIMER_INIT_MASK 0xffff
#define EDMA_TX_MOD_TIMER_INIT_SHIFT 0
/*
* EDMA_REG_TX_INT_CTRL register
*/
#define EDMA_TX_INT_MASK 0x3
/*
* EDMA_REG_RXFILL_PROD_IDX register
*/
#define EDMA_RXFILL_PROD_IDX_MASK 0xffff
/*
* EDMA_REG_RXFILL_CONS_IDX register
*/
#define EDMA_RXFILL_CONS_IDX_MASK 0xffff
/*
* EDMA_REG_RXFILL_RING_SIZE register
*/
#define EDMA_RXFILL_RING_SIZE_MASK 0xffff
#define EDMA_RXFILL_BUF_SIZE_MASK 0x3fff
#define EDMA_RXFILL_BUF_SIZE_SHIFT 16
/*
* EDMA_REG_RXFILL_FC_THRE register
*/
#define EDMA_RXFILL_FC_XON_THRE_MASK 0x7ff
#define EDMA_RXFILL_FC_XON_THRE_SHIFT 12
#define EDMA_RXFILL_FC_XOFF_THRE_MASK 0x7ff
#define EDMA_RXFILL_FC_XOFF_THRE_SHIFT 0
/*
* EDMA_REG_RXFILL_UGT_THRE register
*/
#define EDMA_RXFILL_LOW_THRE_MASK 0xffff
#define EDMA_RXFILL_LOW_THRE_SHIFT 0
/*
* EDMA_REG_RXFILL_RING_EN register
*/
#define EDMA_RXFILL_RING_EN 0x1
/*
* EDMA_REG_RXFILL_INT_MASK register
*/
#define EDMA_RXFILL_INT_MASK 0x1
/*
* EDMA_REG_RXDESC_PROD_IDX register
*/
#define EDMA_RXDESC_PROD_IDX_MASK 0xffff
/*
* EDMA_REG_RXDESC_CONS_IDX register
*/
#define EDMA_RXDESC_CONS_IDX_MASK 0xffff
/*
* EDMA_REG_RXDESC_RING_SIZE register
*/
#define EDMA_RXDESC_RING_SIZE_MASK 0xffff
#define EDMA_RXDESC_PL_OFFSET_MASK 0x1ff
#define EDMA_RXDESC_PL_OFFSET_SHIFT 16
/*
* EDMA_REG_RXDESC_FC_THRE register
*/
#define EDMA_RXDESC_FC_XON_THRE_MASK 0x7ff
#define EDMA_RXDESC_FC_XON_THRE_SHIFT 12
#define EDMA_RXDESC_FC_XOFF_THRE_MASK 0x7ff
#define EDMA_RXDESC_FC_XOFF_THRE_SHIFT 0
/*
* EDMA_REG_RXDESC_UGT_THRE register
*/
#define EDMA_RXDESC_LOW_THRE_MASK 0xffff
#define EDMA_RXDESC_LOW_THRE_SHIFT 0
/*
* EDMA_REG_RXDESC_CTRL register
*/
#define EDMA_RXDESC_STAG_REMOVE_EN 0x8
#define EDMA_RXDESC_CTAG_REMOVE_EN 0x4
#define EDMA_RXDESC_QDISC_EN 0x2
#define EDMA_RXDESC_RX_EN 0x1
/*
* EDMA_REG_TX_INT_MASK register
*/
#define EDMA_TX_INT_MASK_PKT_INT 0x1
#define EDMA_TX_INT_MASK_UGT_INT 0x2
/*
* EDMA_REG_RXDESC_INT_STAT register
*/
#define EDMA_RXDESC_INT_STAT_PKT_INT 0x1
#define EDMA_RXDESC_INT_STAT_UGT_INT 0x2
/*
* EDMA_REG_RXDESC_INT_MASK register
*/
#define EDMA_RXDESC_INT_MASK_PKT_INT 0x1
#define EDMA_RXDESC_INT_MASK_TIMER_INT_DIS 0x2
#define EDMA_MASK_INT_DISABLE 0x0
#define EDMA_MASK_INT_CLEAR 0x0
/*
* EDMA_REG_RX_MOD_TIMER register
*/
#define EDMA_RX_MOD_TIMER_INIT_MASK 0xffff
#define EDMA_RX_MOD_TIMER_INIT_SHIFT 0
/*
* EDMA QID2RID register sizes
*/
#define EDMA_QID2RID_DEPTH 0x40
#define EDMA_QID2RID_QUEUES_PER_ENTRY 8
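/*
 * Assuming the values above, each 32-bit QID2RID table entry maps 8 queues
 * (presumably 4 bits per queue) to an Rx ring id, and the 0x40 entries
 * cover 512 queues in total.
 */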
/*
* TXDESC shift values
*/
#define EDMA_TXDESC_MORE_SHIFT 31
#define EDMA_TXDESC_TSO_EN_SHIFT 30
#define EDMA_TXDESC_PREHEADER_SHIFT 29
#define EDMA_TXDESC_POOL_ID_SHIFT 24
#define EDMA_TXDESC_POOL_ID_MASK 0x1f
#define EDMA_TXDESC_DATA_OFFSET_SHIFT 16
#define EDMA_TXDESC_DATA_OFFSET_MASK 0xff
#define EDMA_TXDESC_DATA_LENGTH_SHIFT 0
#define EDMA_TXDESC_DATA_LENGTH_MASK 0xffff
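/*
 * Sketch of the Tx descriptor word1 layout implied by the shifts/masks above:
 * bit 31 more, bit 30 TSO enable, bit 29 preheader present, bits 28:24 pool
 * id, bits 23:16 data offset, bits 15:0 data length.
 */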
#define EDMA_PREHDR_DSTINFO_PORTID_IND 0x20
#define EDMA_PREHDR_PORTNUM_BITS 0x0fff
#define EDMA_RING_DMA_MASK 0xffffffff
/*
* RXDESC shift values
*/
#define EDMA_RXDESC_RX_RXFILL_CNT_MASK 0x000f
#define EDMA_RXDESC_RX_RXFILL_CNT_SHIFT 16
#define EDMA_RXDESC_PKT_SIZE_MASK 0x3fff
#define EDMA_RXDESC_PKT_SIZE_SHIFT 0
#define EDMA_RXDESC_RXD_VALID_MASK 0x1
#define EDMA_RXDESC_RXD_VALID_SHIFT 31
#define EDMA_RXDESC_PACKET_LEN_MASK 0x3fff
#define EDMA_RXDESC_RING_INT_STATUS_MASK 0x3
#define EDMA_RING_DISABLE 0
#define EDMA_TXCMPL_RING_INT_STATUS_MASK 0x3
#define EDMA_TXCMPL_RETMODE_OPAQUE 0x0
#define EDMA_RXFILL_RING_INT_STATUS_MASK 0x1
/*
* TODO tune the timer and threshold values
*/
#define EDMA_RXFILL_FIFO_XOFF_THRE 0x3
#define EDMA_RXFILL_PF_THRE 0x3
#define EDMA_RXDESC_WB_THRE 0x0
#define EDMA_RXDESC_WB_TIMER 0x2
#define EDMA_RXDESC_XON_THRE 50
#define EDMA_RXDESC_XOFF_THRE 30
#define EDMA_RXDESC_LOW_THRE 0
#define EDMA_RX_MOD_TIMER_INIT 1000
#define EDMA_TXDESC_PF_THRE 0x3
#define EDMA_TXCMPL_WB_THRE 0x0
#define EDMA_TXDESC_PKT_SRAM_THRE 0x20
#define EDMA_TXCMPL_WB_TIMER 0x2
#define EDMA_TX_MOD_TIMER 150
/*
* EDMA misc error mask
*/
#define EDMA_MISC_AXI_RD_ERR_MASK_EN 0x1
#define EDMA_MISC_AXI_WR_ERR_MASK_EN 0x2
#define EDMA_MISC_RX_DESC_FIFO_FULL_MASK_EN 0x4
#define EDMA_MISC_RX_ERR_BUF_SIZE_MASK_EN 0x8
#define EDMA_MISC_TX_SRAM_FULL_MASK_EN 0x10
#define EDMA_MISC_TX_CMPL_BUF_FULL_MASK_EN 0x20
#if defined(NSS_DP_IPQ807X)
#define EDMA_MISC_PKT_LEN_LA_64K_MASK_EN 0x40
#define EDMA_MISC_PKT_LEN_LE_40_MASK_EN 0x80
#define EDMA_MISC_DATA_LEN_ERR_MASK_EN 0x100
#else
#define EDMA_MISC_DATA_LEN_ERR_MASK_EN 0x40
#define EDMA_MISC_TX_TIMEOUT_MASK_EN 0x80
#endif
#endif /* __EDMA_REGS__ */


@@ -1,795 +0,0 @@
/*
* Copyright (c) 2016-2018, 2020-21, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
* RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT
* NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE
* USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/version.h>
#include <linux/interrupt.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/debugfs.h>
#include "nss_dp_dev.h"
#include "edma_regs.h"
#include "edma_data_plane.h"
/*
* edma_alloc_rx_buffer()
* Alloc Rx buffers for one RxFill ring
*/
int edma_alloc_rx_buffer(struct edma_hw *ehw,
struct edma_rxfill_ring *rxfill_ring)
{
struct platform_device *pdev = ehw->pdev;
struct sk_buff *skb;
uint16_t num_alloc = 0;
uint16_t cons, next, counter;
struct edma_rxfill_desc *rxfill_desc;
uint32_t reg_data = 0;
uint32_t store_index = 0;
struct edma_rx_preheader *rxph = NULL;
/*
* Read RXFILL ring producer index
*/
reg_data = edma_reg_read(EDMA_REG_RXFILL_PROD_IDX(rxfill_ring->id));
next = reg_data & EDMA_RXFILL_PROD_IDX_MASK & (rxfill_ring->count - 1);
/*
* Read RXFILL ring consumer index
*/
reg_data = edma_reg_read(EDMA_REG_RXFILL_CONS_IDX(rxfill_ring->id));
cons = reg_data & EDMA_RXFILL_CONS_IDX_MASK;
while (1) {
counter = next;
if (++counter == rxfill_ring->count)
counter = 0;
if (counter == cons)
break;
/*
* Allocate buffer
*/
skb = dev_alloc_skb(EDMA_RX_BUFF_SIZE);
if (unlikely(!skb))
break;
/*
* Get RXFILL descriptor
*/
rxfill_desc = EDMA_RXFILL_DESC(rxfill_ring, next);
/*
* Make room for Rx preheader
*/
rxph = (struct edma_rx_preheader *)
skb_push(skb, EDMA_RX_PREHDR_SIZE);
/*
* Store the skb in the rx store
*/
store_index = next;
if (ehw->rx_skb_store[store_index] != NULL) {
dev_kfree_skb_any(skb);
break;
}
ehw->rx_skb_store[store_index] = skb;
memcpy((uint8_t *)&rxph->opaque, (uint8_t *)&store_index, 4);
/*
* Save buffer size in RXFILL descriptor
*/
rxfill_desc->word1 = cpu_to_le32(EDMA_RX_BUFF_SIZE
& EDMA_RXFILL_BUF_SIZE_MASK);
/*
* Map Rx buffer for DMA
*/
rxfill_desc->buffer_addr = cpu_to_le32(dma_map_single(
&pdev->dev,
skb->data,
EDMA_RX_BUFF_SIZE,
DMA_FROM_DEVICE));
if (!rxfill_desc->buffer_addr) {
dev_kfree_skb_any(skb);
ehw->rx_skb_store[store_index] = NULL;
break;
}
num_alloc++;
next = counter;
}
if (num_alloc) {
/*
* Update RXFILL ring producer index
*/
reg_data = next & EDMA_RXFILL_PROD_IDX_MASK;
/*
* make sure the producer index is updated before
* updating the hardware
*/
wmb();
edma_reg_write(EDMA_REG_RXFILL_PROD_IDX(rxfill_ring->id),
reg_data);
}
return num_alloc;
}
/*
* edma_clean_tx()
* Reap Tx descriptors
*/
uint32_t edma_clean_tx(struct edma_hw *ehw,
struct edma_txcmpl_ring *txcmpl_ring)
{
struct platform_device *pdev = ehw->pdev;
struct edma_txcmpl_desc *txcmpl = NULL;
uint16_t prod_idx = 0;
uint16_t cons_idx = 0;
uint32_t data = 0;
uint32_t txcmpl_consumed = 0;
struct sk_buff *skb;
uint32_t len;
int store_index;
dma_addr_t daddr;
/*
* Get TXCMPL ring producer index
*/
data = edma_reg_read(EDMA_REG_TXCMPL_PROD_IDX(txcmpl_ring->id));
prod_idx = data & EDMA_TXCMPL_PROD_IDX_MASK;
/*
* Get TXCMPL ring consumer index
*/
data = edma_reg_read(EDMA_REG_TXCMPL_CONS_IDX(txcmpl_ring->id));
cons_idx = data & EDMA_TXCMPL_CONS_IDX_MASK;
while (cons_idx != prod_idx) {
txcmpl = &(((struct edma_txcmpl_desc *)
(txcmpl_ring->desc))[cons_idx]);
/*
* skb for this is stored in tx store and
* tx header contains the index in the field
* buffer address (opaque) of txcmpl
*/
store_index = txcmpl->buffer_addr;
skb = ehw->tx_skb_store[store_index];
ehw->tx_skb_store[store_index] = NULL;
if (unlikely(!skb)) {
pr_warn("Invalid skb: cons_idx:%u prod_idx:%u status %x\n",
cons_idx, prod_idx, txcmpl->status);
goto next_txcmpl_desc;
}
len = skb_headlen(skb);
daddr = (dma_addr_t)virt_to_phys(skb->data);
pr_debug("skb:%px cons_idx:%d prod_idx:%d word1:0x%x\n",
skb, cons_idx, prod_idx, txcmpl->status);
dma_unmap_single(&pdev->dev, daddr,
len, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
next_txcmpl_desc:
if (++cons_idx == txcmpl_ring->count)
cons_idx = 0;
txcmpl_consumed++;
}
if (txcmpl_consumed == 0)
return 0;
pr_debug("TXCMPL:%u txcmpl_consumed:%u prod_idx:%u cons_idx:%u\n",
txcmpl_ring->id, txcmpl_consumed, prod_idx, cons_idx);
/*
* Update TXCMPL ring consumer index
*/
wmb();
edma_reg_write(EDMA_REG_TXCMPL_CONS_IDX(txcmpl_ring->id), cons_idx);
return txcmpl_consumed;
}
/*
* nss_phy_tstamp_rx_buf()
* Receive timestamp packet
*/
void nss_phy_tstamp_rx_buf(__attribute__((unused))void *app_data, struct sk_buff *skb)
{
struct net_device *ndev = skb->dev;
/*
* The PTP_CLASS_ value 0 is passed to phy driver, which will be
* set to the correct PTP class value by calling ptp_classify_raw
* in drv->rxtstamp function.
*/
if (ndev && ndev->phydev && ndev->phydev->drv &&
ndev->phydev->drv->rxtstamp)
if (ndev->phydev->drv->rxtstamp(ndev->phydev, skb, 0))
return;
netif_receive_skb(skb);
}
EXPORT_SYMBOL(nss_phy_tstamp_rx_buf);
/*
* nss_phy_tstamp_tx_buf()
* Transmit timestamp packet
*/
void nss_phy_tstamp_tx_buf(struct net_device *ndev, struct sk_buff *skb)
{
/*
* Function drv->txtstamp will create a clone of skb if necessary,
* the PTP_CLASS_ value 0 is passed to phy driver, which will be
* set to the correct PTP class value by calling ptp_classify_raw
* in the drv->txtstamp function.
*/
if (ndev && ndev->phydev && ndev->phydev->drv &&
ndev->phydev->drv->txtstamp)
ndev->phydev->drv->txtstamp(ndev->phydev, skb, 0);
}
EXPORT_SYMBOL(nss_phy_tstamp_tx_buf);
/*
* edma_clean_rx()
* Reap Rx descriptors
*/
static uint32_t edma_clean_rx(struct edma_hw *ehw,
int work_to_do,
struct edma_rxdesc_ring *rxdesc_ring)
{
struct platform_device *pdev = ehw->pdev;
struct net_device *ndev;
struct sk_buff *skb = NULL;
struct edma_rxdesc_desc *rxdesc_desc;
struct edma_rx_preheader *rxph = NULL;
uint16_t prod_idx = 0;
int src_port_num = 0;
int pkt_length = 0;
uint16_t cons_idx = 0;
uint32_t work_done = 0;
int store_index;
/*
* Read Rx ring consumer index
*/
cons_idx = edma_reg_read(EDMA_REG_RXDESC_CONS_IDX(rxdesc_ring->id))
& EDMA_RXDESC_CONS_IDX_MASK;
while (1) {
/*
* Read Rx ring producer index
*/
prod_idx = edma_reg_read(
EDMA_REG_RXDESC_PROD_IDX(rxdesc_ring->id))
& EDMA_RXDESC_PROD_IDX_MASK;
if (cons_idx == prod_idx)
break;
if (work_done >= work_to_do)
break;
rxdesc_desc = EDMA_RXDESC_DESC(rxdesc_ring, cons_idx);
/*
* Get Rx preheader
*/
rxph = (struct edma_rx_preheader *)
phys_to_virt(rxdesc_desc->buffer_addr);
/*
* DMA unmap Rx buffer
*/
dma_unmap_single(&pdev->dev,
rxdesc_desc->buffer_addr,
EDMA_RX_BUFF_SIZE,
DMA_FROM_DEVICE);
store_index = rxph->opaque;
skb = ehw->rx_skb_store[store_index];
ehw->rx_skb_store[store_index] = NULL;
if (unlikely(!skb)) {
pr_warn("WARN: empty skb reference in rx_store:%d\n",
cons_idx);
goto next_rx_desc;
}
/*
* Check src_info from Rx preheader
*/
if (EDMA_RXPH_SRC_INFO_TYPE_GET(rxph) ==
EDMA_PREHDR_DSTINFO_PORTID_IND) {
src_port_num = rxph->src_info &
EDMA_PREHDR_PORTNUM_BITS;
} else {
pr_warn("WARN: src_info_type:0x%x. Drop skb:%px\n",
EDMA_RXPH_SRC_INFO_TYPE_GET(rxph), skb);
dev_kfree_skb_any(skb);
goto next_rx_desc;
}
/*
* Get packet length
*/
pkt_length = rxdesc_desc->status & EDMA_RXDESC_PACKET_LEN_MASK;
if (unlikely((src_port_num < NSS_DP_START_IFNUM) ||
(src_port_num > NSS_DP_HAL_MAX_PORTS))) {
pr_warn("WARN: Port number error :%d. Drop skb:%px\n",
src_port_num, skb);
dev_kfree_skb_any(skb);
goto next_rx_desc;
}
/*
* Get netdev for this port using the source port
* number as index into the netdev array. We need to
* subtract one since the indices start from '0' and
* port numbers start from '1'.
*/
ndev = ehw->netdev_arr[src_port_num - 1];
if (unlikely(!ndev)) {
pr_warn("WARN: netdev Null src_info_type:0x%x. Drop skb:%px\n",
src_port_num, skb);
dev_kfree_skb_any(skb);
goto next_rx_desc;
}
if (unlikely(!netif_running(ndev))) {
dev_kfree_skb_any(skb);
goto next_rx_desc;
}
/*
* Remove Rx preheader
*/
skb_pull(skb, EDMA_RX_PREHDR_SIZE);
/*
* Update skb fields and indicate packet to stack
*/
skb->dev = ndev;
skb->skb_iif = ndev->ifindex;
skb_put(skb, pkt_length);
skb->protocol = eth_type_trans(skb, skb->dev);
#ifdef CONFIG_NET_SWITCHDEV
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
skb->offload_fwd_mark = ndev->offload_fwd_mark;
#else
/*
* TODO: Implement ndo_get_devlink_port()
*/
skb->offload_fwd_mark = 0;
#endif
pr_debug("skb:%px ring_idx:%u pktlen:%d proto:0x%x mark:%u\n",
skb, cons_idx, pkt_length, skb->protocol,
skb->offload_fwd_mark);
#else
pr_debug("skb:%px ring_idx:%u pktlen:%d proto:0x%x\n",
skb, cons_idx, pkt_length, skb->protocol);
#endif
/*
* Deliver the ptp packet to phy driver for RX timestamping
*/
if (unlikely(EDMA_RXPH_SERVICE_CODE_GET(rxph) ==
NSS_PTP_EVENT_SERVICE_CODE))
nss_phy_tstamp_rx_buf(ndev, skb);
else
netif_receive_skb(skb);
next_rx_desc:
/*
* Update consumer index
*/
if (++cons_idx == rxdesc_ring->count)
cons_idx = 0;
/*
* Update work done
*/
work_done++;
}
edma_alloc_rx_buffer(ehw, rxdesc_ring->rxfill);
/*
* make sure the consumer index is updated
* before updating the hardware
*/
wmb();
edma_reg_write(EDMA_REG_RXDESC_CONS_IDX(rxdesc_ring->id), cons_idx);
return work_done;
}
/*
* edma_napi()
* EDMA NAPI handler
*/
int edma_napi(struct napi_struct *napi, int budget)
{
struct edma_hw *ehw = container_of(napi, struct edma_hw, napi);
struct edma_txcmpl_ring *txcmpl_ring = NULL;
struct edma_rxdesc_ring *rxdesc_ring = NULL;
struct edma_rxfill_ring *rxfill_ring = NULL;
struct net_device *ndev;
int work_done = 0;
int i;
for (i = 0; i < ehw->rxdesc_rings; i++) {
rxdesc_ring = &ehw->rxdesc_ring[i];
work_done += edma_clean_rx(ehw, budget, rxdesc_ring);
}
for (i = 0; i < ehw->txcmpl_rings; i++) {
txcmpl_ring = &ehw->txcmpl_ring[i];
work_done += edma_clean_tx(ehw, txcmpl_ring);
}
for (i = 0; i < ehw->rxfill_rings; i++) {
rxfill_ring = &ehw->rxfill_ring[i];
work_done += edma_alloc_rx_buffer(ehw, rxfill_ring);
}
/*
* Resume netdev Tx queue
*/
/*
* TODO works currently since we have a single queue.
* Need to make sure we have support in place when there is
* support for multiple queues
*/
for (i = 0; i < EDMA_MAX_GMACS; i++) {
ndev = ehw->netdev_arr[i];
if (!ndev)
continue;
if (netif_queue_stopped(ndev) && netif_carrier_ok(ndev))
netif_start_queue(ndev);
}
/*
* TODO - rework and fix the budget control
*/
if (work_done < budget) {
/*
* TODO per core NAPI
*/
napi_complete(napi);
/*
* Set RXDESC ring interrupt mask
*/
for (i = 0; i < ehw->rxdesc_rings; i++) {
rxdesc_ring = &ehw->rxdesc_ring[i];
edma_reg_write(
EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id),
ehw->rxdesc_intr_mask);
}
/*
* Set TXCMPL ring interrupt mask
*/
for (i = 0; i < ehw->txcmpl_rings; i++) {
txcmpl_ring = &ehw->txcmpl_ring[i];
edma_reg_write(EDMA_REG_TX_INT_MASK(txcmpl_ring->id),
ehw->txcmpl_intr_mask);
}
/*
* Set RXFILL ring interrupt mask
*/
for (i = 0; i < ehw->rxfill_rings; i++) {
rxfill_ring = &ehw->rxfill_ring[i];
edma_reg_write(EDMA_REG_RXFILL_INT_MASK(
rxfill_ring->id),
edma_hw.rxfill_intr_mask);
}
}
return work_done;
}
/*
* edma_ring_xmit()
* Transmit a packet using an EDMA ring
*/
enum edma_tx edma_ring_xmit(struct edma_hw *ehw,
struct net_device *netdev,
struct sk_buff *skb,
struct edma_txdesc_ring *txdesc_ring)
{
struct nss_dp_dev *dp_dev = netdev_priv(netdev);
struct edma_txdesc_desc *txdesc = NULL;
uint16_t buf_len;
uint16_t hw_next_to_use, hw_next_to_clean, chk_idx;
uint32_t data;
uint32_t store_index = 0;
struct edma_tx_preheader *txph = NULL;
/*
* TODO - revisit locking
*/
spin_lock_bh(&txdesc_ring->tx_lock);
/*
* Read TXDESC ring producer index
*/
data = edma_reg_read(EDMA_REG_TXDESC_PROD_IDX(txdesc_ring->id));
hw_next_to_use = data & EDMA_TXDESC_PROD_IDX_MASK;
/*
* Read TXDESC ring consumer index
*/
/*
* TODO - read to local variable to optimize uncached access
*/
data = edma_reg_read(EDMA_REG_TXDESC_CONS_IDX(txdesc_ring->id));
hw_next_to_clean = data & EDMA_TXDESC_CONS_IDX_MASK;
/*
* Check for available Tx descriptor
*/
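/*
 * The ring-full test below assumes the descriptor count is a power of two,
 * so masking with (count - 1) wraps the producer index.
 */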
chk_idx = (hw_next_to_use + 1) & (txdesc_ring->count-1);
if (chk_idx == hw_next_to_clean) {
spin_unlock_bh(&txdesc_ring->tx_lock);
return EDMA_TX_DESC;
}
#if defined(NSS_DP_EDMA_TX_SMALL_PKT_WAR)
/*
* IPQ807x EDMA hardware can't process the packet if the packet size is
* less than EDMA_TX_PKT_MIN_SIZE (33 Byte). So, if the packet size
* is indeed less than EDMA_TX_PKT_MIN_SIZE, perform padding
* (if possible), otherwise drop the packet.
* Using skb_padto() API for padding the packet. This API will drop
* the packet if the padding is not possible.
*/
if (unlikely(skb->len < EDMA_TX_PKT_MIN_SIZE)) {
if (skb_padto(skb, EDMA_TX_PKT_MIN_SIZE)) {
netdev_dbg(netdev, "padding couldn't happen, skb is freed.\n");
netdev->stats.tx_dropped++;
spin_unlock_bh(&txdesc_ring->tx_lock);
return EDMA_TX_OK;
}
skb->len = EDMA_TX_PKT_MIN_SIZE;
}
#endif
buf_len = skb_headlen(skb);
/*
* Deliver the ptp packet to phy driver for TX timestamping
*/
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
nss_phy_tstamp_tx_buf(netdev, skb);
/*
* Make room for Tx preheader
*/
txph = (struct edma_tx_preheader *)skb_push(skb,
EDMA_TX_PREHDR_SIZE);
memset((void *)txph, 0, EDMA_TX_PREHDR_SIZE);
/*
* Populate Tx preheader dst info, port id is macid in dp_dev
*/
txph->dst_info = (EDMA_PREHDR_DSTINFO_PORTID_IND << 8) |
(dp_dev->macid & 0x0fff);
/*
* Store the skb in tx_store
*/
store_index = hw_next_to_use & (txdesc_ring->count - 1);
if (unlikely(ehw->tx_skb_store[store_index] != NULL)) {
spin_unlock_bh(&txdesc_ring->tx_lock);
return EDMA_TX_DESC;
}
ehw->tx_skb_store[store_index] = skb;
memcpy(skb->data, &store_index, 4);
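/*
 * The store index written into the preheader opaque field comes back in the
 * TXCMPL descriptor (opaque return mode), which is how edma_clean_tx()
 * recovers the skb from tx_skb_store.
 */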
/*
* Get Tx descriptor
*/
txdesc = EDMA_TXDESC_DESC(txdesc_ring, hw_next_to_use);
memset(txdesc, 0, sizeof(struct edma_txdesc_desc));
/*
* Map buffer to DMA address
*/
txdesc->buffer_addr = cpu_to_le32(dma_map_single(&(ehw->pdev)->dev,
skb->data,
buf_len + EDMA_TX_PREHDR_SIZE,
DMA_TO_DEVICE));
if (!txdesc->buffer_addr) {
/*
* DMA map failed for this address. Drop it
* and make sure it does not go to the stack again
*/
dev_kfree_skb_any(skb);
ehw->tx_skb_store[store_index] = NULL;
spin_unlock_bh(&txdesc_ring->tx_lock);
return EDMA_TX_OK;
}
/*
* Populate Tx descriptor
*/
txdesc->word1 |= (1 << EDMA_TXDESC_PREHEADER_SHIFT)
| ((EDMA_TX_PREHDR_SIZE & EDMA_TXDESC_DATA_OFFSET_MASK)
<< EDMA_TXDESC_DATA_OFFSET_SHIFT);
txdesc->word1 |= ((buf_len & EDMA_TXDESC_DATA_LENGTH_MASK)
<< EDMA_TXDESC_DATA_LENGTH_SHIFT);
netdev_dbg(netdev, "skb:%px tx_ring:%u proto:0x%x\n",
skb, txdesc_ring->id, ntohs(skb->protocol));
netdev_dbg(netdev, "port:%u prod_idx:%u cons_idx:%u\n",
dp_dev->macid, hw_next_to_use, hw_next_to_clean);
/*
* Update producer index
*/
hw_next_to_use = (hw_next_to_use + 1) & (txdesc_ring->count - 1);
/*
* make sure the hw_next_to_use is updated before the
* write to hardware
*/
wmb();
edma_reg_write(EDMA_REG_TXDESC_PROD_IDX(txdesc_ring->id),
hw_next_to_use & EDMA_TXDESC_PROD_IDX_MASK);
spin_unlock_bh(&txdesc_ring->tx_lock);
return EDMA_TX_OK;
}
/*
* edma_handle_misc_irq()
* Process IRQ
*/
irqreturn_t edma_handle_misc_irq(int irq, void *ctx)
{
uint32_t misc_intr_status = 0;
uint32_t reg_data = 0;
struct edma_hw *ehw = NULL;
struct platform_device *pdev = (struct platform_device *)ctx;
ehw = platform_get_drvdata(pdev);
/*
* Read Misc intr status
*/
reg_data = edma_reg_read(EDMA_REG_MISC_INT_STAT);
misc_intr_status = reg_data & ehw->misc_intr_mask;
/*
* TODO - error logging
*/
if (misc_intr_status == 0)
return IRQ_NONE;
else
edma_reg_write(EDMA_REG_MISC_INT_MASK, EDMA_MASK_INT_DISABLE);
return IRQ_HANDLED;
}
/*
* edma_handle_irq()
* Process IRQ and schedule napi
*/
irqreturn_t edma_handle_irq(int irq, void *ctx)
{
uint32_t reg_data = 0;
uint32_t rxdesc_intr_status = 0;
uint32_t txcmpl_intr_status = 0;
uint32_t rxfill_intr_status = 0;
int i;
struct edma_txcmpl_ring *txcmpl_ring = NULL;
struct edma_rxdesc_ring *rxdesc_ring = NULL;
struct edma_rxfill_ring *rxfill_ring = NULL;
struct edma_hw *ehw = NULL;
struct platform_device *pdev = (struct platform_device *)ctx;
ehw = platform_get_drvdata(pdev);
if (!ehw) {
pr_info("Unable to retrieve platrofm data");
return IRQ_HANDLED;
}
/*
* Read RxDesc intr status
*/
for (i = 0; i < ehw->rxdesc_rings; i++) {
rxdesc_ring = &ehw->rxdesc_ring[i];
reg_data = edma_reg_read(
EDMA_REG_RXDESC_INT_STAT(rxdesc_ring->id));
rxdesc_intr_status |= reg_data &
EDMA_RXDESC_RING_INT_STATUS_MASK;
/*
* Disable RxDesc intr
*/
edma_reg_write(EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id),
EDMA_MASK_INT_DISABLE);
}
/*
* Read TxCmpl intr status
*/
for (i = 0; i < ehw->txcmpl_rings; i++) {
txcmpl_ring = &ehw->txcmpl_ring[i];
reg_data = edma_reg_read(
EDMA_REG_TX_INT_STAT(txcmpl_ring->id));
txcmpl_intr_status |= reg_data &
EDMA_TXCMPL_RING_INT_STATUS_MASK;
/*
* Disable TxCmpl intr
*/
edma_reg_write(EDMA_REG_TX_INT_MASK(txcmpl_ring->id),
EDMA_MASK_INT_DISABLE);
}
/*
* Read RxFill intr status
*/
for (i = 0; i < ehw->rxfill_rings; i++) {
rxfill_ring = &ehw->rxfill_ring[i];
reg_data = edma_reg_read(
EDMA_REG_RXFILL_INT_STAT(rxfill_ring->id));
rxfill_intr_status |= reg_data &
EDMA_RXFILL_RING_INT_STATUS_MASK;
/*
* Disable RxFill intr
*/
edma_reg_write(EDMA_REG_RXFILL_INT_MASK(rxfill_ring->id),
EDMA_MASK_INT_DISABLE);
}
if ((rxdesc_intr_status == 0) && (txcmpl_intr_status == 0) &&
(rxfill_intr_status == 0))
return IRQ_NONE;
for (i = 0; i < ehw->rxdesc_rings; i++) {
rxdesc_ring = &ehw->rxdesc_ring[i];
edma_reg_write(EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id),
EDMA_MASK_INT_DISABLE);
}
/*
*TODO - per core NAPI
*/
if (rxdesc_intr_status || txcmpl_intr_status || rxfill_intr_status)
if (likely(napi_schedule_prep(&ehw->napi)))
__napi_schedule(&ehw->napi);
return IRQ_HANDLED;
}


@@ -1,697 +0,0 @@
/*
**************************************************************************
* Copyright (c) 2016-2017,2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
#ifndef __QCOM_DEV_H__
#define __QCOM_DEV_H__
#include <nss_dp_hal_if.h>
#include "qcom_reg.h"
#include <fal/fal_mib.h>
#include <fal/fal_port_ctrl.h>
/*
* Subclass for base nss_gmac_haldev
*/
struct qcom_hal_dev {
struct nss_gmac_hal_dev nghd; /* Base class */
fal_mib_counter_t stats; /* Stats structure */
};
/*
* qcom_set_rx_flow_ctrl()
*/
static inline void qcom_set_rx_flow_ctrl(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, QCOM_MAC_ENABLE, QCOM_RX_FLOW_ENABLE);
}
/*
* qcom_clear_rx_flow_ctrl()
*/
static inline void qcom_clear_rx_flow_ctrl(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, QCOM_MAC_ENABLE, QCOM_RX_FLOW_ENABLE);
}
/*
* qcom_set_tx_flow_ctrl()
*/
static inline void qcom_set_tx_flow_ctrl(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, QCOM_MAC_ENABLE, QCOM_TX_FLOW_ENABLE);
}
/*
* qcom_clear_tx_flow_ctrl()
*/
static inline void qcom_clear_tx_flow_ctrl(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, QCOM_MAC_ENABLE, QCOM_TX_FLOW_ENABLE);
}
/*
* qcom_clear_mac_ctrl0()
*/
static inline void qcom_clear_mac_ctrl0(struct nss_gmac_hal_dev *nghd)
{
hal_write_reg(nghd->mac_base, QCOM_MAC_CTRL0, 0);
}
/*
* qcom_rx_enable()
*/
static inline void qcom_rx_enable(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, QCOM_MAC_ENABLE, QCOM_RX_MAC_ENABLE);
}
/*
* qcom_rx_disable()
* Disable the reception of frames on GMII/MII.
* GMAC receive state machine is disabled after completion of reception of
* current frame.
*/
static inline void qcom_rx_disable(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, QCOM_MAC_ENABLE, QCOM_RX_MAC_ENABLE);
}
/*
* qcom_tx_enable()
*/
static inline void qcom_tx_enable(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, QCOM_MAC_ENABLE, QCOM_TX_MAC_ENABLE);
}
/*
* qcom_tx_disable()
* Disable the transmission of frames on GMII/MII.
* GMAC transmit state machine is disabled after completion of
* transmission of current frame.
*/
static inline void qcom_tx_disable(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, QCOM_MAC_ENABLE, QCOM_TX_MAC_ENABLE);
}
/*
* qcom_set_full_duplex()
*/
static inline void qcom_set_full_duplex(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, QCOM_MAC_ENABLE, QCOM_DUPLEX);
}
/*
* qcom_set_half_duplex()
*/
static inline void qcom_set_half_duplex(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, QCOM_MAC_ENABLE, QCOM_DUPLEX);
}
/*
* qcom_set_ipgt()
*/
static inline void qcom_set_ipgt(struct nss_gmac_hal_dev *nghd, uint32_t ipgt)
{
uint32_t data;
data = hal_read_reg(nghd->mac_base, QCOM_MAC_CTRL0);
data &= ~QCOM_IPGT_POS;
ipgt = ipgt << QCOM_IPGT_LSB;
data |= ipgt;
hal_write_reg(nghd->mac_base, QCOM_MAC_CTRL0, data);
}
/*
* qcom_set_ipgr()
*/
static inline void qcom_set_ipgr(struct nss_gmac_hal_dev *nghd, uint32_t ipgr)
{
uint32_t data;
data = hal_read_reg(nghd->mac_base, QCOM_MAC_CTRL0);
data &= ~QCOM_IPGR2_POS;
ipgr = ipgr << QCOM_IPGR2_LSB;
data |= ipgr;
hal_write_reg(nghd->mac_base, QCOM_MAC_CTRL0, data);
}
/*
* qcom_set_half_thdf_ctrl()
*/
static inline void qcom_set_half_thdf_ctrl(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_HALF_THDF_CTRL);
}
/*
* qcom_reset_half_thdf_ctrl()
*/
static inline void qcom_reset_half_thdf_ctrl(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_HALF_THDF_CTRL);
}
/*
* qcom_set_frame_len_chk()
*/
static inline void qcom_set_frame_len_chk(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_FLCHK);
}
/*
* qcom_reset_frame_len_chk()
*/
static inline void qcom_reset_frame_len_chk(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_FLCHK);
}
/*
* qcom_set_abebe()
*/
static inline void qcom_set_abebe(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_ABEBE);
}
/*
* qcom_reset_abebe()
*/
static inline void qcom_reset_abebe(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_ABEBE);
}
/*
* qcom_set_amaxe()
*/
static inline void qcom_set_amaxe(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_AMAXE);
}
/*
* qcom_reset_amaxe()
*/
static inline void qcom_reset_amaxe(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_AMAXE);
}
/*
* qcom_set_bpnb()
*/
static inline void qcom_set_bpnb(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_BPNB);
}
/*
* qcom_reset_bpnb()
*/
static inline void qcom_reset_bpnb(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_BPNB);
}
/*
* qcom_set_nobo()
*/
static inline void qcom_set_nobo(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_NOBO);
}
/*
* qcom_reset_nobo()
*/
static inline void qcom_reset_nobo(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_NOBO);
}
/*
* qcom_set_drbnib_rxok()
*/
static inline void qcom_set_drbnib_rxok(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_DRBNIB_RXOK);
}
/*
* qcom_reset_drbnib_rxok()
*/
static inline void qcom_reset_drbnib_rxok(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_DRBNIB_RXOK);
}
/*
* qcom_set_jam_ipg()
*/
static inline void qcom_set_jam_ipg(struct nss_gmac_hal_dev *nghd,
uint32_t jam_ipg)
{
uint32_t data;
data = hal_read_reg(nghd->mac_base, QCOM_MAC_CTRL1);
data &= ~QCOM_JAM_IPG_POS;
jam_ipg = jam_ipg << QCOM_JAM_IPG_LSB;
data |= jam_ipg;
hal_write_reg(nghd->mac_base, QCOM_MAC_CTRL1, data);
}
/*
* qcom_set_ctrl1_test_pause()
*/
static inline void qcom_set_ctrl1_test_pause(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_TPAUSE);
}
/*
* qcom_reset_ctrl1_test_pause()
*/
static inline void qcom_reset_ctrl1_test_pause(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_TPAUSE);
}
/*
* qcom_set_tctl()
*/
static inline void qcom_set_tctl(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_TCTL);
}
/*
* qcom_reset_tctl()
*/
static inline void qcom_reset_tctl(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_TCTL);
}
/*
* qcom_set_sstct()
*/
static inline void qcom_set_sstct(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_SSTCT);
}
/*
* qcom_reset_sstct()
*/
static inline void qcom_reset_sstct(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_SSTCT);
}
/*
* qcom_set_simr()
*/
static inline void qcom_set_simr(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_SIMR);
}
/*
* qcom_reset_simr()
*/
static inline void qcom_reset_simr(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_SIMR);
}
/*
* qcom_set_retry()
*/
static inline void qcom_set_retry(struct nss_gmac_hal_dev *nghd, uint32_t retry)
{
uint32_t data;
data = hal_read_reg(nghd->mac_base, QCOM_MAC_CTRL1);
data &= ~QCOM_RETRY_POS;
retry = retry << QCOM_RETRY_LSB;
data |= retry;
hal_write_reg(nghd->mac_base, QCOM_MAC_CTRL1, data);
}
/*
* qcom_set_prlen()
*/
static inline void qcom_set_prlen(struct nss_gmac_hal_dev *nghd, uint32_t prlen)
{
uint32_t data;
data = hal_read_reg(nghd->mac_base, QCOM_MAC_CTRL1);
data &= ~QCOM_PRLEN_POS;
prlen = prlen << QCOM_PRLEN_LSB;
data |= prlen;
hal_write_reg(nghd->mac_base, QCOM_MAC_CTRL1, data);
}
/*
* qcom_set_ppad()
*/
static inline void qcom_set_ppad(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_PPAD);
}
/*
* qcom_reset_ppad()
*/
static inline void qcom_reset_ppad(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_PPAD);
}
/*
* qcom_set_povr()
*/
static inline void qcom_set_povr(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_POVR);
}
/*
* qcom_reset_povr()
*/
static inline void qcom_reset_povr(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_POVR);
}
/*
* qcom_set_phug()
*/
static inline void qcom_set_phug(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_PHUG);
}
/*
* qcom_reset_phug()
*/
static inline void qcom_reset_phug(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_PHUG);
}
/*
* qcom_set_mbof()
*/
static inline void qcom_set_mbof(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_MBOF);
}
/*
* qcom_reset_mbof()
*/
static inline void qcom_reset_mbof(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_MBOF);
}
/*
* qcom_set_lcol()
*/
static inline void qcom_set_lcol(struct nss_gmac_hal_dev *nghd, uint32_t lcol)
{
uint32_t data;
data = hal_read_reg(nghd->mac_base, QCOM_MAC_CTRL1);
data &= ~QCOM_LCOL_POS;
lcol = lcol << QCOM_LCOL_LSB;
data |= lcol;
hal_write_reg(nghd->mac_base, QCOM_MAC_CTRL1, data);
}
/*
* qcom_set_long_jam()
*/
static inline void qcom_set_long_jam(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_LONG_JAM);
}
/*
* qcom_reset_long_jam()
*/
static inline void qcom_reset_long_jam(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_LONG_JAM);
}
/*
* qcom_set_ipg_dec_len()
*/
static inline void qcom_set_ipg_dec_len(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_IPG_DEC_LEN);
}
/*
* qcom_reset_ipg_dec_len()
*/
static inline void qcom_reset_ipg_dec_len(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_IPG_DEC_LEN);
}
/*
* qcom_set_ctrl2_test_pause()
*/
static inline void qcom_set_ctrl2_test_pause(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_TEST_PAUSE);
}
/*
* qcom_reset_ctrl2_test_pause()
*/
static inline void qcom_reset_ctrl2_test_pause(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_TEST_PAUSE);
}
/*
* qcom_set_mac_loopback()
*/
static inline void qcom_set_mac_loopback(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_MAC_LOOPBACK);
}
/*
* qcom_reset_mac_loopback()
*/
static inline void qcom_reset_mac_loopback(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_MAC_LOOPBACK);
}
/*
* qcom_set_ipg_dec()
*/
static inline void qcom_set_ipg_dec(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_IPG_DEC);
}
/*
* qcom_reset_ipg_dec()
*/
static inline void qcom_reset_ipg_dec(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_IPG_DEC);
}
/*
* qcom_set_crs_sel()
*/
static inline void qcom_set_crs_sel(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_SRS_SEL);
}
/*
* qcom_reset_crs_sel()
*/
static inline void qcom_reset_crs_sel(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_SRS_SEL);
}
/*
* qcom_set_crc_rsv()
*/
static inline void qcom_set_crc_rsv(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_CRC_RSV);
}
/*
* qcom_reset_crc_rsv()
*/
static inline void qcom_reset_crc_rsv(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_CRC_RSV);
}
/*
* qcom_set_ipgr1()
*/
static inline void qcom_set_ipgr1(struct nss_gmac_hal_dev *nghd, uint32_t ipgr1)
{
uint32_t data;
data = hal_read_reg(nghd->mac_base, QCOM_MAC_DBG_CTRL);
data &= ~QCOM_DBG_IPGR1_POS;
ipgr1 = ipgr1 << QCOM_DBG_IPGR1_LSB;
data |= ipgr1;
hal_write_reg(nghd->mac_base, QCOM_MAC_DBG_CTRL, data);
}
/*
* qcom_set_hihg_ipg()
*/
static inline void qcom_set_hihg_ipg(struct nss_gmac_hal_dev *nghd,
uint32_t hihg_ipg)
{
uint32_t data;
data = hal_read_reg(nghd->mac_base, QCOM_MAC_DBG_CTRL);
data &= ~QCOM_DBG_HIHG_IPG_POS;
data |= hihg_ipg << QCOM_DBG_HIHG_IPG_LSB;
hal_write_reg(nghd->mac_base, QCOM_MAC_DBG_CTRL, data);
}
/*
* qcom_set_mac_ipg_ctrl()
*/
static inline void qcom_set_mac_ipg_ctrl(struct nss_gmac_hal_dev *nghd,
uint32_t mac_ipg_ctrl)
{
uint32_t data;
data = hal_read_reg(nghd->mac_base, QCOM_MAC_DBG_CTRL);
data &= ~QCOM_DBG_MAC_IPG_CTRL_POS;
data |= mac_ipg_ctrl << QCOM_DBG_MAC_IPG_CTRL_LSB;
hal_write_reg(nghd->mac_base, QCOM_MAC_DBG_CTRL, data);
}
/*
* qcom_set_mac_len_ctrl()
*/
static inline void qcom_set_mac_len_ctrl(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, QCOM_MAC_DBG_CTRL, QCOM_DBG_MAC_LEN_CTRL);
}
/*
* qcom_reset_mac_len_ctrl()
*/
static inline void qcom_reset_mac_len_ctrl(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, QCOM_MAC_DBG_CTRL, QCOM_DBG_MAC_LEN_CTRL);
}
/*
* qcom_set_edxsdfr_transmit()
*/
static inline void qcom_set_edxsdfr_transmit(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, QCOM_MAC_DBG_CTRL, QCOM_DBG_EDxSDFR_TRANS);
}
/*
* qcom_reset_edxsdfr_transmit()
*/
static inline void qcom_reset_edxsdfr_transmit(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, QCOM_MAC_DBG_CTRL, QCOM_DBG_EDxSDFR_TRANS);
}
/*
* qcom_set_mac_dbg_addr()
*/
static inline void qcom_set_mac_dbg_addr(struct nss_gmac_hal_dev *nghd,
uint8_t mac_dbg_addr)
{
hal_write_reg(nghd->mac_base, QCOM_MAC_DBG_ADDR, mac_dbg_addr);
}
/*
* qcom_set_mac_dbg_data()
*/
static inline void qcom_set_mac_dbg_data(struct nss_gmac_hal_dev *nghd,
uint32_t mac_dbg_data)
{
hal_write_reg(nghd->mac_base, QCOM_MAC_DBG_DATA, mac_dbg_data);
}
/*
* qcom_set_mac_jumbosize()
*/
static inline void qcom_set_mac_jumbosize(struct nss_gmac_hal_dev *nghd,
uint16_t mac_jumbo_size)
{
hal_write_reg(nghd->mac_base, QCOM_MAC_JMB_SIZE, mac_jumbo_size);
}
/*
* qcom_clear_mib_ctrl()
*/
static inline void qcom_clear_mib_ctrl(struct nss_gmac_hal_dev *nghd)
{
hal_write_reg(nghd->mac_base, QCOM_MAC_MIB_CTRL, 0);
}
/*
* qcom_set_mib_ctrl()
*/
static inline void qcom_set_mib_ctrl(struct nss_gmac_hal_dev *nghd,
int mib_settings)
{
hal_set_reg_bits(nghd, QCOM_MAC_MIB_CTRL,
mib_settings);
}
/*
* qcom_get_stats()
*/
static int qcom_get_stats(struct nss_gmac_hal_dev *nghd)
{
struct qcom_hal_dev *qhd = (struct qcom_hal_dev *)nghd;
fal_mib_counter_t *stats = &(qhd->stats);
if (fal_mib_counter_get(0, nghd->mac_id, stats) < 0)
return -1;
return 0;
}
#endif /* __QCOM_DEV_H__ */
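
The multi-bit setters above (qcom_set_ipgt(), qcom_set_ipgr(), qcom_set_jam_ipg(), qcom_set_retry(), qcom_set_prlen(), qcom_set_lcol()) all share the same read-modify-write shape against the MAC_CTRL registers. Below is a minimal standalone sketch of that shape, using stand-in DEMO_* mask/shift values and a plain register value instead of the driver's hal_read_reg()/hal_write_reg() accessors:

#include <stdint.h>

#define DEMO_FIELD_POS 0x0000007f   /* stand-in for e.g. QCOM_IPGT_POS */
#define DEMO_FIELD_LSB 0            /* stand-in for e.g. QCOM_IPGT_LSB */

/*
 * Read-modify-write of one register field: clear the field, shift the new
 * value into place and OR it back in. Masking after the shift keeps an
 * oversized value from spilling into neighbouring fields.
 */
static uint32_t demo_set_field(uint32_t reg, uint32_t val)
{
    reg &= ~DEMO_FIELD_POS;
    reg |= (val << DEMO_FIELD_LSB) & DEMO_FIELD_POS;
    return reg;
}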


@@ -1,479 +0,0 @@
/*
**************************************************************************
* Copyright (c) 2016-2018, 2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <nss_dp_hal_if.h>
#include <nss_dp_dev.h>
#include "qcom_dev.h"
#define QCOM_STAT(m) offsetof(fal_mib_counter_t, m)
/*
* Ethtool stats pointer structure
*/
struct qcom_ethtool_stats {
uint8_t stat_string[ETH_GSTRING_LEN];
uint32_t stat_offset;
};
/*
* Array of strings describing statistics
*/
static const struct qcom_ethtool_stats qcom_gstrings_stats[] = {
{"rx_broadcast", QCOM_STAT(RxBroad)},
{"rx_pause", QCOM_STAT(RxPause)},
{"rx_unicast", QCOM_STAT(RxUniCast)},
{"rx_multicast", QCOM_STAT(RxMulti)},
{"rx_fcserr", QCOM_STAT(RxFcsErr)},
{"rx_alignerr", QCOM_STAT(RxAllignErr)},
{"rx_runt", QCOM_STAT(RxRunt)},
{"rx_frag", QCOM_STAT(RxFragment)},
{"rx_jmbfcserr", QCOM_STAT(RxJumboFcsErr)},
{"rx_jmbalignerr", QCOM_STAT(RxJumboAligenErr)},
{"rx_pkt64", QCOM_STAT(Rx64Byte)},
{"rx_pkt65to127", QCOM_STAT(Rx128Byte)},
{"rx_pkt128to255", QCOM_STAT(Rx256Byte)},
{"rx_pkt256to511", QCOM_STAT(Rx512Byte)},
{"rx_pkt512to1023", QCOM_STAT(Rx1024Byte)},
{"rx_pkt1024to1518", QCOM_STAT(Rx1518Byte)},
{"rx_pkt1519tox", QCOM_STAT(RxMaxByte)},
{"rx_toolong", QCOM_STAT(RxTooLong)},
{"rx_pktgoodbyte", QCOM_STAT(RxGoodByte)},
{"rx_pktbadbyte", QCOM_STAT(RxBadByte)},
{"rx_overflow", QCOM_STAT(RxOverFlow)},
{"tx_broadcast", QCOM_STAT(TxBroad)},
{"tx_pause", QCOM_STAT(TxPause)},
{"tx_multicast", QCOM_STAT(TxMulti)},
{"tx_underrun", QCOM_STAT(TxUnderRun)},
{"tx_pkt64", QCOM_STAT(Tx64Byte)},
{"tx_pkt65to127", QCOM_STAT(Tx128Byte)},
{"tx_pkt128to255", QCOM_STAT(Tx256Byte)},
{"tx_pkt256to511", QCOM_STAT(Tx512Byte)},
{"tx_pkt512to1023", QCOM_STAT(Tx1024Byte)},
{"tx_pkt1024to1518", QCOM_STAT(Tx1518Byte)},
{"tx_pkt1519tox", QCOM_STAT(TxMaxByte)},
{"tx_oversize", QCOM_STAT(TxOverSize)},
{"tx_pktbyte_h", QCOM_STAT(TxByte)},
{"tx_collisions", QCOM_STAT(TxCollision)},
{"tx_abortcol", QCOM_STAT(TxAbortCol)},
{"tx_multicol", QCOM_STAT(TxMultiCol)},
{"tx_singlecol", QCOM_STAT(TxSingalCol)},
{"tx_exesdeffer", QCOM_STAT(TxExcDefer)},
{"tx_deffer", QCOM_STAT(TxDefer)},
{"tx_latecol", QCOM_STAT(TxLateCol)},
{"tx_unicast", QCOM_STAT(TxUniCast)},
};
/*
* Array of strings describing private flag names
*/
static const char * const qcom_strings_priv_flags[] = {
"linkpoll",
"tstamp",
"tsmode",
};
#define QCOM_STATS_LEN ARRAY_SIZE(qcom_gstrings_stats)
#define QCOM_PRIV_FLAGS_LEN ARRAY_SIZE(qcom_strings_priv_flags)
/*
* qcom_set_mac_speed()
*/
static int32_t qcom_set_mac_speed(struct nss_gmac_hal_dev *nghd,
uint32_t mac_speed)
{
struct net_device *netdev = nghd->netdev;
netdev_warn(netdev, "API deprecated\n");
return 0;
}
/*
* qcom_get_mac_speed()
*/
static uint32_t qcom_get_mac_speed(struct nss_gmac_hal_dev *nghd)
{
struct net_device *netdev = nghd->netdev;
netdev_warn(netdev, "API deprecated\n");
return 0;
}
/*
* qcom_set_duplex_mode()
*/
static void qcom_set_duplex_mode(struct nss_gmac_hal_dev *nghd,
uint8_t duplex_mode)
{
struct net_device *netdev = nghd->netdev;
netdev_warn(netdev, "This API deprecated\n");
}
/*
* qcom_get_duplex_mode()
*/
static uint8_t qcom_get_duplex_mode(struct nss_gmac_hal_dev *nghd)
{
struct net_device *netdev = nghd->netdev;
netdev_warn(netdev, "API deprecated\n");
return 0;
}
/*
* qcom_rx_flow_control()
*/
static void qcom_rx_flow_control(struct nss_gmac_hal_dev *nghd, bool enabled)
{
if (enabled)
qcom_set_rx_flow_ctrl(nghd);
else
qcom_clear_rx_flow_ctrl(nghd);
}
/*
* qcom_tx_flow_control()
*/
static void qcom_tx_flow_control(struct nss_gmac_hal_dev *nghd, bool enabled)
{
if (enabled)
qcom_set_tx_flow_ctrl(nghd);
else
qcom_clear_tx_flow_ctrl(nghd);
}
/*
* qcom_get_mib_stats()
*/
static int32_t qcom_get_mib_stats(struct nss_gmac_hal_dev *nghd)
{
if (qcom_get_stats(nghd))
return -1;
return 0;
}
/*
* qcom_set_maxframe()
*/
static int32_t qcom_set_maxframe(struct nss_gmac_hal_dev *nghd,
uint32_t maxframe)
{
return fal_port_max_frame_size_set(0, nghd->mac_id, maxframe);
}
/*
* qcom_get_maxframe()
*/
static int32_t qcom_get_maxframe(struct nss_gmac_hal_dev *nghd)
{
int ret;
uint32_t mtu;
ret = fal_port_max_frame_size_get(0, nghd->mac_id, &mtu);
if (!ret)
return mtu;
return ret;
}
/*
* qcom_get_netdev_stats()
*/
static int32_t qcom_get_netdev_stats(struct nss_gmac_hal_dev *nghd,
struct rtnl_link_stats64 *stats)
{
struct qcom_hal_dev *qhd = (struct qcom_hal_dev *)nghd;
fal_mib_counter_t *hal_stats = &(qhd->stats);
if (qcom_get_mib_stats(nghd))
return -1;
stats->rx_packets = hal_stats->RxUniCast + hal_stats->RxBroad
+ hal_stats->RxMulti;
stats->tx_packets = hal_stats->TxUniCast + hal_stats->TxBroad
+ hal_stats->TxMulti;
stats->rx_bytes = hal_stats->RxGoodByte;
stats->tx_bytes = hal_stats->TxByte;
/* RX errors */
stats->rx_crc_errors = hal_stats->RxFcsErr + hal_stats->RxJumboFcsErr;
stats->rx_frame_errors = hal_stats->RxAllignErr +
hal_stats->RxJumboAligenErr;
stats->rx_fifo_errors = hal_stats->RxRunt;
stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors +
stats->rx_fifo_errors;
stats->rx_dropped = hal_stats->RxTooLong + stats->rx_errors;
/* TX errors */
stats->tx_fifo_errors = hal_stats->TxUnderRun;
stats->tx_aborted_errors = hal_stats->TxAbortCol;
stats->tx_errors = stats->tx_fifo_errors + stats->tx_aborted_errors;
stats->collisions = hal_stats->TxCollision;
stats->multicast = hal_stats->RxMulti;
return 0;
}
/*
* qcom_get_strset_count()
* Get string set count for ethtool operations
*/
int32_t qcom_get_strset_count(struct nss_gmac_hal_dev *nghd, int32_t sset)
{
struct net_device *netdev = nghd->netdev;
switch (sset) {
case ETH_SS_STATS:
return QCOM_STATS_LEN;
case ETH_SS_PRIV_FLAGS:
return QCOM_PRIV_FLAGS_LEN;
}
netdev_dbg(netdev, "%s: Invalid string set\n", __func__);
return -EPERM;
}
/*
* qcom_get_strings()
* Get strings
*/
int32_t qcom_get_strings(struct nss_gmac_hal_dev *nghd, int32_t sset,
uint8_t *data)
{
struct net_device *netdev = nghd->netdev;
int i;
switch (sset) {
case ETH_SS_STATS:
for (i = 0; i < QCOM_STATS_LEN; i++) {
memcpy(data, qcom_gstrings_stats[i].stat_string,
strlen(qcom_gstrings_stats[i].stat_string));
data += ETH_GSTRING_LEN;
}
break;
case ETH_SS_PRIV_FLAGS:
for (i = 0; i < QCOM_PRIV_FLAGS_LEN; i++) {
memcpy(data, qcom_strings_priv_flags[i],
strlen(qcom_strings_priv_flags[i]));
data += ETH_GSTRING_LEN;
}
break;
default:
netdev_dbg(netdev, "%s: Invalid string set\n", __func__);
return -EPERM;
}
return 0;
}
/*
* qcom_get_eth_stats()
*/
static int32_t qcom_get_eth_stats(struct nss_gmac_hal_dev *nghd, uint64_t *data)
{
struct qcom_hal_dev *qhd = (struct qcom_hal_dev *)nghd;
fal_mib_counter_t *stats = &(qhd->stats);
uint8_t *p;
int i;
if (qcom_get_mib_stats(nghd))
return -1;
for (i = 0; i < QCOM_STATS_LEN; i++) {
p = (uint8_t *)stats + qcom_gstrings_stats[i].stat_offset;
data[i] = *(uint32_t *)p;
}
return 0;
}
/*
* qcom_send_pause_frame()
*/
static void qcom_send_pause_frame(struct nss_gmac_hal_dev *nghd)
{
qcom_set_ctrl2_test_pause(nghd);
}
/*
* qcom_stop_pause_frame()
*/
static void qcom_stop_pause_frame(struct nss_gmac_hal_dev *nghd)
{
qcom_reset_ctrl2_test_pause(nghd);
}
/*
* qcom_start()
*/
static int32_t qcom_start(struct nss_gmac_hal_dev *nghd)
{
qcom_set_full_duplex(nghd);
/* TODO: Read speed from dts */
if (qcom_set_mac_speed(nghd, SPEED_1000))
return -1;
qcom_tx_enable(nghd);
qcom_rx_enable(nghd);
netdev_dbg(nghd->netdev, "%s: mac_base:0x%px mac_enable:0x%x\n",
__func__, nghd->mac_base,
hal_read_reg(nghd->mac_base, QCOM_MAC_ENABLE));
return 0;
}
/*
* qcom_stop()
*/
static int32_t qcom_stop(struct nss_gmac_hal_dev *nghd)
{
qcom_tx_disable(nghd);
qcom_rx_disable(nghd);
netdev_dbg(nghd->netdev, "%s: mac_base:0x%px mac_enable:0x%x\n",
__func__, nghd->mac_base,
hal_read_reg(nghd->mac_base, QCOM_MAC_ENABLE));
return 0;
}
/*
* qcom_init()
*/
static void *qcom_init(struct gmac_hal_platform_data *gmacpdata)
{
struct qcom_hal_dev *qhd = NULL;
struct net_device *ndev = NULL;
struct nss_dp_dev *dp_priv = NULL;
struct resource *res;
ndev = gmacpdata->netdev;
dp_priv = netdev_priv(ndev);
res = platform_get_resource(dp_priv->pdev, IORESOURCE_MEM, 0);
if (!res) {
netdev_dbg(ndev, "Resource get failed.\n");
return NULL;
}
if (!devm_request_mem_region(&dp_priv->pdev->dev, res->start,
resource_size(res), ndev->name)) {
netdev_dbg(ndev, "Request mem region failed. Returning...\n");
return NULL;
}
qhd = (struct qcom_hal_dev *)devm_kzalloc(&dp_priv->pdev->dev,
sizeof(struct qcom_hal_dev), GFP_KERNEL);
if (!qhd) {
netdev_dbg(ndev, "kzalloc failed. Returning...\n");
return NULL;
}
/* Save netdev context in QCOM HAL context */
qhd->nghd.netdev = gmacpdata->netdev;
qhd->nghd.mac_id = gmacpdata->macid;
/* Populate the mac base addresses */
qhd->nghd.mac_base = devm_ioremap_nocache(&dp_priv->pdev->dev,
res->start, resource_size(res));
if (!qhd->nghd.mac_base) {
netdev_dbg(ndev, "ioremap fail.\n");
return NULL;
}
spin_lock_init(&qhd->nghd.slock);
netdev_dbg(ndev, "ioremap OK.Size 0x%x Ndev base 0x%lx macbase 0x%px\n",
gmacpdata->reg_len,
ndev->base_addr,
qhd->nghd.mac_base);
/* Reset MIB Stats */
if (fal_mib_port_flush_counters(0, qhd->nghd.mac_id)) {
netdev_dbg(ndev, "MIB stats Reset fail.\n");
}
return (struct nss_gmac_hal_dev *)qhd;
}
/*
* qcom_get_mac_address()
*/
static void qcom_get_mac_address(struct nss_gmac_hal_dev *nghd,
uint8_t *macaddr)
{
uint32_t data = hal_read_reg(nghd->mac_base, QCOM_MAC_ADDR0);
macaddr[5] = (data >> 8) & 0xff;
macaddr[4] = (data) & 0xff;
data = hal_read_reg(nghd->mac_base, QCOM_MAC_ADDR1);
macaddr[0] = (data >> 24) & 0xff;
macaddr[1] = (data >> 16) & 0xff;
macaddr[2] = (data >> 8) & 0xff;
macaddr[3] = (data) & 0xff;
}
/*
* qcom_set_mac_address()
*/
static void qcom_set_mac_address(struct nss_gmac_hal_dev *nghd,
uint8_t *macaddr)
{
uint32_t data = (macaddr[5] << 8) | macaddr[4];
hal_write_reg(nghd->mac_base, QCOM_MAC_ADDR0, data);
data = (macaddr[0] << 24) | (macaddr[1] << 16)
| (macaddr[2] << 8) | macaddr[3];
hal_write_reg(nghd->mac_base, QCOM_MAC_ADDR1, data);
}
/*
* MAC hal_ops base structure
*/
struct nss_gmac_hal_ops qcom_hal_ops = {
.init = &qcom_init,
.start = &qcom_start,
.stop = &qcom_stop,
.setmacaddr = &qcom_set_mac_address,
.getmacaddr = &qcom_get_mac_address,
.rxflowcontrol = &qcom_rx_flow_control,
.txflowcontrol = &qcom_tx_flow_control,
.setspeed = &qcom_set_mac_speed,
.getspeed = &qcom_get_mac_speed,
.setduplex = &qcom_set_duplex_mode,
.getduplex = &qcom_get_duplex_mode,
.getstats = &qcom_get_mib_stats,
.setmaxframe = &qcom_set_maxframe,
.getmaxframe = &qcom_get_maxframe,
.getndostats = &qcom_get_netdev_stats,
.getssetcount = &qcom_get_strset_count,
.getstrings = &qcom_get_strings,
.getethtoolstats = &qcom_get_eth_stats,
.sendpause = &qcom_send_pause_frame,
.stoppause = &qcom_stop_pause_frame,
};
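
For clarity, the setmacaddr/getmacaddr pair above splits the six MAC octets across two 32-bit registers: QCOM_MAC_ADDR0 carries octets 4-5 and QCOM_MAC_ADDR1 carries octets 0-3. A small host-side sketch of the same packing follows (the demo_* helper name is illustrative, not part of the driver):

#include <stdint.h>

/*
 * Pack a MAC address the way qcom_set_mac_address() does:
 *   QCOM_MAC_ADDR0[15:0] = mac[5]:mac[4]
 *   QCOM_MAC_ADDR1[31:0] = mac[0]:mac[1]:mac[2]:mac[3]
 */
static void demo_pack_mac(const uint8_t mac[6], uint32_t *addr0, uint32_t *addr1)
{
    *addr0 = ((uint32_t)mac[5] << 8) | mac[4];
    *addr1 = ((uint32_t)mac[0] << 24) | ((uint32_t)mac[1] << 16) |
             ((uint32_t)mac[2] << 8) | mac[3];
}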


@@ -1,156 +0,0 @@
/*
**************************************************************************
* Copyright (c) 2016,2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
#ifndef __QCOM_REG_H__
#define __QCOM_REG_H__
/* Register Offsets */
/* Offsets of GMAC config and status registers within NSS_GMAC_QCOM_MAC_BASE */
#define QCOM_MAC_ENABLE 0x0000
#define QCOM_MAC_SPEED 0x0004
#define QCOM_MAC_ADDR0 0x0008
#define QCOM_MAC_ADDR1 0x000c
#define QCOM_MAC_CTRL0 0x0010
#define QCOM_MAC_CTRL1 0x0014
#define QCOM_MAC_CTRL2 0x0018
#define QCOM_MAC_DBG_CTRL 0x001c
#define QCOM_MAC_DBG_ADDR 0x0020
#define QCOM_MAC_DBG_DATA 0x0024
#define QCOM_MAC_JMB_SIZE 0x0030
#define QCOM_MAC_MIB_CTRL 0x0034
/* RX stats */
#define QCOM_RXBROAD 0x0040
#define QCOM_RXPAUSE 0x0044
#define QCOM_RXMULTI 0x0048
#define QCOM_RXFCSERR 0x004c
#define QCOM_RXALIGNERR 0x0050
#define QCOM_RXRUNT 0x0054
#define QCOM_RXFRAG 0x0058
#define QCOM_RXJMBFCSERR 0x005c
#define QCOM_RXJMBALIGNERR 0x0060
#define QCOM_RXPKT64 0x0064
#define QCOM_RXPKT65TO127 0x0068
#define QCOM_RXPKT128TO255 0x006c
#define QCOM_RXPKT256TO511 0x0070
#define QCOM_RXPKT512TO1023 0x0074
#define QCOM_RXPKT1024TO1518 0x0078
#define QCOM_RXPKT1519TOX 0x007c
#define QCOM_RXPKTTOOLONG 0x0080
#define QCOM_RXPKTGOODBYTE_L 0x0084
#define QCOM_RXPKTGOODBYTE_H 0x0088
#define QCOM_RXPKTBADBYTE_L 0x008c
#define QCOM_RXPKTBADBYTE_H 0x0090
#define QCOM_RXUNI 0x0094
/* TX stats */
#define QCOM_TXBROAD 0x00a0
#define QCOM_TXPAUSE 0x00a4
#define QCOM_TXMULTI 0x00a8
#define QCOM_TXUNDERUN 0x00aC
#define QCOM_TXPKT64 0x00b0
#define QCOM_TXPKT65TO127 0x00b4
#define QCOM_TXPKT128TO255 0x00b8
#define QCOM_TXPKT256TO511 0x00bc
#define QCOM_TXPKT512TO1023 0x00c0
#define QCOM_TXPKT1024TO1518 0x00c4
#define QCOM_TXPKT1519TOX 0x00c8
#define QCOM_TXPKTBYTE_L 0x00cc
#define QCOM_TXPKTBYTE_H 0x00d0
#define QCOM_TXCOLLISIONS 0x00d4
#define QCOM_TXABORTCOL 0x00d8
#define QCOM_TXMULTICOL 0x00dc
#define QCOM_TXSINGLECOL 0x00e0
#define QCOM_TXEXCESSIVEDEFER 0x00e4
#define QCOM_TXDEFER 0x00e8
#define QCOM_TXLATECOL 0x00ec
#define QCOM_TXUNI 0x00f0
/* Bit Masks */
/* GMAC BITs */
#define QCOM_RX_MAC_ENABLE 1
#define QCOM_TX_MAC_ENABLE 0x2
#define QCOM_DUPLEX 0x10
#define QCOM_RX_FLOW_ENABLE 0x20
#define QCOM_TX_FLOW_ENABLE 0x40
#define QCOM_MAC_SPEED_10 0
#define QCOM_MAC_SPEED_100 1
#define QCOM_MAC_SPEED_1000 2
/* MAC CTRL0 */
#define QCOM_IPGT_POS 0x0000007f
#define QCOM_IPGT_LSB 0
#define QCOM_IPGR2_POS 0x00007f00
#define QCOM_IPGR2_LSB 8
#define QCOM_HALF_THDF_CTRL 0x8000
#define QCOM_HUGE_RECV 0x10000
#define QCOM_HUGE_TRANS 0x20000
#define QCOM_FLCHK 0x40000
#define QCOM_ABEBE 0x80000
#define QCOM_AMAXE 0x10000000
#define QCOM_BPNB 0x20000000
#define QCOM_NOBO 0x40000000
#define QCOM_DRBNIB_RXOK 0x80000000
/* MAC CTRL1 */
#define QCOM_JAM_IPG_POS 0x0000000f
#define QCOM_JAM_IPG_LSB 0
#define QCOM_TPAUSE 0x10
#define QCOM_TCTL 0x20
#define QCOM_SSTCT 0x40
#define QCOM_SIMR 0x80
#define QCOM_RETRY_POS 0x00000f00
#define QCOM_RETRY_LSB 8
#define QCOM_PRLEN_POS 0x0000f000
#define QCOM_PRLEN_LSB 12
#define QCOM_PPAD 0x10000
#define QCOM_POVR 0x20000
#define QCOM_PHUG 0x40000
#define QCOM_MBOF 0x80000
#define QCOM_LCOL_POS 0x0ff00000
#define QCOM_LCOL_LSB 20
#define QCOM_LONG_JAM 0x10000000
/* MAC CTRL2 */
#define QCOM_IPG_DEC_LEN 0x2
#define QCOM_TEST_PAUSE 0x4
#define QCOM_MAC_LPI_TX_IDLE 0x8
#define QCOM_MAC_LOOPBACK 0x10
#define QCOM_IPG_DEC 0x20
#define QCOM_SRS_SEL 0x40
#define QCOM_CRC_RSV 0x80
#define QCOM_MAXFR_POS 0x003fff00
#define QCOM_MAXFR_LSB 8
/* MAC DEBUG_CTRL */
#define QCOM_DBG_IPGR1_POS 0x0000007f
#define QCOM_DBG_IPGR1_LSB 0
#define QCOM_DBG_HIHG_IPG_POS 0x0000ff00
#define QCOM_DBG_HIHG_IPG_LSB 8
#define QCOM_DBG_MAC_IPG_CTRL_POS 0x0000ff00
#define QCOM_DBG_MAC_IPG_CTRL_LSB 20
#define QCOM_DBG_MAC_LEN_CTRL 0x40000000
#define QCOM_DBG_EDxSDFR_TRANS 0x80000000
/* MAC MIB-CTRL*/
#define QCOM_MIB_ENABLE 1
#define QCOM_MIB_RESET 0x2
#define QCOM_MIB_RD_CLR 0x4
#endif /*__QCOM_REG_H__*/
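
The *_POS/*_LSB pairs in this header are plain mask/shift pairs, so reading a field back is the inverse of the insert pattern used by the qcom_set_*() helpers in the previous file. A hedged sketch using the MAXFR field of MAC_CTRL2 (the two constants are taken from this header; the demo_* helper itself is illustrative only):

#include <stdint.h>

#define QCOM_MAXFR_POS 0x003fff00
#define QCOM_MAXFR_LSB 8

/* Extract the 14-bit maximum-frame-size field from a MAC_CTRL2 value. */
static uint32_t demo_maxfr_get(uint32_t ctrl2)
{
    return (ctrl2 & QCOM_MAXFR_POS) >> QCOM_MAXFR_LSB;
}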


@@ -1,30 +0,0 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __SYN_DEV_H__
#define __SYN_DEV_H__
#include <nss_dp_dev.h>
/*
* Subclass for base nss_gmac_hal_dev
*/
struct syn_hal_dev {
struct nss_gmac_hal_dev nghd; /* Base class */
struct nss_dp_gmac_stats stats; /* Stats structure */
};
#endif /*__SYN_DEV_H__*/
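
The layout above matters: because nghd is the first member, a struct nss_gmac_hal_dev pointer handed to the HAL ops can be cast straight back to the containing struct syn_hal_dev (syn_get_stats() in the next file does exactly that). A minimal sketch of the first-member embedding pattern, with stand-in demo_* types:

#include <stddef.h>

struct demo_base { int id; };

struct demo_derived {
    struct demo_base base;  /* must remain the first member */
    int extra;
};

/*
 * Recover the containing object from a pointer to its first member.
 * Because the member sits at offset zero, the plain cast used by the
 * HAL (base pointer -> derived pointer) is equivalent.
 */
static struct demo_derived *demo_to_derived(struct demo_base *bp)
{
    return (struct demo_derived *)((char *)bp -
                                   offsetof(struct demo_derived, base));
}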


@@ -1,959 +0,0 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <fal/fal_mib.h>
#include <fal/fal_port_ctrl.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <nss_dp_hal.h>
#include "syn_dev.h"
#include "syn_reg.h"
#define SYN_STAT(m) offsetof(struct nss_dp_hal_gmac_stats, m)
#define HW_ERR_SIZE sizeof(uint64_t)
/*
* Array to store ethtool statistics
*/
struct syn_ethtool_stats {
uint8_t stat_string[ETH_GSTRING_LEN];
uint64_t stat_offset;
};
/*
* Array of strings describing statistics
*/
static const struct syn_ethtool_stats syn_gstrings_stats[] = {
{"rx_bytes", SYN_STAT(rx_bytes)},
{"rx_packets", SYN_STAT(rx_packets)},
{"rx_errors", SYN_STAT(rx_errors)},
{"rx_receive_errors", SYN_STAT(rx_receive_errors)},
{"rx_descriptor_errors", SYN_STAT(rx_descriptor_errors)},
{"rx_late_collision_errors", SYN_STAT(rx_late_collision_errors)},
{"rx_dribble_bit_errors", SYN_STAT(rx_dribble_bit_errors)},
{"rx_length_errors", SYN_STAT(rx_length_errors)},
{"rx_ip_header_errors", SYN_STAT(rx_ip_header_errors)},
{"rx_ip_payload_errors", SYN_STAT(rx_ip_payload_errors)},
{"rx_no_buffer_errors", SYN_STAT(rx_no_buffer_errors)},
{"rx_transport_csum_bypassed", SYN_STAT(rx_transport_csum_bypassed)},
{"tx_bytes", SYN_STAT(tx_bytes)},
{"tx_packets", SYN_STAT(tx_packets)},
{"tx_collisions", SYN_STAT(tx_collisions)},
{"tx_errors", SYN_STAT(tx_errors)},
{"tx_jabber_timeout_errors", SYN_STAT(tx_jabber_timeout_errors)},
{"tx_frame_flushed_errors", SYN_STAT(tx_frame_flushed_errors)},
{"tx_loss_of_carrier_errors", SYN_STAT(tx_loss_of_carrier_errors)},
{"tx_no_carrier_errors", SYN_STAT(tx_no_carrier_errors)},
{"tx_late_collision_errors", SYN_STAT(tx_late_collision_errors)},
{"tx_excessive_collision_errors", SYN_STAT(tx_excessive_collision_errors)},
{"tx_excessive_deferral_errors", SYN_STAT(tx_excessive_deferral_errors)},
{"tx_underflow_errors", SYN_STAT(tx_underflow_errors)},
{"tx_ip_header_errors", SYN_STAT(tx_ip_header_errors)},
{"tx_ip_payload_errors", SYN_STAT(tx_ip_payload_errors)},
{"tx_dropped", SYN_STAT(tx_dropped)},
{"rx_missed", SYN_STAT(rx_missed)},
{"fifo_overflows", SYN_STAT(fifo_overflows)},
{"rx_scatter_errors", SYN_STAT(rx_scatter_errors)},
{"tx_ts_create_errors", SYN_STAT(tx_ts_create_errors)},
{"pmt_interrupts", SYN_STAT(hw_errs[0])},
{"mmc_interrupts", SYN_STAT(hw_errs[0]) + (1 * HW_ERR_SIZE)},
{"line_interface_interrupts", SYN_STAT(hw_errs[0]) + (2 * HW_ERR_SIZE)},
{"fatal_bus_error_interrupts", SYN_STAT(hw_errs[0]) + (3 * HW_ERR_SIZE)},
{"rx_buffer_unavailable_interrupts", SYN_STAT(hw_errs[0]) + (4 * HW_ERR_SIZE)},
{"rx_process_stopped_interrupts", SYN_STAT(hw_errs[0]) + (5 * HW_ERR_SIZE)},
{"tx_underflow_interrupts", SYN_STAT(hw_errs[0]) + (6 * HW_ERR_SIZE)},
{"rx_overflow_interrupts", SYN_STAT(hw_errs[0]) + (7 * HW_ERR_SIZE)},
{"tx_jabber_timeout_interrutps", SYN_STAT(hw_errs[0]) + (8 * HW_ERR_SIZE)},
{"tx_process_stopped_interrutps", SYN_STAT(hw_errs[0]) + (9 * HW_ERR_SIZE)},
{"gmac_total_ticks", SYN_STAT(gmac_total_ticks)},
{"gmac_worst_case_ticks", SYN_STAT(gmac_worst_case_ticks)},
{"gmac_iterations", SYN_STAT(gmac_iterations)},
{"tx_pause_frames", SYN_STAT(tx_pause_frames)},
{"mmc_rx_overflow_errors", SYN_STAT(mmc_rx_overflow_errors)},
{"mmc_rx_watchdog_timeout_errors", SYN_STAT(mmc_rx_watchdog_timeout_errors)},
{"mmc_rx_crc_errors", SYN_STAT(mmc_rx_crc_errors)},
{"mmc_rx_ip_header_errors", SYN_STAT(mmc_rx_ip_header_errors)},
{"mmc_rx_octets_g", SYN_STAT(mmc_rx_octets_g)},
{"mmc_rx_ucast_frames", SYN_STAT(mmc_rx_ucast_frames)},
{"mmc_rx_bcast_frames", SYN_STAT(mmc_rx_bcast_frames)},
{"mmc_rx_mcast_frames", SYN_STAT(mmc_rx_mcast_frames)},
{"mmc_rx_undersize", SYN_STAT(mmc_rx_undersize)},
{"mmc_rx_oversize", SYN_STAT(mmc_rx_oversize)},
{"mmc_rx_jabber", SYN_STAT(mmc_rx_jabber)},
{"mmc_rx_octets_gb", SYN_STAT(mmc_rx_octets_gb)},
{"mmc_rx_frag_frames_g", SYN_STAT(mmc_rx_frag_frames_g)},
{"mmc_tx_octets_g", SYN_STAT(mmc_tx_octets_g)},
{"mmc_tx_ucast_frames", SYN_STAT(mmc_tx_ucast_frames)},
{"mmc_tx_bcast_frames", SYN_STAT(mmc_tx_bcast_frames)},
{"mmc_tx_mcast_frames", SYN_STAT(mmc_tx_mcast_frames)},
{"mmc_tx_deferred", SYN_STAT(mmc_tx_deferred)},
{"mmc_tx_single_col", SYN_STAT(mmc_tx_single_col)},
{"mmc_tx_multiple_col", SYN_STAT(mmc_tx_multiple_col)},
{"mmc_tx_octets_gb", SYN_STAT(mmc_tx_octets_gb)},
};
#define SYN_STATS_LEN ARRAY_SIZE(syn_gstrings_stats)
/*
* syn_set_rx_flow_ctrl()
*/
static inline void syn_set_rx_flow_ctrl(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, SYN_MAC_FLOW_CONTROL,
SYN_MAC_FC_RX_FLOW_CONTROL);
}
/*
* syn_clear_rx_flow_ctrl()
*/
static inline void syn_clear_rx_flow_ctrl(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, SYN_MAC_FLOW_CONTROL,
SYN_MAC_FC_RX_FLOW_CONTROL);
}
/*
* syn_set_tx_flow_ctrl()
*/
static inline void syn_set_tx_flow_ctrl(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, SYN_MAC_FLOW_CONTROL,
SYN_MAC_FC_TX_FLOW_CONTROL);
}
/*
* syn_send_tx_pause_frame()
*/
static inline void syn_send_tx_pause_frame(struct nss_gmac_hal_dev *nghd)
{
syn_set_tx_flow_ctrl(nghd);
hal_set_reg_bits(nghd, SYN_MAC_FLOW_CONTROL,
SYN_MAC_FC_SEND_PAUSE_FRAME);
}
/*
* syn_clear_tx_flow_ctrl()
*/
static inline void syn_clear_tx_flow_ctrl(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, SYN_MAC_FLOW_CONTROL,
SYN_MAC_FC_TX_FLOW_CONTROL);
}
/*
* syn_rx_enable()
*/
static inline void syn_rx_enable(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, SYN_MAC_CONFIGURATION, SYN_MAC_RX);
hal_set_reg_bits(nghd, SYN_MAC_FRAME_FILTER, SYN_MAC_FILTER_OFF);
}
/*
* syn_tx_enable()
*/
static inline void syn_tx_enable(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, SYN_MAC_CONFIGURATION, SYN_MAC_TX);
}
/************Ip checksum offloading APIs*************/
/*
* syn_enable_rx_chksum_offload()
* Enable IPv4 header and IPv4/IPv6 TCP/UDP checksum calculation by GMAC.
*/
static inline void syn_enable_rx_chksum_offload(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd,
SYN_MAC_CONFIGURATION, SYN_MAC_RX_IPC_OFFLOAD);
}
/*
* syn_disable_rx_chksum_offload()
* Disable the IP checksum offloading in receive path.
*/
static inline void syn_disable_rx_chksum_offload(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd,
SYN_MAC_CONFIGURATION, SYN_MAC_RX_IPC_OFFLOAD);
}
/*
* syn_rx_tcpip_chksum_drop_enable()
* Instruct the DMA to drop the packets that fail TCP/IP checksum.
*
 * This instructs the receive DMA engine to drop a received packet if it
 * fails the TCP/IP checksum in hardware. Valid only when full checksum
 * offloading is enabled (type-2).
*/
static inline void syn_rx_tcpip_chksum_drop_enable(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd,
SYN_DMA_OPERATION_MODE, SYN_DMA_DISABLE_DROP_TCP_CS);
}
/*******************Ip checksum offloading APIs**********************/
/*
* syn_ipc_offload_init()
* Initialize IPC Checksum offloading.
*/
static inline void syn_ipc_offload_init(struct nss_gmac_hal_dev *nghd)
{
struct nss_dp_dev *dp_priv;
dp_priv = netdev_priv(nghd->netdev);
if (test_bit(__NSS_DP_RXCSUM, &dp_priv->flags)) {
/*
* Enable the offload engine in the receive path
*/
syn_enable_rx_chksum_offload(nghd);
/*
* DMA drops the packets if error in encapsulated ethernet
* payload.
*/
syn_rx_tcpip_chksum_drop_enable(nghd);
netdev_dbg(nghd->netdev, "%s: enable Rx checksum\n", __func__);
} else {
syn_disable_rx_chksum_offload(nghd);
netdev_dbg(nghd->netdev, "%s: disable Rx checksum\n", __func__);
}
}
/*
* syn_disable_mac_interrupt()
* Disable all the interrupts.
*/
static inline void syn_disable_mac_interrupt(struct nss_gmac_hal_dev *nghd)
{
hal_write_reg(nghd->mac_base, SYN_INTERRUPT_MASK, 0xffffffff);
}
/*
* syn_disable_mmc_tx_interrupt()
* Disable the MMC Tx interrupt.
*
* The MMC tx interrupts are masked out as per the mask specified.
*/
static inline void syn_disable_mmc_tx_interrupt(struct nss_gmac_hal_dev *nghd,
uint32_t mask)
{
hal_set_reg_bits(nghd, SYN_MMC_TX_INTERRUPT_MASK, mask);
}
/*
* syn_disable_mmc_rx_interrupt()
* Disable the MMC Rx interrupt.
*
* The MMC rx interrupts are masked out as per the mask specified.
*/
static inline void syn_disable_mmc_rx_interrupt(struct nss_gmac_hal_dev *nghd,
uint32_t mask)
{
hal_set_reg_bits(nghd, SYN_MMC_RX_INTERRUPT_MASK, mask);
}
/*
* syn_disable_mmc_ipc_rx_interrupt()
* Disable the MMC ipc rx checksum offload interrupt.
*
* The MMC ipc rx checksum offload interrupts are masked out as
* per the mask specified.
*/
static inline void syn_disable_mmc_ipc_rx_interrupt(struct nss_gmac_hal_dev *nghd,
uint32_t mask)
{
hal_set_reg_bits(nghd, SYN_MMC_IPC_RX_INTR_MASK, mask);
}
/*
* syn_disable_dma_interrupt()
* Disables all DMA interrupts.
*/
void syn_disable_dma_interrupt(struct nss_gmac_hal_dev *nghd)
{
hal_write_reg(nghd->mac_base, SYN_DMA_INT_ENABLE, SYN_DMA_INT_DISABLE);
}
/*
* syn_enable_dma_interrupt()
* Enables all DMA interrupts.
*/
void syn_enable_dma_interrupt(struct nss_gmac_hal_dev *nghd)
{
hal_write_reg(nghd->mac_base, SYN_DMA_INT_ENABLE, SYN_DMA_INT_EN);
}
/*
* syn_disable_interrupt_all()
* Disable all the interrupts.
*/
static inline void syn_disable_interrupt_all(struct nss_gmac_hal_dev *nghd)
{
syn_disable_mac_interrupt(nghd);
syn_disable_dma_interrupt(nghd);
syn_disable_mmc_tx_interrupt(nghd, 0xFFFFFFFF);
syn_disable_mmc_rx_interrupt(nghd, 0xFFFFFFFF);
syn_disable_mmc_ipc_rx_interrupt(nghd, 0xFFFFFFFF);
}
/*
* syn_dma_bus_mode_init()
* Function to program DMA bus mode register.
*/
static inline void syn_dma_bus_mode_init(struct nss_gmac_hal_dev *nghd)
{
hal_write_reg(nghd->mac_base, SYN_DMA_BUS_MODE, SYN_DMA_BUS_MODE_VAL);
}
/*
* syn_clear_dma_status()
* Clear all the pending dma interrupts.
*/
void syn_clear_dma_status(struct nss_gmac_hal_dev *nghd)
{
uint32_t data;
data = hal_read_reg(nghd->mac_base, SYN_DMA_STATUS);
hal_write_reg(nghd->mac_base, SYN_DMA_STATUS, data);
}
/*
* syn_enable_dma_rx()
* Enable Rx GMAC operation
*/
void syn_enable_dma_rx(struct nss_gmac_hal_dev *nghd)
{
uint32_t data;
data = hal_read_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE);
data |= SYN_DMA_RX_START;
hal_write_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE, data);
}
/*
* syn_disable_dma_rx()
* Disable Rx GMAC operation
*/
void syn_disable_dma_rx(struct nss_gmac_hal_dev *nghd)
{
uint32_t data;
data = hal_read_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE);
data &= ~SYN_DMA_RX_START;
hal_write_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE, data);
}
/*
* syn_enable_dma_tx()
 * Enable Tx GMAC operation
*/
void syn_enable_dma_tx(struct nss_gmac_hal_dev *nghd)
{
uint32_t data;
data = hal_read_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE);
data |= SYN_DMA_TX_START;
hal_write_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE, data);
}
/*
* syn_disable_dma_tx()
 * Disable Tx GMAC operation
*/
void syn_disable_dma_tx(struct nss_gmac_hal_dev *nghd)
{
uint32_t data;
data = hal_read_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE);
data &= ~SYN_DMA_TX_START;
hal_write_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE, data);
}
/*
* syn_resume_dma_tx
* Resumes the DMA Transmission.
*/
void syn_resume_dma_tx(struct nss_gmac_hal_dev *nghd)
{
hal_write_reg(nghd->mac_base, SYN_DMA_TX_POLL_DEMAND, 0);
}
/*
* syn_get_rx_missed
* Get Rx missed errors
*/
uint32_t syn_get_rx_missed(struct nss_gmac_hal_dev *nghd)
{
uint32_t missed_frame_buff_overflow;
missed_frame_buff_overflow = hal_read_reg(nghd->mac_base, SYN_DMA_MISSED_FRAME_AND_BUFF_OVERFLOW_COUNTER);
return missed_frame_buff_overflow & 0xFFFF;
}
/*
* syn_get_fifo_overflows
* Get FIFO overflows
*/
uint32_t syn_get_fifo_overflows(struct nss_gmac_hal_dev *nghd)
{
uint32_t missed_frame_buff_overflow;
missed_frame_buff_overflow = hal_read_reg(nghd->mac_base, SYN_DMA_MISSED_FRAME_AND_BUFF_OVERFLOW_COUNTER);
return (missed_frame_buff_overflow >> 17) & 0x7ff;
}
/*
* syn_init_tx_desc_base()
* Programs the Dma Tx Base address with the starting address of the descriptor ring or chain.
*/
void syn_init_tx_desc_base(struct nss_gmac_hal_dev *nghd, uint32_t tx_desc_dma)
{
hal_write_reg(nghd->mac_base, SYN_DMA_TX_DESCRIPTOR_LIST_ADDRESS, tx_desc_dma);
}
/*
* syn_init_rx_desc_base()
* Programs the Dma Rx Base address with the starting address of the descriptor ring or chain.
*/
void syn_init_rx_desc_base(struct nss_gmac_hal_dev *nghd, uint32_t rx_desc_dma)
{
hal_write_reg(nghd->mac_base, SYN_DMA_RX_DESCRIPTOR_LIST_ADDRESS, rx_desc_dma);
}
/*
* syn_dma_axi_bus_mode_init()
* Function to program DMA AXI bus mode register.
*/
static inline void syn_dma_axi_bus_mode_init(struct nss_gmac_hal_dev *nghd)
{
hal_write_reg(nghd->mac_base, SYN_DMA_AXI_BUS_MODE,
SYN_DMA_AXI_BUS_MODE_VAL);
}
/*
* syn_dma_operation_mode_init()
* Function to program DMA Operation Mode register.
*/
static inline void syn_dma_operation_mode_init(struct nss_gmac_hal_dev *nghd)
{
hal_write_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE, SYN_DMA_OMR);
}
/*
* syn_broadcast_enable()
* Enables Broadcast frames.
*
 * When enabled, the address filtering module passes all incoming broadcast frames.
*/
static inline void syn_broadcast_enable(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, SYN_MAC_FRAME_FILTER, SYN_MAC_BROADCAST);
}
/*
* syn_multicast_enable()
* Enables Multicast frames.
*
 * When enabled, all multicast frames are passed.
*/
static inline void syn_multicast_enable(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, SYN_MAC_FRAME_FILTER, SYN_MAC_MULTICAST_FILTER);
}
/*
* syn_promisc_enable()
 * Enables promiscuous mode.
 *
 * When enabled, the address filter module passes all incoming frames
 * regardless of their destination and source addresses.
*/
static inline void syn_promisc_enable(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, SYN_MAC_FRAME_FILTER, SYN_MAC_FILTER_OFF);
hal_set_reg_bits(nghd, SYN_MAC_FRAME_FILTER,
SYN_MAC_PROMISCUOUS_MODE_ON);
}
/*
* syn_get_stats()
*/
static int syn_get_stats(struct nss_gmac_hal_dev *nghd)
{
struct nss_dp_dev *dp_priv;
struct syn_hal_dev *shd;
struct nss_dp_gmac_stats *stats;
BUG_ON(nghd == NULL);
shd = (struct syn_hal_dev *)nghd;
stats = &(shd->stats);
dp_priv = netdev_priv(nghd->netdev);
if (!dp_priv->data_plane_ops)
return -1;
dp_priv->data_plane_ops->get_stats(dp_priv->dpc, stats);
return 0;
}
/*
* syn_rx_flow_control()
*/
static void syn_rx_flow_control(struct nss_gmac_hal_dev *nghd,
bool enabled)
{
BUG_ON(nghd == NULL);
if (enabled)
syn_set_rx_flow_ctrl(nghd);
else
syn_clear_rx_flow_ctrl(nghd);
}
/*
* syn_tx_flow_control()
*/
static void syn_tx_flow_control(struct nss_gmac_hal_dev *nghd,
bool enabled)
{
BUG_ON(nghd == NULL);
if (enabled)
syn_set_tx_flow_ctrl(nghd);
else
syn_clear_tx_flow_ctrl(nghd);
}
/*
* syn_get_max_frame_size()
*/
static int32_t syn_get_max_frame_size(struct nss_gmac_hal_dev *nghd)
{
int ret;
uint32_t mtu;
BUG_ON(nghd == NULL);
ret = fal_port_max_frame_size_get(0, nghd->mac_id, &mtu);
if (!ret)
return mtu;
return ret;
}
/*
* syn_set_max_frame_size()
*/
static int32_t syn_set_max_frame_size(struct nss_gmac_hal_dev *nghd,
uint32_t val)
{
BUG_ON(nghd == NULL);
return fal_port_max_frame_size_set(0, nghd->mac_id, val);
}
/*
* syn_set_mac_speed()
*/
static int32_t syn_set_mac_speed(struct nss_gmac_hal_dev *nghd,
uint32_t mac_speed)
{
struct net_device *netdev;
BUG_ON(nghd == NULL);
netdev = nghd->netdev;
netdev_warn(netdev, "API deprecated\n");
return 0;
}
/*
* syn_get_mac_speed()
*/
static uint32_t syn_get_mac_speed(struct nss_gmac_hal_dev *nghd)
{
struct net_device *netdev;
BUG_ON(nghd == NULL);
netdev = nghd->netdev;
netdev_warn(netdev, "API deprecated\n");
return 0;
}
/*
* syn_set_duplex_mode()
*/
static void syn_set_duplex_mode(struct nss_gmac_hal_dev *nghd,
uint8_t duplex_mode)
{
struct net_device *netdev;
BUG_ON(nghd == NULL);
netdev = nghd->netdev;
netdev_warn(netdev, "API deprecated\n");
}
/*
* syn_get_duplex_mode()
*/
static uint8_t syn_get_duplex_mode(struct nss_gmac_hal_dev *nghd)
{
struct net_device *netdev;
BUG_ON(nghd == NULL);
netdev = nghd->netdev;
netdev_warn(netdev, "API deprecated\n");
return 0;
}
/*
* syn_get_netdev_stats()
*/
static int syn_get_netdev_stats(struct nss_gmac_hal_dev *nghd,
struct rtnl_link_stats64 *stats)
{
struct syn_hal_dev *shd;
struct nss_dp_hal_gmac_stats *ndo_stats;
BUG_ON(nghd == NULL);
shd = (struct syn_hal_dev *)nghd;
ndo_stats = &(shd->stats.stats);
/*
* Read stats from the registered dataplane.
*/
if (syn_get_stats(nghd))
return -1;
stats->rx_packets = ndo_stats->rx_packets;
stats->rx_bytes = ndo_stats->rx_bytes;
stats->rx_errors = ndo_stats->rx_errors;
stats->rx_dropped = ndo_stats->rx_errors;
stats->rx_length_errors = ndo_stats->rx_length_errors;
stats->rx_over_errors = ndo_stats->mmc_rx_overflow_errors;
stats->rx_crc_errors = ndo_stats->mmc_rx_crc_errors;
stats->rx_frame_errors = ndo_stats->rx_dribble_bit_errors;
stats->rx_fifo_errors = ndo_stats->fifo_overflows;
stats->rx_missed_errors = ndo_stats->rx_missed;
stats->collisions = ndo_stats->tx_collisions + ndo_stats->rx_late_collision_errors;
stats->tx_packets = ndo_stats->tx_packets;
stats->tx_bytes = ndo_stats->tx_bytes;
stats->tx_errors = ndo_stats->tx_errors;
stats->tx_dropped = ndo_stats->tx_dropped;
stats->tx_carrier_errors = ndo_stats->tx_loss_of_carrier_errors + ndo_stats->tx_no_carrier_errors;
stats->tx_fifo_errors = ndo_stats->tx_underflow_errors;
stats->tx_window_errors = ndo_stats->tx_late_collision_errors;
return 0;
}
/*
* syn_get_eth_stats()
*/
static int32_t syn_get_eth_stats(struct nss_gmac_hal_dev *nghd,
uint64_t *data)
{
struct syn_hal_dev *shd;
struct nss_dp_gmac_stats *stats;
uint8_t *p = NULL;
int i;
BUG_ON(nghd == NULL);
shd = (struct syn_hal_dev *)nghd;
stats = &(shd->stats);
/*
* Read stats from the registered dataplane.
*/
if (syn_get_stats(nghd))
return -1;
for (i = 0; i < SYN_STATS_LEN; i++) {
p = ((uint8_t *)(stats) +
syn_gstrings_stats[i].stat_offset);
data[i] = *(uint32_t *)p;
}
return 0;
}
/*
* syn_get_strset_count()
*/
static int32_t syn_get_strset_count(struct nss_gmac_hal_dev *nghd,
int32_t sset)
{
struct net_device *netdev;
BUG_ON(nghd == NULL);
netdev = nghd->netdev;
switch (sset) {
case ETH_SS_STATS:
return SYN_STATS_LEN;
}
netdev_dbg(netdev, "%s: Invalid string set\n", __func__);
return -EPERM;
}
/*
* syn_get_strings()
*/
static int32_t syn_get_strings(struct nss_gmac_hal_dev *nghd,
int32_t stringset, uint8_t *data)
{
struct net_device *netdev;
int i;
BUG_ON(nghd == NULL);
netdev = nghd->netdev;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < SYN_STATS_LEN; i++) {
memcpy(data, syn_gstrings_stats[i].stat_string,
ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
break;
default:
netdev_dbg(netdev, "%s: Invalid string set\n", __func__);
return -EPERM;
}
return 0;
}
/*
* syn_send_pause_frame()
*/
static void syn_send_pause_frame(struct nss_gmac_hal_dev *nghd)
{
BUG_ON(nghd == NULL);
syn_send_tx_pause_frame(nghd);
}
/*
* syn_set_mac_address()
*/
static void syn_set_mac_address(struct nss_gmac_hal_dev *nghd,
uint8_t *macaddr)
{
uint32_t data;
BUG_ON(nghd == NULL);
if (!macaddr) {
netdev_warn(nghd->netdev, "macaddr is not valid.\n");
return;
}
data = (macaddr[5] << 8) | macaddr[4] | SYN_MAC_ADDR_HIGH_AE;
hal_write_reg(nghd->mac_base, SYN_MAC_ADDR0_HIGH, data);
data = (macaddr[3] << 24) | (macaddr[2] << 16) | (macaddr[1] << 8)
| macaddr[0];
hal_write_reg(nghd->mac_base, SYN_MAC_ADDR0_LOW, data);
}
/*
* syn_get_mac_address()
*/
static void syn_get_mac_address(struct nss_gmac_hal_dev *nghd,
uint8_t *macaddr)
{
uint32_t data;
BUG_ON(nghd == NULL);
if (!macaddr) {
netdev_warn(nghd->netdev, "macaddr is not valid.\n");
return;
}
data = hal_read_reg(nghd->mac_base, SYN_MAC_ADDR0_HIGH);
macaddr[5] = (data >> 8) & 0xff;
macaddr[4] = (data) & 0xff;
data = hal_read_reg(nghd->mac_base, SYN_MAC_ADDR0_LOW);
macaddr[3] = (data >> 24) & 0xff;
macaddr[2] = (data >> 16) & 0xff;
macaddr[1] = (data >> 8) & 0xff;
macaddr[0] = (data) & 0xff;
}
/*
* syn_dma_init()
* Initialize settings for GMAC DMA and AXI bus.
*/
static void syn_dma_init(struct nss_gmac_hal_dev *nghd)
{
struct net_device *ndev = nghd->netdev;
struct nss_dp_dev *dp_priv = netdev_priv(ndev);
/*
* Enable SoC specific GMAC clocks.
*/
nss_dp_hal_clk_enable(dp_priv);
/*
* Configure DMA registers.
*/
syn_dma_bus_mode_init(nghd);
syn_dma_axi_bus_mode_init(nghd);
syn_dma_operation_mode_init(nghd);
}
/*
* syn_init()
*/
static void *syn_init(struct gmac_hal_platform_data *gmacpdata)
{
struct syn_hal_dev *shd = NULL;
struct net_device *ndev = NULL;
struct nss_dp_dev *dp_priv = NULL;
struct resource *res;
ndev = gmacpdata->netdev;
dp_priv = netdev_priv(ndev);
res = platform_get_resource(dp_priv->pdev, IORESOURCE_MEM, 0);
if (!res) {
netdev_dbg(ndev, "Resource get failed.\n");
return NULL;
}
shd = (struct syn_hal_dev *)devm_kzalloc(&dp_priv->pdev->dev,
sizeof(struct syn_hal_dev),
GFP_KERNEL);
if (!shd) {
netdev_dbg(ndev, "kzalloc failed. Returning...\n");
return NULL;
}
shd->nghd.mac_reg_len = resource_size(res);
shd->nghd.memres = devm_request_mem_region(&dp_priv->pdev->dev,
res->start,
resource_size(res),
ndev->name);
if (!shd->nghd.memres) {
netdev_dbg(ndev, "Request mem region failed. Returning...\n");
devm_kfree(&dp_priv->pdev->dev, shd);
return NULL;
}
/*
* Save netdev context in syn HAL context
*/
shd->nghd.netdev = gmacpdata->netdev;
shd->nghd.mac_id = gmacpdata->macid;
shd->nghd.duplex_mode = DUPLEX_FULL;
set_bit(__NSS_DP_RXCSUM, &dp_priv->flags);
/*
* Populate the mac base addresses
*/
shd->nghd.mac_base =
devm_ioremap_nocache(&dp_priv->pdev->dev, res->start,
resource_size(res));
if (!shd->nghd.mac_base) {
netdev_dbg(ndev, "ioremap fail.\n");
devm_kfree(&dp_priv->pdev->dev, shd);
return NULL;
}
spin_lock_init(&shd->nghd.slock);
netdev_dbg(ndev, "ioremap OK.Size 0x%x Ndev base 0x%lx macbase 0x%px\n",
gmacpdata->reg_len,
ndev->base_addr,
shd->nghd.mac_base);
syn_disable_interrupt_all(&shd->nghd);
syn_dma_init(&shd->nghd);
syn_ipc_offload_init(&shd->nghd);
syn_promisc_enable(&shd->nghd);
syn_broadcast_enable(&shd->nghd);
syn_multicast_enable(&shd->nghd);
syn_rx_enable(&shd->nghd);
syn_tx_enable(&shd->nghd);
/*
* Reset MIB Stats
*/
if (fal_mib_port_flush_counters(0, shd->nghd.mac_id)) {
netdev_dbg(ndev, "MIB stats Reset fail.\n");
}
return (struct nss_gmac_hal_dev *)shd;
}
/*
* syn_exit()
*/
static void syn_exit(struct nss_gmac_hal_dev *nghd)
{
struct nss_dp_dev *dp_priv = NULL;
dp_priv = netdev_priv(nghd->netdev);
devm_iounmap(&dp_priv->pdev->dev,
(void *)nghd->mac_base);
devm_release_mem_region(&dp_priv->pdev->dev,
(nghd->memres)->start,
nghd->mac_reg_len);
nghd->memres = NULL;
nghd->mac_base = NULL;
}
struct nss_gmac_hal_ops syn_hal_ops = {
.init = &syn_init,
.start = NULL,
.stop = NULL,
.exit = &syn_exit,
.setmacaddr = &syn_set_mac_address,
.getmacaddr = &syn_get_mac_address,
.rxflowcontrol = &syn_rx_flow_control,
.txflowcontrol = &syn_tx_flow_control,
.setspeed = &syn_set_mac_speed,
.getspeed = &syn_get_mac_speed,
.setduplex = &syn_set_duplex_mode,
.getduplex = &syn_get_duplex_mode,
.setmaxframe = &syn_set_max_frame_size,
.getmaxframe = &syn_get_max_frame_size,
.getndostats = &syn_get_netdev_stats,
.getssetcount = &syn_get_strset_count,
.getstrings = &syn_get_strings,
.getethtoolstats = &syn_get_eth_stats,
.sendpause = &syn_send_pause_frame,
};
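
The syn_gstrings_stats[] table and syn_get_eth_stats() above use the usual ethtool pattern: record each counter's offsetof() within the stats structure, then fill the flat data[] array by pointer arithmetic. A condensed standalone sketch of that pattern follows (stand-in demo_* structure and counter names, not the driver's own):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct demo_stats {
    uint64_t rx_packets;
    uint64_t tx_packets;
};

struct demo_stat_entry {
    const char *name;
    size_t offset;
};

static const struct demo_stat_entry demo_table[] = {
    { "rx_packets", offsetof(struct demo_stats, rx_packets) },
    { "tx_packets", offsetof(struct demo_stats, tx_packets) },
};

/* Walk the table and copy each counter into the flat ethtool data[] array. */
static void demo_fill_ethtool(const struct demo_stats *s, uint64_t *data)
{
    size_t i;

    for (i = 0; i < sizeof(demo_table) / sizeof(demo_table[0]); i++)
        memcpy(&data[i], (const uint8_t *)s + demo_table[i].offset,
               sizeof(data[i]));
}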


@@ -1,531 +0,0 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __SYN_REG_H__
#define __SYN_REG_H__
/*
* MAC register offset
*/
#define SYN_MAC_CONFIGURATION 0x0000
#define SYN_MAC_FRAME_FILTER 0x0004
#define SYN_MAC_FLOW_CONTROL 0x0018
#define SYN_VLAN_TAG 0x001C
#define SYN_VERSION 0x0020
#define SYN_DEBUG 0x0024
#define SYN_REMOTE_WAKE_UP_FRAME_FILTER 0x0028
#define SYN_PMT_CONTROL_STATUS 0x002C
#define SYN_LPI_CONTROL_STATUS 0x0030
#define SYN_LPI_TIMERS_CONTROL 0x0034
#define SYN_INTERRUPT_STATUS 0x0038
#define SYN_INTERRUPT_MASK 0x003C
/*
* MAC address register offset
*/
#define SYN_MAC_ADDR0_HIGH 0x0040
#define SYN_MAC_ADDR0_LOW 0x0044
#define SYN_MAC_ADDR1_HIGH 0x0048
#define SYN_MAC_ADDR1_LOW 0x004C
#define SYN_MAC_ADDR2_HIGH 0x0050
#define SYN_MAC_ADDR2_LOW 0x0054
#define SYN_MAC_ADDR3_HIGH 0x0058
#define SYN_MAC_ADDR3_LOW 0x005C
#define SYN_MAC_ADDR4_HIGH 0x0060
#define SYN_MAC_ADDR4_LOW 0x0064
/*
* Watchdog timeout register
*/
#define SYN_WDOG_TIMEOUT 0x00DC
/*
* Mac Management Counters (MMC) register offset
*/
#define SYN_MMC_CONTROL 0x0100
#define SYN_MMC_RX_INTERRUPT 0x0104
#define SYN_MMC_TX_INTERRUPT 0x0108
#define SYN_MMC_RX_INTERRUPT_MASK 0x010C
#define SYN_MMC_TX_INTERRUPT_MASK 0x0110
#define SYN_MMC_IPC_RX_INTR_MASK 0x0200
/*
* DMA Register offset
*/
#define SYN_DMA_BUS_MODE 0x1000
#define SYN_DMA_TX_POLL_DEMAND 0x1004
#define SYN_DMA_RX_POLL_DEMAND 0x1008
#define SYN_DMA_RX_DESCRIPTOR_LIST_ADDRESS 0x100C
#define SYN_DMA_TX_DESCRIPTOR_LIST_ADDRESS 0x1010
#define SYN_DMA_STATUS 0x1014
#define SYN_DMA_OPERATION_MODE 0x1018
#define SYN_DMA_INT_ENABLE 0x101C
#define SYN_DMA_MISSED_FRAME_AND_BUFF_OVERFLOW_COUNTER 0x1020
#define SYN_DMA_RX_INTERRUPT_WATCHDOG_TIMER 0x1024
#define SYN_DMA_AXI_BUS_MODE 0x1028
#define SYN_DMA_AHB_OR_AXI_STATUS 0x102C
#define SYN_DMA_CURRENT_HOST_TX_DESCRIPTOR 0x1048
#define SYN_DMA_CURRENT_HOST_RX_DESCRIPTOR 0x104C
#define SYN_DMA_CURRENT_HOST_TX_BUFFER_ADDRESS 0x1050
#define SYN_DMA_CURRENT_HOST_RX_BUFFER_ADDRESS 0x1054
/*
* Optional HW feature register
*/
#define SYN_HW_FEATURE 0x1058
/*
* Register Bit Definitions
*/
/*
* SYN_MAC_CONFIGURATION = 0x0000, MAC config Register Layout
*/
enum syn_mac_config_reg {
SYN_MAC_TWOKPE = 0x08000000, /* Support for 2K packets */
SYN_MAC_TWOKPE_ENABLE = 0x08000000,
SYN_MAC_TWOKPE_DISABLE = 0x00000000,
SYN_MAC_CST = 0x02000000, /* (CST) CRC Stripping for Type Frames */
SYN_MAC_CST_ENABLE = 0x02000000,
SYN_MAC_CST_DISABLE = 0x02000000,
SYN_MAC_TC = 0x01000000, /* (TC) Transmit configuration */
SYN_MAC_WATCHDOG = 0x00800000,
SYN_MAC_WATCHDOG_ENABLE = 0x00000000, /* Enable watchdog timer */
SYN_MAC_WATCHDOG_DISABLE = 0x00800000, /* (WD)Disable watchdog timer on Rx */
SYN_MAC_JABBER = 0x00400000,
SYN_MAC_JABBER_ENABLE = 0x00000000, /* Enable jabber timer */
SYN_MAC_JABBER_DISABLE = 0x00400000, /* (JD)Disable jabber timer on Tx */
SYN_MAC_FRAME_BURST = 0x00200000,
SYN_MAC_FRAME_BURST_ENABLE = 0x00200000, /* (BE)Enable frame bursting
during Tx */
SYN_MAC_FRAME_BURST_DISABLE = 0x00000000, /* Disable frame bursting */
SYN_MAC_JUMBO_FRAME = 0x00100000,
SYN_MAC_JUMBO_FRAME_ENABLE = 0x00100000, /* (JE)Enable jumbo frame for Rx */
SYN_MAC_JUMBO_FRAME_DISABLE = 0x00000000, /* Disable jumbo frame */
SYN_MAC_INTER_FRAME_GAP7 = 0x000E0000, /* (IFG) Config7 - 40bit times */
SYN_MAC_INTER_FRAME_GAP6 = 0x000C0000, /* (IFG) Config6 - 48bit times */
SYN_MAC_INTER_FRAME_GAP5 = 0x000A0000, /* (IFG) Config5 - 56bit times */
SYN_MAC_INTER_FRAME_GAP4 = 0x00080000, /* (IFG) Config4 - 64bit times */
SYN_MAC_INTER_FRAME_GAP3 = 0x00060000, /* (IFG) Config3 - 72bit times */
SYN_MAC_INTER_FRAME_GAP2 = 0x00040000, /* (IFG) Config2 - 80bit times */
SYN_MAC_INTER_FRAME_GAP1 = 0x00020000, /* (IFG) Config1 - 88bit times */
SYN_MAC_INTER_FRAME_GAP0 = 0x00000000, /* (IFG) Config0 - 96bit times */
SYN_MAC_DISABLE_CRS = 0x00010000, /* (DCRS) Disable Carrier Sense During Transmission */
SYN_MAC_MII_GMII = 0x00008000,
SYN_MAC_SELECT_MII = 0x00008000, /* (PS)Port Select-MII mode */
SYN_MAC_SELECT_GMII = 0x00000000, /* GMII mode */
SYN_MAC_FE_SPEED100 = 0x00004000, /* (FES)Fast Ethernet speed 100Mbps */
SYN_MAC_FE_SPEED = 0x00004000, /* (FES)Fast Ethernet speed 100Mbps */
SYN_MAC_FE_SPEED10 = 0x00000000, /* (FES)Fast Ethernet speed 10Mbps */
SYN_MAC_RX_OWN = 0x00002000,
SYN_MAC_DISABLE_RX_OWN = 0x00002000, /* (DO)Disable receive own packets */
SYN_MAC_ENABLE_RX_OWN = 0x00000000, /* Enable receive own packets */
SYN_MAC_LOOPBACK = 0x00001000,
SYN_MAC_LOOPBACK_ON = 0x00001000, /* (LM)Loopback mode for GMII/MII */
SYN_MAC_LOOPBACK_OFF = 0x00000000, /* Normal mode */
SYN_MAC_DUPLEX = 0x00000800,
SYN_MAC_FULL_DUPLEX = 0x00000800, /* (DM)Full duplex mode */
SYN_MAC_HALF_DUPLEX = 0x00000000, /* Half duplex mode */
SYN_MAC_RX_IPC_OFFLOAD = 0x00000400, /* IPC checksum offload */
SYN_MAC_RX_IPC_OFFLOAD_ENABLE = 0x00000400,
SYN_MAC_RX_IPC_OFFLOAD_DISABLE = 0x00000000,
SYN_MAC_RETRY = 0x00000200,
SYN_MAC_RETRY_DISABLE = 0x00000200, /* (DR)Disable Retry */
SYN_MAC_RETRY_ENABLE = 0x00000000, /* Enable retransmission as per BL */
SYN_MAC_LINK_UP = 0x00000100, /* (LUD)Link UP */
SYN_MAC_LINK_DOWN = 0x00000000, /* Link Down */
SYN_MAC_PAD_CRC_STRIP = 0x00000080,
SYN_MAC_PAD_CRC_STRIP_ENABLE = 0x00000080, /* (ACS) Automatic Pad/Crc strip enable */
SYN_MAC_PAD_CRC_STRIP_DISABLE = 0x00000000, /* Automatic Pad/Crc stripping disable */
SYN_MAC_BACKOFF_LIMIT = 0x00000060,
SYN_MAC_BACKOFF_LIMIT3 = 0x00000060, /* (BL)Back-off limit in HD mode */
SYN_MAC_BACKOFF_LIMIT2 = 0x00000040,
SYN_MAC_BACKOFF_LIMIT1 = 0x00000020,
SYN_MAC_BACKOFF_LIMIT0 = 0x00000000,
SYN_MAC_DEFERRAL_CHECK = 0x00000010,
SYN_MAC_DEFERRAL_CHECK_ENABLE = 0x00000010, /* (DC)Deferral check enable in HD mode */
SYN_MAC_DEFERRAL_CHECK_DISABLE = 0x00000000, /* Deferral check disable */
SYN_MAC_TX = 0x00000008,
SYN_MAC_TX_ENABLE = 0x00000008, /* (TE)Transmitter enable */
SYN_MAC_TX_DISABLE = 0x00000000, /* Transmitter disable */
SYN_MAC_RX = 0x00000004,
SYN_MAC_RX_ENABLE = 0x00000004, /* (RE)Receiver enable */
SYN_MAC_RX_DISABLE = 0x00000000, /* Receiver disable */
SYN_MAC_PRELEN_RESERVED = 0x00000003, /* Preamble Length for Transmit Frames */
SYN_MAC_PRELEN_3B = 0x00000002,
SYN_MAC_PRELEN_5B = 0x00000001,
SYN_MAC_PRELEN_7B = 0x00000000,
};
/*
* SYN_MAC_FRAME_FILTER = 0x0004, Mac frame filtering controls Register
*/
enum syn_mac_frame_filter_reg {
SYN_MAC_FILTER = 0x80000000,
SYN_MAC_FILTER_OFF = 0x80000000, /* (RA)Receive all incoming packets */
SYN_MAC_FILTER_ON = 0x00000000, /* Receive filtered pkts only */
SYN_MAC_HASH_PERFECT_FILTER = 0x00000400, /* Hash or Perfect Filter enable */
SYN_MAC_SRC_ADDR_FILTER = 0x00000200,
SYN_MAC_SRC_ADDR_FILTER_ENABLE = 0x00000200, /* (SAF)Source Address Filter enable */
SYN_MAC_SRC_ADDR_FILTER_DISABLE = 0x00000000,
SYN_MAC_SRC_INVA_ADDR_FILTER = 0x00000100,
SYN_MAC_SRC_INV_ADDR_FILTER_EN = 0x00000100, /* (SAIF)Inv Src Addr Filter enable */
SYN_MAC_SRC_INV_ADDR_FILTER_DIS = 0x00000000,
SYN_MAC_PASS_CONTROL = 0x000000C0,
SYN_MAC_PASS_CONTROL3 = 0x000000C0, /* (PCF)Forwards ctrl frames that pass AF */
SYN_MAC_PASS_CONTROL2 = 0x00000080, /* Forwards all control frames
even if they fail the AF */
SYN_MAC_PASS_CONTROL1 = 0x00000040, /* Forwards all control frames except
PAUSE control frames to application
even if they fail the AF */
SYN_MAC_PASS_CONTROL0 = 0x00000000, /* Don't pass control frames */
SYN_MAC_BROADCAST = 0x00000020,
SYN_MAC_BROADCAST_DISABLE = 0x00000020, /* (DBF)Disable Rx of broadcast frames */
SYN_MAC_BROADCAST_ENABLE = 0x00000000, /* Enable broadcast frames */
SYN_MAC_MULTICAST_FILTER = 0x00000010,
SYN_MAC_MULTICAST_FILTER_OFF = 0x00000010, /* (PM) Pass all multicast packets */
SYN_MAC_MULTICAST_FILTER_ON = 0x00000000, /* Pass filtered multicast packets */
SYN_MAC_DEST_ADDR_FILTER = 0x00000008,
SYN_MAC_DEST_ADDR_FILTER_INV = 0x00000008, /* (DAIF)Inverse filtering for DA */
SYN_MAC_DEST_ADDR_FILTER_NOR = 0x00000000, /* Normal filtering for DA */
SYN_MAC_MCAST_HASH_FILTER = 0x00000004,
SYN_MAC_MCAST_HASH_FILTER_ON = 0x00000004, /* (HMC)perform multicast hash filtering */
SYN_MAC_MCAST_HASH_FILTER_OFF = 0x00000000, /* perfect filtering only */
SYN_MAC_UCAST_HASH_FILTER = 0x00000002,
SYN_MAC_UCAST_HASH_FILTER_ON = 0x00000002, /* (HUC)Unicast Hash filtering only */
SYN_MAC_UCAST_HASH_FILTER_OFF = 0x00000000, /* perfect filtering only */
SYN_MAC_PROMISCUOUS_MODE = 0x00000001,
SYN_MAC_PROMISCUOUS_MODE_ON = 0x00000001, /* Receive all frames */
SYN_MAC_PROMISCUOUS_MODE_OFF = 0x00000000, /* Receive filtered packets only */
};
/*
* SYN_MAC_FLOW_CONTROL = 0x0018, Flow control Register Layout
*/
enum syn_mac_flow_control_reg {
SYN_MAC_FC_PAUSE_TIME_MASK = 0xFFFF0000, /* (PT) PAUSE TIME field
in the control frame */
SYN_MAC_FC_PAUSE_TIME_SHIFT = 16,
SYN_MAC_FC_PAUSE_LOW_THRESH = 0x00000030,
SYN_MAC_FC_PAUSE_LOW_THRESH3 = 0x00000030, /* (PLT)thresh for pause
tmr 256 slot time */
SYN_MAC_FC_PAUSE_LOW_THRESH2 = 0x00000020, /* 144 slot time */
SYN_MAC_FC_PAUSE_LOW_THRESH1 = 0x00000010, /* 28 slot time */
SYN_MAC_FC_PAUSE_LOW_THRESH0 = 0x00000000, /* 4 slot time */
SYN_MAC_FC_UNICAST_PAUSE_FRAME = 0x00000008,
SYN_MAC_FC_UNICAST_PAUSE_FRAME_ON = 0x00000008, /* (UP)Detect pause frame
with unicast addr. */
SYN_MAC_FC_UNICAST_PAUSE_FRAME_OFF = 0x00000000,/* Detect only pause frame
with multicast addr. */
SYN_MAC_FC_RX_FLOW_CONTROL = 0x00000004,
SYN_MAC_FC_RX_FLOW_CONTROL_ENABLE = 0x00000004, /* (RFE)Enable Rx flow control */
SYN_MAC_FC_RX_FLOW_CONTROL_DISABLE = 0x00000000,/* Disable Rx flow control */
SYN_MAC_FC_TX_FLOW_CONTROL = 0x00000002,
SYN_MAC_FC_TX_FLOW_CONTROL_ENABLE = 0x00000002, /* (TFE)Enable Tx flow control */
SYN_MAC_FC_TX_FLOW_CONTROL_DISABLE = 0x00000000,/* Disable flow control */
SYN_MAC_FC_FLOW_CONTROL_BACK_PRESSURE = 0x00000001,
SYN_MAC_FC_SEND_PAUSE_FRAME = 0x00000001, /* (FCB/PBA)send pause frm/Apply
back pressure */
};
/*
* SYN_MAC_ADDR_HIGH Register
*/
enum syn_mac_addr_high {
SYN_MAC_ADDR_HIGH_AE = 0x80000000,
};
/*
* SYN_DMA_BUS_MODE = 0x0000, CSR0 - Bus Mode
*/
enum syn_dma_bus_mode_reg {
SYN_DMA_FIXED_BURST_ENABLE = 0x00010000, /* (FB)Fixed Burst SINGLE, INCR4,
INCR8 or INCR16 */
SYN_DMA_FIXED_BURST_DISABLE = 0x00000000, /* SINGLE, INCR */
SYN_DMA_TX_PRIORITY_RATIO11 = 0x00000000, /* (PR)TX:RX DMA priority ratio 1:1 */
SYN_DMA_TX_PRIORITY_RATIO21 = 0x00004000, /* (PR)TX:RX DMA priority ratio 2:1 */
SYN_DMA_TX_PRIORITY_RATIO31 = 0x00008000, /* (PR)TX:RX DMA priority ratio 3:1 */
SYN_DMA_TX_PRIORITY_RATIO41 = 0x0000C000, /* (PR)TX:RX DMA priority ratio 4:1 */
SYN_DMA_ADDRESS_ALIGNED_BEATS = 0x02000000, /* Address Aligned beats */
SYN_DMA_BURST_LENGTHX8 = 0x01000000, /* When set, multiplies the PBL by 8 */
SYN_DMA_BURST_LENGTH256 = 0x01002000, /* (dma_burst_lengthx8 |
dma_burst_length32) = 256 */
SYN_DMA_BURST_LENGTH128 = 0x01001000, /* (dma_burst_lengthx8 |
dma_burst_length16) = 128 */
SYN_DMA_BURST_LENGTH64 = 0x01000800, /* (dma_burst_lengthx8 |
dma_burst_length8) = 64 */
/* (PBL) programmable burst length */
SYN_DMA_BURST_LENGTH32 = 0x00002000, /* Dma burst length = 32 */
SYN_DMA_BURST_LENGTH16 = 0x00001000, /* Dma burst length = 16 */
SYN_DMA_BURST_LENGTH8 = 0x00000800, /* Dma burst length = 8 */
SYN_DMA_BURST_LENGTH4 = 0x00000400, /* Dma burst length = 4 */
SYN_DMA_BURST_LENGTH2 = 0x00000200, /* Dma burst length = 2 */
SYN_DMA_BURST_LENGTH1 = 0x00000100, /* Dma burst length = 1 */
SYN_DMA_BURST_LENGTH0 = 0x00000000, /* Dma burst length = 0 */
SYN_DMA_DESCRIPTOR8_WORDS = 0x00000080, /* Enh Descriptor works 1=>
8 word descriptor */
SYN_DMA_DESCRIPTOR4_WORDS = 0x00000000, /* Enh Descriptor works 0=>
4 word descriptor */
SYN_DMA_DESCRIPTOR_SKIP16 = 0x00000040, /* (DSL)Descriptor skip length (no.of dwords) */
SYN_DMA_DESCRIPTOR_SKIP8 = 0x00000020, /* between two unchained descriptors */
SYN_DMA_DESCRIPTOR_SKIP4 = 0x00000010,
SYN_DMA_DESCRIPTOR_SKIP2 = 0x00000008,
SYN_DMA_DESCRIPTOR_SKIP1 = 0x00000004,
SYN_DMA_DESCRIPTOR_SKIP0 = 0x00000000,
SYN_DMA_ARBIT_RR = 0x00000000, /* (DA) DMA RR arbitration */
SYN_DMA_ARBIT_PR = 0x00000002, /* Rx has priority over Tx */
SYN_DMA_RESET_ON = 0x00000001, /* (SWR)Software Reset DMA engine */
SYN_DMA_RESET_OFF = 0x00000000,
};
/*
* SYN_DMA_STATUS = 0x0014, CSR5 - Dma status Register
*/
enum syn_dma_status_reg {
SYN_DMA_GMAC_PMT_INTR = 0x10000000, /* (GPI)Gmac subsystem interrupt */
SYN_DMA_GMAC_MMC_INTR = 0x08000000, /* (GMI)Gmac MMC subsystem interrupt */
SYN_DMA_GMAC_LINE_INTF_INTR = 0x04000000, /* Line interface interrupt */
SYN_DMA_ERROR_BIT2 = 0x02000000, /* (EB)Error bits 0-data buffer, 1-desc access */
SYN_DMA_ERROR_BIT1 = 0x01000000, /* (EB)Error bits 0-write trnsf, 1-read transfer */
SYN_DMA_ERROR_BIT0 = 0x00800000, /* (EB)Error bits 0-Rx DMA, 1-Tx DMA */
SYN_DMA_TX_STATE = 0x00700000, /* (TS)Transmit process state */
SYN_DMA_TX_STOPPED = 0x00000000, /* Stopped - Reset or Stop Tx Command issued */
SYN_DMA_TX_FETCHING = 0x00100000, /* Running - fetching the Tx descriptor */
SYN_DMA_TX_WAITING = 0x00200000, /* Running - waiting for status */
SYN_DMA_TX_READING = 0x00300000, /* Running - reading the data from host memory */
SYN_DMA_TX_SUSPENDED = 0x00600000, /* Suspended - Tx Descriptor unavailable */
SYN_DMA_TX_CLOSING = 0x00700000, /* Running - closing Tx descriptor */
SYN_DMA_RX_STATE = 0x000E0000, /* (RS)Receive process state */
SYN_DMA_RX_STOPPED = 0x00000000, /* Stopped - Reset or Stop Rx Command issued */
SYN_DMA_RX_FETCHING = 0x00020000, /* Running - fetching the Rx descriptor */
SYN_DMA_RX_WAITING = 0x00060000, /* Running - waiting for packet */
SYN_DMA_RX_SUSPENDED = 0x00080000, /* Suspended - Rx Descriptor unavailable */
SYN_DMA_RX_CLOSING = 0x000A0000, /* Running - closing descriptor */
SYN_DMA_RX_QUEUING = 0x000E0000, /* Running - queuing the receive frame into host memory */
SYN_DMA_INT_NORMAL = 0x00010000, /* (NIS)Normal interrupt summary */
SYN_DMA_INT_ABNORMAL = 0x00008000, /* (AIS)Abnormal interrupt summary */
SYN_DMA_INT_EARLY_RX = 0x00004000, /* Early receive interrupt (Normal) */
SYN_DMA_INT_BUS_ERROR = 0x00002000, /* Fatal bus error (Abnormal) */
SYN_DMA_INT_EARLY_TX = 0x00000400, /* Early transmit interrupt (Abnormal) */
SYN_DMA_INT_RX_WDOG_TO = 0x00000200, /* Receive Watchdog Timeout (Abnormal) */
SYN_DMA_INT_RX_STOPPED = 0x00000100, /* Receive process stopped (Abnormal) */
SYN_DMA_INT_RX_NO_BUFFER = 0x00000080, /* RX buffer unavailable (Abnormal) */
SYN_DMA_INT_RX_COMPLETED = 0x00000040, /* Completion of frame RX (Normal) */
SYN_DMA_INT_TX_UNDERFLOW = 0x00000020, /* Transmit underflow (Abnormal) */
SYN_DMA_INT_RCV_OVERFLOW = 0x00000010, /* RX Buffer overflow interrupt */
SYN_DMA_INT_TX_JABBER_TO = 0x00000008, /* TX Jabber Timeout (Abnormal) */
SYN_DMA_INT_TX_NO_BUFFER = 0x00000004, /* TX buffer unavailable (Normal) */
SYN_DMA_INT_TX_STOPPED = 0x00000002, /* TX process stopped (Abnormal) */
SYN_DMA_INT_TX_COMPLETED = 0x00000001, /* Transmit completed (Normal) */
};
/*
* SYN_DMA_OPERATION_MODE = 0x0018, CSR6 - Dma Operation Mode Register
*/
enum syn_dma_operation_mode_reg {
SYN_DMA_DISABLE_DROP_TCP_CS = 0x04000000, /* (DT) Dis. drop. of tcp/ip
CS error frames */
SYN_DMA_RX_STORE_AND_FORWARD = 0x02000000, /* Rx (SF)Store and forward */
SYN_DMA_RX_FRAME_FLUSH = 0x01000000, /* Disable Receive Frame Flush*/
SYN_DMA_TX_STORE_AND_FORWARD = 0x00200000, /* Tx (SF)Store and forward */
SYN_DMA_FLUSH_TX_FIFO = 0x00100000, /* (FTF)Tx FIFO controller
is reset to default */
SYN_DMA_TX_THRESH_CTRL = 0x0001C000, /* (TTC)Controls the Thresh of
MTL tx Fifo */
SYN_DMA_TX_THRESH_CTRL16 = 0x0001C000, /* (TTC)Controls the Thresh of
MTL tx Fifo 16 */
SYN_DMA_TX_THRESH_CTRL24 = 0x00018000, /* (TTC)Controls the Thresh of
MTL tx Fifo 24 */
SYN_DMA_TX_THRESH_CTRL32 = 0x00014000, /* (TTC)Controls the Thresh of
MTL tx Fifo 32 */
SYN_DMA_TX_THRESH_CTRL40 = 0x00010000, /* (TTC)Controls the Thresh of
MTL tx Fifo 40 */
SYN_DMA_TX_THRESH_CTRL256 = 0x0000c000, /* (TTC)Controls the Thresh of
MTL tx Fifo 256 */
SYN_DMA_TX_THRESH_CTRL192 = 0x00008000, /* (TTC)Controls the Thresh of
MTL tx Fifo 192 */
SYN_DMA_TX_THRESH_CTRL128 = 0x00004000, /* (TTC)Controls the Thresh of
MTL tx Fifo 128 */
SYN_DMA_TX_THRESH_CTRL64 = 0x00000000, /* (TTC)Controls the Thresh of
MTL tx Fifo 64 */
SYN_DMA_TX_START = 0x00002000, /* (ST)Start/Stop transmission*/
SYN_DMA_RX_FLOW_CTRL_DEACT = 0x00401800, /* (RFD)Rx flow control
deact. Threshold */
SYN_DMA_RX_FLOW_CTRL_DEACT1K = 0x00000000, /* (RFD)Rx flow control
deact. Threshold (1kbytes) */
SYN_DMA_RX_FLOW_CTRL_DEACT2K = 0x00000800, /* (RFD)Rx flow control
deact. Threshold (2kbytes) */
SYN_DMA_RX_FLOW_CTRL_DEACT3K = 0x00001000, /* (RFD)Rx flow control
deact. Threshold (3kbytes) */
SYN_DMA_RX_FLOW_CTRL_DEACT4K = 0x00001800, /* (RFD)Rx flow control
deact. Threshold (4kbytes) */
SYN_DMA_RX_FLOW_CTRL_DEACT5K = 0x00400000, /* (RFD)Rx flow control
deact. Threshold (5kbytes) */
SYN_DMA_RX_FLOW_CTRL_DEACT6K = 0x00400800, /* (RFD)Rx flow control
deact. Threshold (6kbytes) */
SYN_DMA_RX_FLOW_CTRL_DEACT7K = 0x00401000, /* (RFD)Rx flow control
deact. Threshold (7kbytes) */
SYN_DMA_RX_FLOW_CTRL_ACT = 0x00800600, /* (RFA)Rx flow control
Act. Threshold */
SYN_DMA_RX_FLOW_CTRL_ACT1K = 0x00000000, /* (RFA)Rx flow control
Act. Threshold (1kbytes) */
SYN_DMA_RX_FLOW_CTRL_ACT2K = 0x00000200, /* (RFA)Rx flow control
Act. Threshold (2kbytes) */
SYN_DMA_RX_FLOW_CTRL_ACT3K = 0x00000400, /* (RFA)Rx flow control
Act. Threshold (3kbytes) */
SYN_DMA_RX_FLOW_CTRL_ACT4K = 0x00000600, /* (RFA)Rx flow control
Act. Threshold (4kbytes) */
SYN_DMA_RX_FLOW_CTRL_ACT5K = 0x00800000, /* (RFA)Rx flow control
Act. Threshold (5kbytes) */
SYN_DMA_RX_FLOW_CTRL_ACT6K = 0x00800200, /* (RFA)Rx flow control
Act. Threshold (6kbytes) */
SYN_DMA_RX_FLOW_CTRL_ACT7K = 0x00800400, /* (RFA)Rx flow control
Act. Threshold (7kbytes) */
SYN_DMA_RX_THRESH_CTRL = 0x00000018, /* (RTC)Controls the
Thresh of MTL rx Fifo */
SYN_DMA_RX_THRESH_CTRL64 = 0x00000000, /* (RTC)Controls the
Thresh of MTL rx Fifo 64 */
SYN_DMA_RX_THRESH_CTRL32 = 0x00000008, /* (RTC)Controls the
Thresh of MTL rx Fifo 32 */
SYN_DMA_RX_THRESH_CTRL96 = 0x00000010, /* (RTC)Controls the
Thresh of MTL rx Fifo 96 */
SYN_DMA_RX_THRESH_CTRL128 = 0x00000018, /* (RTC)Controls the
Thresh of MTL rx Fifo 128 */
SYN_DMA_EN_HW_FLOW_CTRL = 0x00000100, /* (EFC)Enable HW flow control*/
SYN_DMA_DIS_HW_FLOW_CTRL = 0x00000000, /* Disable HW flow control */
SYN_DMA_FWD_ERROR_FRAMES = 0x00000080, /* (FEF)Forward error frames */
SYN_DMA_FWD_UNDER_SZ_FRAMES = 0x00000040, /* (FUF)Forward undersize
frames */
SYN_DMA_TX_SECOND_FRAME = 0x00000004, /* (OSF)Operate on 2nd frame */
SYN_DMA_RX_START = 0x00000002, /* (SR)Start/Stop reception */
};
/*
* SYN_DMA_INT_ENABLE = 0x101C, CSR7 - Interrupt enable Register Layout
*/
enum syn_dma_interrupt_reg {
SYN_DMA_IE_NORMAL = SYN_DMA_INT_NORMAL, /* Normal interrupt enable */
SYN_DMA_IE_ABNORMAL = SYN_DMA_INT_ABNORMAL, /* Abnormal interrupt enable */
SYN_DMA_IE_EARLY_RX = SYN_DMA_INT_EARLY_RX, /* Early RX interrupt enable */
SYN_DMA_IE_BUS_ERROR = SYN_DMA_INT_BUS_ERROR, /* Fatal bus error enable */
SYN_DMA_IE_EARLY_TX = SYN_DMA_INT_EARLY_TX, /* Early TX interrupt enable */
SYN_DMA_IE_RX_WDOG_TO = SYN_DMA_INT_RX_WDOG_TO, /* RX Watchdog Timeout enable */
SYN_DMA_IE_RX_STOPPED = SYN_DMA_INT_RX_STOPPED, /* RX process stopped enable */
SYN_DMA_IE_RX_NO_BUFFER = SYN_DMA_INT_RX_NO_BUFFER,
/* Receive buffer unavailable enable */
SYN_DMA_IE_RX_COMPLETED = SYN_DMA_INT_RX_COMPLETED,
/* Completion of frame reception enable */
SYN_DMA_IE_TX_UNDERFLOW = SYN_DMA_INT_TX_UNDERFLOW,
/* TX underflow enable */
SYN_DMA_IE_RX_OVERFLOW = SYN_DMA_INT_RCV_OVERFLOW,
/* RX Buffer overflow interrupt */
SYN_DMA_IE_TX_JABBER_TO = SYN_DMA_INT_TX_JABBER_TO,
/* TX Jabber Timeout enable */
SYN_DMA_IE_TX_NO_BUFFER = SYN_DMA_INT_TX_NO_BUFFER,
/* TX buffer unavailable enable */
SYN_DMA_IE_TX_STOPPED = SYN_DMA_INT_TX_STOPPED,
/* TX process stopped enable */
SYN_DMA_IE_TX_COMPLETED = SYN_DMA_INT_TX_COMPLETED,
/* TX completed enable */
};
/*
* SYN_DMA_AXI_BUS_MODE = 0x1028
*/
enum syn_dma_axi_bus_mode_reg {
SYN_DMA_EN_LPI = 0x80000000,
SYN_DMA_LPI_XIT_FRM = 0x40000000,
SYN_DMA_WR_OSR_NUM_REQS16 = 0x00F00000,
SYN_DMA_WR_OSR_NUM_REQS8 = 0x00700000,
SYN_DMA_WR_OSR_NUM_REQS4 = 0x00300000,
SYN_DMA_WR_OSR_NUM_REQS2 = 0x00100000,
SYN_DMA_WR_OSR_NUM_REQS1 = 0x00000000,
SYN_DMA_RD_OSR_NUM_REQS16 = 0x000F0000,
SYN_DMA_RD_OSR_NUM_REQS8 = 0x00070000,
SYN_DMA_RD_OSR_NUM_REQS4 = 0x00030000,
SYN_DMA_RD_OSR_NUM_REQS2 = 0x00010000,
SYN_DMA_RD_OSR_NUM_REQS1 = 0x00000000,
SYN_DMA_ONEKBBE = 0x00002000,
SYN_DMA_AXI_AAL = 0x00001000,
SYN_DMA_AXI_BLEN256 = 0x00000080,
SYN_DMA_AXI_BLEN128 = 0x00000040,
SYN_DMA_AXI_BLEN64 = 0x00000020,
SYN_DMA_AXI_BLEN32 = 0x00000010,
SYN_DMA_AXI_BLEN16 = 0x00000008,
SYN_DMA_AXI_BLEN8 = 0x00000004,
SYN_DMA_AXI_BLEN4 = 0x00000002,
SYN_DMA_UNDEFINED = 0x00000001,
};
/*
* Values to initialize DMA registers
*/
enum syn_dma_init_values {
/*
* Interrupt groups
*/
SYN_DMA_INT_ERROR_MASK = SYN_DMA_INT_BUS_ERROR, /* Error */
SYN_DMA_INT_RX_ABN_MASK = SYN_DMA_INT_RX_NO_BUFFER, /* RX abnormal intr */
SYN_DMA_INT_RX_NORM_MASK = SYN_DMA_INT_RX_COMPLETED, /* RX normal intr */
SYN_DMA_INT_RX_STOPPED_MASK = SYN_DMA_INT_RX_STOPPED, /* RX stopped */
SYN_DMA_INT_TX_ABN_MASK = SYN_DMA_INT_TX_UNDERFLOW, /* TX abnormal intr */
SYN_DMA_INT_TX_NORM_MASK = SYN_DMA_INT_TX_COMPLETED, /* TX normal intr */
SYN_DMA_INT_TX_STOPPED_MASK = SYN_DMA_INT_TX_STOPPED, /* TX stopped */
SYN_DMA_BUS_MODE_INIT = SYN_DMA_FIXED_BURST_ENABLE | SYN_DMA_BURST_LENGTH8
| SYN_DMA_DESCRIPTOR_SKIP2 | SYN_DMA_RESET_OFF,
SYN_DMA_BUS_MODE_VAL = SYN_DMA_BURST_LENGTH32
| SYN_DMA_BURST_LENGTHX8 | SYN_DMA_DESCRIPTOR_SKIP0
| SYN_DMA_DESCRIPTOR8_WORDS | SYN_DMA_ARBIT_PR | SYN_DMA_ADDRESS_ALIGNED_BEATS,
SYN_DMA_OMR = SYN_DMA_TX_STORE_AND_FORWARD | SYN_DMA_RX_STORE_AND_FORWARD
| SYN_DMA_RX_THRESH_CTRL128 | SYN_DMA_TX_SECOND_FRAME,
SYN_DMA_INT_EN = SYN_DMA_IE_NORMAL | SYN_DMA_IE_ABNORMAL | SYN_DMA_INT_ERROR_MASK
| SYN_DMA_INT_RX_ABN_MASK | SYN_DMA_INT_RX_NORM_MASK
| SYN_DMA_INT_RX_STOPPED_MASK | SYN_DMA_INT_TX_ABN_MASK
| SYN_DMA_INT_TX_NORM_MASK | SYN_DMA_INT_TX_STOPPED_MASK,
SYN_DMA_INT_DISABLE = 0,
SYN_DMA_AXI_BUS_MODE_VAL = SYN_DMA_AXI_BLEN16 | SYN_DMA_RD_OSR_NUM_REQS8
| SYN_DMA_WR_OSR_NUM_REQS8,
};
/*
* desc_mode
* GMAC descriptors mode
*/
enum desc_mode {
RINGMODE = 0x00000001,
CHAINMODE = 0x00000002,
};
extern void syn_disable_dma_interrupt(struct nss_gmac_hal_dev *nghd);
extern void syn_enable_dma_interrupt(struct nss_gmac_hal_dev *nghd);
extern void syn_enable_dma_rx(struct nss_gmac_hal_dev *nghd);
extern void syn_disable_dma_rx(struct nss_gmac_hal_dev *nghd);
extern void syn_enable_dma_tx(struct nss_gmac_hal_dev *nghd);
extern void syn_disable_dma_tx(struct nss_gmac_hal_dev *nghd);
extern void syn_clear_dma_status(struct nss_gmac_hal_dev *nghd);
extern void syn_resume_dma_tx(struct nss_gmac_hal_dev *nghd);
extern uint32_t syn_get_rx_missed(struct nss_gmac_hal_dev *nghd);
extern uint32_t syn_get_fifo_overflows(struct nss_gmac_hal_dev *nghd);
extern void syn_init_tx_desc_base(struct nss_gmac_hal_dev *nghd, uint32_t tx_desc_dma);
extern void syn_init_rx_desc_base(struct nss_gmac_hal_dev *nghd, uint32_t rx_desc_dma);
#endif /*__SYN_REG_H__*/
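
For orientation, here is a minimal sketch (not part of the original header) of how the composed syn_dma_init_values above would typically be programmed, assuming the generic hal_write_reg() accessor declared in nss_dp_hal_if.h and that the DMA CSRs are reachable through nghd->mac_base; the helper name syn_dp_dma_init_sketch() is hypothetical.

/* Illustrative sketch only: program the composed DMA init values. */
static void syn_dp_dma_init_sketch(struct nss_gmac_hal_dev *nghd)
{
	/* CSR0: burst length, descriptor size/skip and arbitration */
	hal_write_reg(nghd->mac_base, SYN_DMA_BUS_MODE, SYN_DMA_BUS_MODE_VAL);

	/* CSR6: store-and-forward on both paths, operate on second frame */
	hal_write_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE, SYN_DMA_OMR);

	/* AXI bus: burst length 16, eight outstanding read/write requests */
	hal_write_reg(nghd->mac_base, SYN_DMA_AXI_BUS_MODE, SYN_DMA_AXI_BUS_MODE_VAL);

	/* CSR7: unmask the normal/abnormal interrupt groups defined above */
	hal_write_reg(nghd->mac_base, SYN_DMA_INT_ENABLE, SYN_DMA_INT_EN);
}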

View File

@@ -1,189 +0,0 @@
/*
**************************************************************************
* Copyright (c) 2016,2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
#ifndef __SYN_DEV_H__
#define __SYN_DEV_H__
#include "syn_reg.h"
#include <fal/fal_mib.h>
#include <fal/fal_port_ctrl.h>
/*
* Subclass for base nss_gmac_haldev
*/
struct syn_hal_dev {
struct nss_gmac_hal_dev nghd; /* Base class */
fal_xgmib_info_t stats; /* Stats structure */
};
/*
* syn_set_rx_flow_ctrl()
*/
static inline void syn_set_rx_flow_ctrl(
struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, SYN_MAC_RX_FLOW_CTL,
SYN_MAC_RX_FLOW_ENABLE);
}
/*
* syn_clear_rx_flow_ctrl()
*/
static inline void syn_clear_rx_flow_ctrl(
struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, SYN_MAC_RX_FLOW_CTL,
SYN_MAC_RX_FLOW_ENABLE);
}
/*
* syn_set_tx_flow_ctrl()
*/
static inline void syn_set_tx_flow_ctrl(
struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, SYN_MAC_Q0_TX_FLOW_CTL,
SYN_MAC_TX_FLOW_ENABLE);
}
/*
* syn_send_tx_pause_frame()
*/
static inline void syn_send_tx_pause_frame(
struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, SYN_MAC_Q0_TX_FLOW_CTL,
SYN_MAC_TX_FLOW_ENABLE);
hal_set_reg_bits(nghd, SYN_MAC_Q0_TX_FLOW_CTL,
SYN_MAC_TX_PAUSE_SEND);
}
/*
* syn_clear_tx_flow_ctrl()
*/
static inline void syn_clear_tx_flow_ctrl(
struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, SYN_MAC_Q0_TX_FLOW_CTL,
SYN_MAC_TX_FLOW_ENABLE);
}
/*
* syn_clear_mac_ctrl()
*/
static inline void syn_clear_mac_ctrl(
struct nss_gmac_hal_dev *nghd)
{
hal_write_reg(nghd->mac_base, SYN_MAC_TX_CONFIG, 0);
hal_write_reg(nghd->mac_base, SYN_MAC_RX_CONFIG, 0);
}
/*
* syn_rx_enable()
*/
static inline void syn_rx_enable(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, SYN_MAC_RX_CONFIG, SYN_MAC_RX_ENABLE);
hal_set_reg_bits(nghd, SYN_MAC_PACKET_FILTER, SYN_MAC_RX_ENABLE);
}
/*
* syn_rx_disable()
*/
static inline void syn_rx_disable(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, SYN_MAC_RX_CONFIG, SYN_MAC_RX_ENABLE);
}
/*
* syn_tx_enable()
*/
static inline void syn_tx_enable(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, SYN_MAC_TX_CONFIG, SYN_MAC_TX_ENABLE);
}
/*
* syn_tx_disable()
*/
static inline void syn_tx_disable(struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, SYN_MAC_TX_CONFIG,
SYN_MAC_TX_ENABLE);
}
/*
* syn_set_mmc_stats()
*/
static inline void syn_set_mmc_stats(struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, SYN_MAC_MMC_CTL,
SYN_MAC_MMC_RSTONRD);
}
/*
* syn_rx_jumbo_frame_enable()
*/
static inline void syn_rx_jumbo_frame_enable(
struct nss_gmac_hal_dev *nghd)
{
hal_set_reg_bits(nghd, SYN_MAC_RX_CONFIG,
SYN_MAC_JUMBO_FRAME_ENABLE);
}
/*
* syn_rx_jumbo_frame_disable()
*/
static inline void syn_rx_jumbo_frame_disable(
struct nss_gmac_hal_dev *nghd)
{
hal_clear_reg_bits(nghd, SYN_MAC_RX_CONFIG,
SYN_MAC_JUMBO_FRAME_ENABLE);
}
/*
* syn_set_full_duplex()
*/
static inline void syn_set_full_duplex(
struct nss_gmac_hal_dev *nghd)
{
/* TBD */
return;
}
/*
* syn_set_half_duplex()
*/
static inline void syn_set_half_duplex(
struct nss_gmac_hal_dev *nghd)
{
/* TBD */
return;
}
static int syn_get_stats(struct nss_gmac_hal_dev *nghd)
{
struct syn_hal_dev *shd = (struct syn_hal_dev *)nghd;
fal_xgmib_info_t *stats = &(shd->stats);
if (fal_get_xgmib_info(0, nghd->mac_id, stats))
return -1;
return 0;
}
#endif /*__SYN_DEV_H__*/
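
As a usage sketch under the same assumptions as the header above (the helper name is hypothetical and error handling is elided): enabling jumbo reception pairs the MAC-side JE bit with the SSDK per-port frame-size limit.

/* Illustrative only: enable jumbo Rx and raise the SSDK port frame limit. */
static inline int syn_jumbo_sketch(struct nss_gmac_hal_dev *nghd, uint32_t max_frame)
{
	syn_rx_jumbo_frame_enable(nghd);	/* sets JE in SYN_MAC_RX_CONFIG */
	return fal_port_max_frame_size_set(0, nghd->mac_id, max_frame);
}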

View File

@@ -1,505 +0,0 @@
/*
**************************************************************************
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <nss_dp_hal_if.h>
#include <nss_dp_dev.h>
#include "syn_dev.h"
#define SYN_STAT(m) offsetof(fal_xgmib_info_t, m)
struct syn_ethtool_stats {
uint8_t stat_string[ETH_GSTRING_LEN];
uint64_t stat_offset;
};
/*
* Array of strings describing statistics
*/
static const struct syn_ethtool_stats syn_gstrings_stats[] = {
{"rx_frame", SYN_STAT(RxFrame)},
{"rx_bytes", SYN_STAT(RxByte)},
{"rx_bytes_g", SYN_STAT(RxByteGood)},
{"rx_broadcast", SYN_STAT(RxBroadGood)},
{"rx_multicast", SYN_STAT(RxMultiGood)},
{"rx_crc_err", SYN_STAT(RxFcsErr)},
{"rx_runt_err", SYN_STAT(RxRuntErr)},
{"rx_jabber_err", SYN_STAT(RxJabberError)},
{"rx_undersize", SYN_STAT(RxUndersizeGood)},
{"rx_oversize", SYN_STAT(RxOversizeGood)},
{"rx_pkt64", SYN_STAT(Rx64Byte)},
{"rx_pkt65to127", SYN_STAT(Rx128Byte)},
{"rx_pkt128to255", SYN_STAT(Rx256Byte)},
{"rx_pkt256to511", SYN_STAT(Rx512Byte)},
{"rx_pkt512to1023", SYN_STAT(Rx1024Byte)},
{"rx_pkt1024tomax", SYN_STAT(RxMaxByte)},
{"rx_unicast", SYN_STAT(RxUnicastGood)},
{"rx_len_err", SYN_STAT(RxLengthError)},
{"rx_outofrange_err_ctr", SYN_STAT(RxOutOfRangeError)},
{"rx_pause", SYN_STAT(RxPause)},
{"rx_fifo_overflow", SYN_STAT(RxOverFlow)},
{"rx_vlan", SYN_STAT(RxVLANFrameGoodBad)},
{"rx_wdog", SYN_STAT(RxWatchDogError)},
{"rx_lpi_usec_ctr", SYN_STAT(RxLPIUsec)},
{"rx_lpi_tran_ctr", SYN_STAT(RxLPITran)},
{"rx_drop_frame_ctr", SYN_STAT(RxDropFrameGoodBad)},
{"rx_drop_byte_ctr", SYN_STAT(RxDropByteGoodBad)},
{"tx_bytes", SYN_STAT(TxByte)},
{"tx_frame", SYN_STAT(TxFrame)},
{"tx_broadcast", SYN_STAT(TxBroadGood)},
{"tx_broadcast_gb", SYN_STAT(TxBroad)},
{"tx_multicast", SYN_STAT(TxMultiGood)},
{"tx_multicast_gb", SYN_STAT(TxMulti)},
{"tx_pkt64", SYN_STAT(Tx64Byte)},
{"tx_pkt65to127", SYN_STAT(Tx128Byte)},
{"tx_pkt128to255", SYN_STAT(Tx256Byte)},
{"tx_pkt256to511", SYN_STAT(Tx512Byte)},
{"tx_pkt512to1023", SYN_STAT(Tx1024Byte)},
{"tx_pkt1024tomax", SYN_STAT(TxMaxByte)},
{"tx_unicast", SYN_STAT(TxUnicast)},
{"tx_underflow_err", SYN_STAT(TxUnderFlowError)},
{"tx_bytes_g", SYN_STAT(TxByteGood)},
{"tx_frame_g", SYN_STAT(TxFrameGood)},
{"tx_pause", SYN_STAT(TxPause)},
{"tx_vlan", SYN_STAT(TxVLANFrameGood)},
{"tx_lpi_usec_ctr", SYN_STAT(TxLPIUsec)},
{"tx_lpi_tran_ctr", SYN_STAT(TxLPITran)},
};
/*
* Array of strings describing private flag names
*/
static const char *const syn_strings_priv_flags[] = {
"test",
};
#define SYN_STATS_LEN ARRAY_SIZE(syn_gstrings_stats)
#define SYN_PRIV_FLAGS_LEN ARRAY_SIZE(syn_strings_priv_flags)
/*
* syn_rx_flow_control()
*/
static void syn_rx_flow_control(struct nss_gmac_hal_dev *nghd,
bool enabled)
{
BUG_ON(nghd == NULL);
if (enabled)
syn_set_rx_flow_ctrl(nghd);
else
syn_clear_rx_flow_ctrl(nghd);
}
/*
* syn_tx_flow_control()
*/
static void syn_tx_flow_control(struct nss_gmac_hal_dev *nghd,
bool enabled)
{
BUG_ON(nghd == NULL);
if (enabled)
syn_set_tx_flow_ctrl(nghd);
else
syn_clear_tx_flow_ctrl(nghd);
}
/*
* syn_get_mmc_stats()
*/
static int32_t syn_get_mmc_stats(struct nss_gmac_hal_dev *nghd)
{
BUG_ON(nghd == NULL);
if (syn_get_stats(nghd))
return -1;
return 0;
}
/*
* syn_get_max_frame_size()
*/
static int32_t syn_get_max_frame_size(struct nss_gmac_hal_dev *nghd)
{
int ret;
uint32_t mtu;
ret = fal_port_max_frame_size_get(0, nghd->mac_id, &mtu);
if (!ret)
return mtu;
return ret;
}
/*
* syn_set_max_frame_size()
*/
static int32_t syn_set_max_frame_size(struct nss_gmac_hal_dev *nghd,
uint32_t val)
{
return fal_port_max_frame_size_set(0, nghd->mac_id, val);
}
/*
* syn_set_mac_speed()
*/
static int32_t syn_set_mac_speed(struct nss_gmac_hal_dev *nghd,
uint32_t mac_speed)
{
struct net_device *netdev = nghd->netdev;
netdev_warn(netdev, "API deprecated\n");
return 0;
}
/*
* syn_get_mac_speed()
*/
static uint32_t syn_get_mac_speed(struct nss_gmac_hal_dev *nghd)
{
struct net_device *netdev = nghd->netdev;
netdev_warn(netdev, "API deprecated\n");
return 0;
}
/*
* syn_set_duplex_mode()
*/
static void syn_set_duplex_mode(struct nss_gmac_hal_dev *nghd,
uint8_t duplex_mode)
{
struct net_device *netdev = nghd->netdev;
netdev_warn(netdev, "API deprecated\n");
}
/*
* syn_get_duplex_mode()
*/
static uint8_t syn_get_duplex_mode(struct nss_gmac_hal_dev *nghd)
{
struct net_device *netdev = nghd->netdev;
netdev_warn(netdev, "API deprecated\n");
return 0;
}
/*
* syn_get_netdev_stats()
*/
static int syn_get_netdev_stats(struct nss_gmac_hal_dev *nghd,
struct rtnl_link_stats64 *stats)
{
struct syn_hal_dev *shd;
fal_xgmib_info_t *hal_stats;
BUG_ON(nghd == NULL);
shd = (struct syn_hal_dev *)nghd;
hal_stats = &(shd->stats);
if (syn_get_stats(nghd))
return -1;
stats->rx_packets = hal_stats->RxUnicastGood
+ hal_stats->RxBroadGood + hal_stats->RxMultiGood;
stats->tx_packets = hal_stats->TxUnicast
+ hal_stats->TxBroadGood + hal_stats->TxMultiGood;
stats->rx_bytes = hal_stats->RxByte;
stats->tx_bytes = hal_stats->TxByte;
stats->multicast =
hal_stats->RxMultiGood;
stats->rx_dropped =
hal_stats->RxDropFrameGoodBad;
stats->rx_length_errors =
hal_stats->RxLengthError;
stats->rx_crc_errors =
hal_stats->RxFcsErr;
stats->rx_fifo_errors =
hal_stats->RxOverFlow;
return 0;
}
/*
* syn_get_eth_stats()
*/
static int32_t syn_get_eth_stats(struct nss_gmac_hal_dev *nghd,
uint64_t *data)
{
struct syn_hal_dev *shd;
fal_xgmib_info_t *stats;
uint8_t *p = NULL;
int i;
BUG_ON(nghd == NULL);
shd = (struct syn_hal_dev *)nghd;
stats = &(shd->stats);
if (syn_get_stats(nghd))
return -1;
for (i = 0; i < SYN_STATS_LEN; i++) {
p = ((uint8_t *)(stats) +
syn_gstrings_stats[i].stat_offset);
data[i] = *(uint32_t *)p;
}
return 0;
}
/*
* syn_get_strset_count()
*/
static int32_t syn_get_strset_count(struct nss_gmac_hal_dev *nghd,
int32_t sset)
{
struct net_device *netdev;
BUG_ON(nghd == NULL);
netdev = nghd->netdev;
switch (sset) {
case ETH_SS_STATS:
return SYN_STATS_LEN;
case ETH_SS_PRIV_FLAGS:
return SYN_PRIV_FLAGS_LEN;
}
netdev_dbg(netdev, "%s: Invalid string set\n", __func__);
return -EPERM;
}
/*
* syn_get_strings()
*/
static int32_t syn_get_strings(struct nss_gmac_hal_dev *nghd,
int32_t stringset, uint8_t *data)
{
struct net_device *netdev;
int i;
BUG_ON(nghd == NULL);
netdev = nghd->netdev;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < SYN_STATS_LEN; i++) {
memcpy(data, syn_gstrings_stats[i].stat_string,
strlen(syn_gstrings_stats[i].stat_string));
data += ETH_GSTRING_LEN;
}
break;
case ETH_SS_PRIV_FLAGS:
for (i = 0; i < SYN_PRIV_FLAGS_LEN; i++) {
memcpy(data, syn_strings_priv_flags[i],
strlen(syn_strings_priv_flags[i]));
data += ETH_GSTRING_LEN;
}
break;
default:
netdev_dbg(netdev, "%s: Invalid string set\n", __func__);
return -EPERM;
}
return 0;
}
/*
* syn_send_pause_frame()
*/
static void syn_send_pause_frame(struct nss_gmac_hal_dev *nghd)
{
BUG_ON(nghd == NULL);
syn_send_tx_pause_frame(nghd);
}
/*
* syn_start
*/
static int32_t syn_start(struct nss_gmac_hal_dev *nghd)
{
BUG_ON(nghd == NULL);
syn_tx_enable(nghd);
syn_rx_enable(nghd);
syn_set_full_duplex(nghd);
if (syn_set_mac_speed(nghd, SPEED_10000))
return -1;
netdev_dbg(nghd->netdev,
"%s: mac_base:0x%px tx_enable:0x%x rx_enable:0x%x\n",
__func__,
nghd->mac_base,
hal_read_reg(nghd->mac_base,
SYN_MAC_TX_CONFIG),
hal_read_reg(nghd->mac_base,
SYN_MAC_RX_CONFIG));
return 0;
}
/*
* syn_stop
*/
static int32_t syn_stop(struct nss_gmac_hal_dev *nghd)
{
BUG_ON(nghd == NULL);
syn_tx_disable(nghd);
syn_rx_disable(nghd);
netdev_dbg(nghd->netdev, "%s: Stopping mac_base:0x%px\n", __func__,
nghd->mac_base);
return 0;
}
/*
* syn_init()
*/
static void *syn_init(struct gmac_hal_platform_data *gmacpdata)
{
struct syn_hal_dev *shd = NULL;
struct net_device *ndev = NULL;
struct nss_dp_dev *dp_priv = NULL;
struct resource *res;
ndev = gmacpdata->netdev;
dp_priv = netdev_priv(ndev);
res = platform_get_resource(dp_priv->pdev, IORESOURCE_MEM, 0);
if (!res) {
netdev_dbg(ndev, "Resource get failed.\n");
return NULL;
}
if (!devm_request_mem_region(&dp_priv->pdev->dev, res->start,
resource_size(res), ndev->name)) {
netdev_dbg(ndev, "Request mem region failed. Returning...\n");
return NULL;
}
shd = (struct syn_hal_dev *)devm_kzalloc(&dp_priv->pdev->dev,
sizeof(struct syn_hal_dev),
GFP_KERNEL);
if (!shd) {
netdev_dbg(ndev, "kzalloc failed. Returning...\n");
return NULL;
}
/* Save netdev context in syn HAL context */
shd->nghd.netdev = gmacpdata->netdev;
shd->nghd.mac_id = gmacpdata->macid;
/* Populate the mac base addresses */
shd->nghd.mac_base =
devm_ioremap_nocache(&dp_priv->pdev->dev, res->start,
resource_size(res));
if (!shd->nghd.mac_base) {
netdev_dbg(ndev, "ioremap fail.\n");
return NULL;
}
spin_lock_init(&shd->nghd.slock);
netdev_dbg(ndev, "ioremap OK.Size 0x%x Ndev base 0x%lx macbase 0x%px\n",
gmacpdata->reg_len,
ndev->base_addr,
shd->nghd.mac_base);
/* Reset MIB Stats */
if (fal_mib_port_flush_counters(0, shd->nghd.mac_id)) {
netdev_dbg(ndev, "MIB stats Reset fail.\n");
}
return (struct nss_gmac_hal_dev *)shd;
}
/*
* syn_set_mac_address()
*/
static void syn_set_mac_address(struct nss_gmac_hal_dev *nghd,
uint8_t *macaddr)
{
uint32_t data;
BUG_ON(nghd == NULL);
data = (macaddr[5] << 8) | macaddr[4] | SYN_MAC_ADDR_RSVD_BIT;
hal_write_reg(nghd->mac_base, SYN_MAC_ADDR0_HIGH, data);
data = (macaddr[3] << 24) | (macaddr[2] << 16) | (macaddr[1] << 8)
| macaddr[0];
hal_write_reg(nghd->mac_base, SYN_MAC_ADDR0_LOW, data);
}
/*
* syn_get_mac_address()
*/
static void syn_get_mac_address(struct nss_gmac_hal_dev *nghd,
uint8_t *macaddr)
{
uint32_t data;
BUG_ON(nghd == NULL);
data = hal_read_reg(nghd->mac_base, SYN_MAC_ADDR0_HIGH);
macaddr[5] = (data >> 8) & 0xff;
macaddr[4] = (data) & 0xff;
data = hal_read_reg(nghd->mac_base, SYN_MAC_ADDR0_LOW);
macaddr[3] = (data >> 24) & 0xff;
macaddr[2] = (data >> 16) & 0xff;
macaddr[1] = (data >> 8) & 0xff;
macaddr[0] = (data) & 0xff;
}
struct nss_gmac_hal_ops syn_hal_ops = {
.init = &syn_init,
.start = &syn_start,
.stop = &syn_stop,
.setmacaddr = &syn_set_mac_address,
.getmacaddr = &syn_get_mac_address,
.rxflowcontrol = &syn_rx_flow_control,
.txflowcontrol = &syn_tx_flow_control,
.setspeed = &syn_set_mac_speed,
.getspeed = &syn_get_mac_speed,
.setduplex = &syn_set_duplex_mode,
.getduplex = &syn_get_duplex_mode,
.getstats = &syn_get_mmc_stats,
.setmaxframe = &syn_set_max_frame_size,
.getmaxframe = &syn_get_max_frame_size,
.getndostats = &syn_get_netdev_stats,
.getssetcount = &syn_get_strset_count,
.getstrings = &syn_get_strings,
.getethtoolstats = &syn_get_eth_stats,
.sendpause = &syn_send_pause_frame,
};
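
A caller-side sketch (not taken from the driver) of dispatching through the ops table above; syn_hal_ops_sketch() is a hypothetical name and error handling is kept to the minimum.

/* Hypothetical caller: bring the XGMAC up and fetch netdev stats via the ops. */
static int syn_hal_ops_sketch(struct gmac_hal_platform_data *pdata,
			      struct rtnl_link_stats64 *stats)
{
	struct nss_gmac_hal_dev *nghd;

	nghd = syn_hal_ops.init(pdata);		/* map registers, reset MIB counters */
	if (!nghd)
		return -1;

	if (syn_hal_ops.start(nghd))		/* enable MAC Tx/Rx */
		return -1;

	return syn_hal_ops.getndostats(nghd, stats);	/* MIB -> rtnl_link_stats64 */
}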

View File

@@ -1,255 +0,0 @@
/*
**************************************************************************
* Copyright (c) 2016,2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
#ifndef __SYN_REG_H__
#define __SYN_REG_H__
/*
 * MAC Register Offset
 */
#define SYN_MAC_TX_CONFIG 0x0000
#define SYN_MAC_RX_CONFIG 0x0004
#define SYN_MAC_PACKET_FILTER 0x0008
#define SYN_MAC_WDOG_TIMEOUT 0x000c
#define SYN_MAC_HASH_TBL_REG0 0x0010
#define SYN_MAC_VLAN_TAG 0x0050
#define SYN_MAC_VLAN_HASH_TBL 0x0058
#define SYN_MAC_VLAN_INCL 0x0060
#define SYN_MAC_INNER_VLAN_INCL 0x0064
#define SYN_MAC_RX_ETH_TYP_MATCH 0x006c
#define SYN_MAC_Q0_TX_FLOW_CTL 0x0070
#define SYN_MAC_Q1_TX_FLOW_CTL 0x0074
#define SYN_MAC_Q2_TX_FLOW_CTL 0x0078
#define SYN_MAC_Q3_TX_FLOW_CTL 0x007c
#define SYN_MAC_Q4_TX_FLOW_CTL 0x0080
#define SYN_MAC_Q5_TX_FLOW_CTL 0x0084
#define SYN_MAC_Q6_TX_FLOW_CTL 0x0088
#define SYN_MAC_Q7_TX_FLOW_CTL 0x008c
#define SYN_MAC_RX_FLOW_CTL 0x0090
#define SYN_MAC_RXQ_CTL0 0x00a0
#define SYN_MAC_RXQ_CTL1 0x00a4
#define SYN_MAC_RXQ_CTL2 0x00a8
#define SYN_MAC_RXQ_CTL3 0x00ac
#define SYN_MAC_INT_STATUS 0x00b0
#define SYN_MAC_INT_ENABLE 0x00b4
#define SYN_MAC_TX_RX_STATUS 0x00b8
#define SYN_MAC_PMT_CTL_STATUS 0x00c0
#define SYN_MAC_RWK_PACKET_FILTER 0x00c4
#define SYN_MAC_LPI_CTL_STATUS 0x00d0
#define SYN_MAC_LPI_TIMER_STATUS 0x00d4
#define SYN_MAC_VERSION 0x0110
#define SYN_MAC_DEBUG 0x0114
#define SYN_MAC_FW_FEATURE0 0x011c
#define SYN_MAC_FW_FEATURE1 0x0120
#define SYN_MAC_FW_FEATURE2 0x0124
#define SYN_MAC_GPIO_CTL 0x0278
#define SYN_MAC_GPIO_STATUS 0x027c
#define SYN_MAC_ADDR0_HIGH 0x0300
#define SYN_MAC_ADDR0_LOW 0x0304
#define SYN_MAC_ADDR1_HIGH 0x0308
#define SYN_MAC_ADDR1_LOW 0x030c
#define SYN_MAC_TS_CTL 0x0d00
#define SYN_MAC_SUB_SEC_INCR 0x0d04
#define SYN_MAC_SYS_TIME_SECS 0x0d08
#define SYN_MAC_SYS_TIME_NSECS 0x0d0c
#define SYN_MAC_SYS_TIME_SECS_UPDATE 0x0d10
#define SYN_MAC_SYS_TIME_NSECS_UPDATE 0x0d14
#define SYN_MAC_TS_ADDEND 0x0d18
#define SYN_MAC_TS_STATUS 0x0d20
#define SYN_MAC_TX_TS_STATUS_NSECS 0x0d30
#define SYN_MAC_TX_TS_STATUS_SECS 0x0d34
#define SYN_MAC_PPS_CTL 0x0d70
#define SYN_MAC_MMC_CTL 0x0800
#define SYN_MAC_MMC_RX_INT 0x0804
#define SYN_MAC_MMC_TX_INT 0x0808
#define SYN_MAC_MMC_RX_INT_EN 0x080c
#define SYN_MAC_MMC_TX_INT_EN 0x0810
/* MAC TX MMC Counters */
#define SYN_MAC_MMC_TX_BCAST_LO 0x0824
#define SYN_MAC_MMC_TX_BCAST_HI 0x0828
#define SYN_MAC_MMC_TX_FRAME_LO 0x0894
#define SYN_MAC_MMC_TX_FRAME_HI 0x0898
#define SYN_MAC_MMC_TX_MCAST_LO 0x082c
#define SYN_MAC_MMC_TX_MCAST_HI 0x0830
#define SYN_MAC_MMC_TX_PKT64_LO 0x0834
#define SYN_MAC_MMC_TX_PKT64_HI 0x0838
#define SYN_MAC_MMC_TX_PKT65TO127_LO 0x083c
#define SYN_MAC_MMC_TX_PKT65TO127_HI 0x0840
#define SYN_MAC_MMC_TX_PKT128TO255_LO 0x0844
#define SYN_MAC_MMC_TX_PKT128TO255_HI 0x0848
#define SYN_MAC_MMC_TX_PKT256TO511_LO 0x084c
#define SYN_MAC_MMC_TX_PKT256TO511_HI 0x0850
#define SYN_MAC_MMC_TX_PKT512TO1023_LO 0x0854
#define SYN_MAC_MMC_TX_PKT512TO1023_HI 0x0858
#define SYN_MAC_MMC_TX_PKT1024TOMAX_LO 0x085c
#define SYN_MAC_MMC_TX_PKT1024TOMAX_HI 0x0860
#define SYN_MAC_MMC_TX_UNICAST_LO 0x0864
#define SYN_MAC_MMC_TX_UNICAST_HI 0x0868
#define SYN_MAC_MMC_TX_MCAST_GB_LO 0x086c
#define SYN_MAC_MMC_TX_MCAST_GB_HI 0x0870
#define SYN_MAC_MMC_TX_BCAST_GB_LO 0x0874
#define SYN_MAC_MMC_TX_BCAST_GB_HI 0x0878
#define SYN_MAC_MMC_TX_UNDERFLOW_ERR_LO 0x087c
#define SYN_MAC_MMC_TX_UNDERFLOW_ERR_HI 0x0880
#define SYN_MAC_MMC_TX_BYTES_LO 0x0884
#define SYN_MAC_MMC_TX_BYTES_HI 0x0888
#define SYN_MAC_MMC_TX_PAUSE_FRAME_LO 0x0894
#define SYN_MAC_MMC_TX_PAUSE_FRAME_HI 0x0898
#define SYN_MAC_MMC_TX_VLAN_LO 0x089c
#define SYN_MAC_MMC_TX_VLAN_HI 0x08a0
#define SYN_MAC_MMC_TX_LPI_USEC_CTR_LO 0x08a4
#define SYN_MAC_MMC_TX_LPI_USEC_CTR_HI 0x08a8
/* MAC RX MMC Counters */
#define SYN_MAC_MMC_RX_FRAME_LO 0x0900
#define SYN_MAC_MMC_RX_FRAME_HI 0x0904
#define SYN_MAC_MMC_RX_BYTES_LO 0x0910
#define SYN_MAC_MMC_RX_BYTES_HI 0x0914
#define SYN_MAC_MMC_RX_BCAST_LO 0x0918
#define SYN_MAC_MMC_RX_BCAST_HI 0x091c
#define SYN_MAC_MMC_RX_MCAST_LO 0x0920
#define SYN_MAC_MMC_RX_MCAST_HI 0x0924
#define SYN_MAC_MMC_RX_CRC_ERR_LO 0x0928
#define SYN_MAC_MMC_RX_CRC_ERR_HI 0x092c
#define SYN_MAC_MMC_RX_RUNT_ERR 0x0930
#define SYN_MAC_MMC_RX_JABBER_ERR 0x0934
#define SYN_MAC_MMC_RX_UNDERSIZE 0x0938
#define SYN_MAC_MMC_RX_OVERSIZE 0x093c
#define SYN_MAC_MMC_RX_PKT64_LO 0x0940
#define SYN_MAC_MMC_RX_PKT64_HI 0x0944
#define SYN_MAC_MMC_RX_PKT65TO127_LO 0x0948
#define SYN_MAC_MMC_RX_PKT65TO127_HI 0x094c
#define SYN_MAC_MMC_RX_PKT128TO255_LO 0x0950
#define SYN_MAC_MMC_RX_PKT128TO255_HI 0x0954
#define SYN_MAC_MMC_RX_PKT256TO511_LO 0x0958
#define SYN_MAC_MMC_RX_PKT256TO511_HI 0x095c
#define SYN_MAC_MMC_RX_PKT512TO1023_LO 0x0960
#define SYN_MAC_MMC_RX_PKT512TO1023_HI 0x0964
#define SYN_MAC_MMC_RX_PKT1024TOMAX_LO 0x0968
#define SYN_MAC_MMC_RX_PKT1024TOMAX_HI 0x096c
#define SYN_MAC_MMC_RX_UNICAST_LO 0x0970
#define SYN_MAC_MMC_RX_UNICAST_HI 0x0974
#define SYN_MAC_MMC_RX_LEN_ERR_LO 0x0978
#define SYN_MAC_MMC_RX_LEN_ERR_HI 0x097c
#define SYN_MAC_MMC_RX_PAUSE_FRAME_LO 0x0988
#define SYN_MAC_MMC_RX_PAUSE_FRAME_HI 0x098c
#define SYN_MAC_MMC_RX_FIFO_OVERFLOW_LO 0x0990
#define SYN_MAC_MMC_RX_FIFO_OVERFLOW_HI 0x0994
#define SYN_MAC_MMC_RX_VLAN_FRAME_LO 0x0998
#define SYN_MAC_MMC_RX_VLAN_FRAME_HI 0x099c
#define SYN_MAC_MMC_RX_LPI_USEC_CTR_LO 0x09a4
#define SYN_MAC_MMC_RX_LPI_USEC_CTR_HI 0x09a8
#define SYN_MAC_MMC_RX_DISCARD_FRAME_LO 0x09ac
#define SYN_MAC_MMC_RX_DISCARD_FRAME_HI 0x09b0
/* MAC Register Bit Definitions*/
/* SYN_MAC_Q0_TX_FLOW_CTL Bit definitions */
#define SYN_MAC_TX_PAUSE_SEND 0x00000001
#define SYN_MAC_TX_FLOW_ENABLE 0x00000002
#define SYN_MAC_TX_PAUSE_LOW_THRESHOLD 0x00000070
#define SYN_MAC_ADDR_RSVD_BIT 0x80000000
/* SYN_MAC_RX_FLOW_CTL Bit definitions */
#define SYN_MAC_RX_FLOW_ENABLE 0x00000001
/* SYN_MAC_TX_CONFIG Bit definitions */
#define SYN_MAC_TX_ENABLE 0x00000001
#define SYN_MAC_TX_SPEED_SELECT 0x60000000
/* SYN_MAC_RX_CONFIG Bit definitions */
#define SYN_MAC_RX_ENABLE 0x00000001
#define SYN_MAC_JUMBO_FRAME_ENABLE 0x00000100
#define SYN_MAC_SPEED_10G 0x0
#define SYN_MAC_SPEED_2_5G 0x2
#define SYN_MAC_SPEED_1G 0x3
#define SYN_MAC_SPEED_BITPOS 29
#define SYN_MAC_SPEED_BITMASK 0x3
#define SYN_MAC_DEFAULT_MAX_FRAME_SIZE 1518
#define SYN_MAC_MAX_FRAME_SIZE_BITPOS 16
#define SYN_MAC_MAX_FRAME_SIZE_BITMASK 0x3fff
/* SYN_MAC_MMC_CTL Bit definitions */
#define SYN_MAC_MMC_RSTONRD 0x00000004
/*
 * MTL Register Offset
 */
#define SYN_MTL_OPER_MODE 0x1000
#define SYN_MTL_DEBUG_CTL 0x1008
#define SYN_MTL_DEBUG_STATUS 0x100c
#define SYN_MTL_DEBUG_DATA 0x1010
#define SYN_MTL_INT_STATUS 0x1020
#define SYN_MTL_RXQ_DMA_MAP0 0x1030
#define SYN_MTL_RXQ_DMA_MAP1 0x1034
#define SYN_MTL_RXQ_DMA_MAP2 0x1038
#define SYN_MTL_TC_PRIO_MAP0 0x1040
#define SYN_MTL_TC_PRIO_MAP1 0x1044
#define SYN_MTL_TXQ0_OPER_MODE 0x1100
#define SYN_MTL_TXQ0_UNDERFLOW 0x1104
#define SYN_MTL_TXQ0_DEBUG 0x1108
#define SYN_MTL_TC0_ETS_CTL 0x1110
#define SYN_MTL_TC0_ETS_STATUS 0x1114
#define SYN_MTL_TC0_QUANTUM_WEIGHT 0x1118
#define SYN_MTL_RXQ0_DEBUG 0x1148
#define SYN_MTL_RXQ0_CTL 0x114c
#define SYN_MTL_RXQ0_FLOW_CTL 0x1150
#define SYN_MTL_Q0_INT_ENABLE 0x1170
#define SYN_MTL_Q0_INT_STATUS 0x1174
/* MTL Register Bit definitions */
/*
 * DMA Register Offset
 */
#define SYN_DMA_MODE 0x3000
#define SYN_DMA_SYSBUS_MODE 0x3004
#define SYN_DMA_INT_STATUS 0x3008
#define SYN_DMA_AXI_TX_AR_ACE_CTL 0x3010
#define SYN_DMA_AXI_RX_AW_ACE_CTL 0x3018
#define SYN_DMA_AXI_TXRX_AWAR_ACE_CTL 0x301c
#define SYN_DMA_DEBUG_STATUS0 0x3020
#define SYN_DMA_DEBUG_STATUS1 0x3024
#define SYN_DMA_TX_EDMA_CTL 0x3040
#define SYN_DMA_RX_EDMA_CTL 0x3044
#define SYN_DMA_CH0_CTL 0x3100
#define SYN_DMA_CH0_TX_CTL 0x3104
#define SYN_DMA_CH0_RX_CTL 0x3108
#define SYN_DMA_CH0_TXDESC_LIST_HADDR 0x3110
#define SYN_DMA_CH0_TXDESC_LIST_LADDR 0x3114
#define SYN_DMA_CH0_RXDESC_LIST_HADDR 0x3118
#define SYN_DMA_CH0_RXDESC_LIST_LADDR 0x311c
#define SYN_DMA_CH0_TXDESC_TAIL_LPTR 0x3124
#define SYN_DMA_CH0_RXDESC_TAIL_LPTR 0x312c
#define SYN_DMA_CH0_TXDESC_RING_LEN 0x3130
#define SYN_DMA_CH0_RXDESC_RING_LEN 0x3134
#define SYN_DMA_INT_ENABLE 0x3138
#define SYN_DMA_RX_INT_WDOG_TIMER 0x313c
/* DMA Register Bit definitions */
#endif /*__SYN_REG_H__*/
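
A small sketch, assuming the bit-field macros above are used as defined (helper name hypothetical): the configured port speed can be decoded from SYN_MAC_TX_CONFIG with the BITPOS/BITMASK pair.

/* Illustrative only: decode the speed-select field from SYN_MAC_TX_CONFIG. */
static inline uint32_t syn_xgmac_speed_sketch(struct nss_gmac_hal_dev *nghd)
{
	uint32_t val = hal_read_reg(nghd->mac_base, SYN_MAC_TX_CONFIG);

	return (val >> SYN_MAC_SPEED_BITPOS) & SYN_MAC_SPEED_BITMASK; /* e.g. SYN_MAC_SPEED_10G */
}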

View File

@@ -1,31 +0,0 @@
/*
**************************************************************************
* Copyright (c) 2016, 2019-2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
/*
* This file includes declarations defined by the EDMA
* dataplane and used by other layers of this driver.
*/
#ifndef __NSS_DP_EDMA__
#define __NSS_DP_EDMA__
extern int edma_init(void);
extern void edma_cleanup(bool is_dp_override);
extern struct nss_dp_data_plane_ops nss_dp_edma_ops;
#endif /*__NSS_DP_EDMA__ */

View File

@@ -1,48 +0,0 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __NSS_DP_HAL_H__
#define __NSS_DP_HAL_H__
#include "nss_dp_dev.h"
/*
* nss_dp_hal_get_gmac_ops()
* Returns gmac hal ops based on the GMAC type.
*/
static inline struct nss_gmac_hal_ops *nss_dp_hal_get_gmac_ops(uint32_t gmac_type)
{
return dp_global_ctx.gmac_hal_ops[gmac_type];
}
/*
* nss_dp_hal_set_gmac_ops()
* Sets dp global gmac hal ops based on the GMAC type.
*/
static inline void nss_dp_hal_set_gmac_ops(struct nss_gmac_hal_ops *hal_ops, uint32_t gmac_type)
{
dp_global_ctx.gmac_hal_ops[gmac_type] = hal_ops;
}
/*
* HAL functions implemented by SoC specific source files.
*/
extern bool nss_dp_hal_init(void);
extern void nss_dp_hal_cleanup(void);
extern void nss_dp_hal_clk_enable(struct nss_dp_dev *dp_priv);
extern struct nss_dp_data_plane_ops *nss_dp_hal_get_data_plane_ops(void);
#endif /* __NSS_DP_HAL_H__ */
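
A brief sketch of the intended registration/lookup flow, assuming the SoC-specific nss_dp_hal_init() registers its HAL ops this way; the bodies below are illustrative, not the actual implementation.

/* Illustrative only: register the Synopsys XGMAC ops, then look them up at probe time. */
static bool nss_dp_hal_init_sketch(void)
{
	nss_dp_hal_set_gmac_ops(&syn_hal_ops, GMAC_HAL_TYPE_SYN_XGMAC);
	return true;
}

static struct nss_gmac_hal_ops *nss_dp_hal_probe_lookup_sketch(uint32_t mactype)
{
	/* mactype comes from gmac_hal_platform_data.mactype */
	return nss_dp_hal_get_gmac_ops(mactype);
}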

View File

@@ -1,162 +0,0 @@
/*
**************************************************************************
* Copyright (c) 2016-2017,2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
#ifndef __NSS_DP_HAL_IF_H__
#define __NSS_DP_HAL_IF_H__
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <uapi/linux/if_link.h>
enum gmac_device_type {
GMAC_HAL_TYPE_QCOM = 0, /* 1G GMAC type */
GMAC_HAL_TYPE_SYN_XGMAC,/* Synopsys XGMAC type */
GMAC_HAL_TYPE_SYN_GMAC, /* Synopsys 1G GMAC type */
GMAC_HAL_TYPE_MAX
};
/*
* gmac_hal_platform_data
*/
struct gmac_hal_platform_data {
struct net_device *netdev; /* Net device */
uint32_t reg_len; /* Register space length */
uint32_t mactype; /* MAC chip type */
uint32_t macid; /* MAC sequence id on the Chip */
};
/*
* NSS GMAC HAL device data
*/
struct nss_gmac_hal_dev {
void __iomem *mac_base; /* Base address of MAC registers */
uint32_t version; /* GMAC Revision version */
uint32_t drv_flags; /* Driver specific feature flags */
/*
* Phy related stuff
*/
uint32_t link_state; /* Link status as reported by the Phy */
uint32_t duplex_mode; /* Duplex mode of the Phy */
uint32_t speed; /* Speed of the Phy */
uint32_t loop_back_mode;/* Loopback status of the Phy */
uint32_t phy_mii_type; /* RGMII/SGMII/XSGMII */
struct net_device *netdev;
struct resource *memres;
uint32_t mac_reg_len; /* MAC Register block length */
uint32_t mac_id; /* MAC sequence id on the Chip */
spinlock_t slock; /* lock to protect concurrent reg access */
};
/*
* nss_gmac_hal_ops
*/
struct nss_gmac_hal_ops {
void* (*init)(struct gmac_hal_platform_data *);
void (*exit)(struct nss_gmac_hal_dev *);
int32_t (*start)(struct nss_gmac_hal_dev *);
int32_t (*stop)(struct nss_gmac_hal_dev *);
void (*setmacaddr)(struct nss_gmac_hal_dev *, uint8_t *);
void (*getmacaddr)(struct nss_gmac_hal_dev *, uint8_t *);
void (*promisc)(struct nss_gmac_hal_dev *, bool enabled);
void (*multicast)(struct nss_gmac_hal_dev *, bool enabled);
void (*broadcast)(struct nss_gmac_hal_dev *, bool enabled);
void (*rxcsumoffload)(struct nss_gmac_hal_dev *, bool enabled);
void (*txcsumoffload)(struct nss_gmac_hal_dev *, bool enabled);
void (*rxflowcontrol)(struct nss_gmac_hal_dev *, bool enabled);
void (*txflowcontrol)(struct nss_gmac_hal_dev *, bool enabled);
int32_t (*setspeed)(struct nss_gmac_hal_dev *, uint32_t);
uint32_t (*getspeed)(struct nss_gmac_hal_dev *);
void (*setduplex)(struct nss_gmac_hal_dev *, uint8_t);
uint8_t (*getduplex)(struct nss_gmac_hal_dev *);
int32_t (*getstats)(struct nss_gmac_hal_dev *);
int32_t (*setmaxframe)(struct nss_gmac_hal_dev *, uint32_t);
int32_t (*getmaxframe)(struct nss_gmac_hal_dev *);
int32_t (*getndostats)(struct nss_gmac_hal_dev *,
struct rtnl_link_stats64 *);
void (*sendpause)(struct nss_gmac_hal_dev *);
void (*stoppause)(struct nss_gmac_hal_dev *);
int32_t (*getssetcount)(struct nss_gmac_hal_dev *, int32_t);
int32_t (*getstrings)(struct nss_gmac_hal_dev *, int32_t, uint8_t *);
int32_t (*getethtoolstats)(struct nss_gmac_hal_dev *, uint64_t *);
};
extern struct nss_gmac_hal_ops qcom_hal_ops;
extern struct nss_gmac_hal_ops syn_hal_ops;
/**********************************************************
* Common functions
**********************************************************/
/*
* hal_read_reg()
*/
static inline uint32_t hal_read_reg(void __iomem *regbase, uint32_t regoffset)
{
return readl_relaxed(regbase + regoffset);
}
/*
* hal_write_reg()
*/
static inline void hal_write_reg(void __iomem *regbase, uint32_t regoffset,
uint32_t regdata)
{
writel_relaxed(regdata, regbase + regoffset);
}
/*
* hal_set_reg_bits()
*/
static inline void hal_set_reg_bits(struct nss_gmac_hal_dev *nghd,
uint32_t regoffset,
uint32_t bitpos)
{
uint32_t data;
spin_lock(&nghd->slock);
data = bitpos | hal_read_reg(nghd->mac_base, regoffset);
hal_write_reg(nghd->mac_base, regoffset, data);
spin_unlock(&nghd->slock);
}
/*
* hal_clear_reg_bits()
*/
static inline void hal_clear_reg_bits(struct nss_gmac_hal_dev *nghd,
uint32_t regoffset,
uint32_t bitpos)
{
uint32_t data;
spin_lock(&nghd->slock);
data = ~bitpos & hal_read_reg(nghd->mac_base, regoffset);
hal_write_reg(nghd->mac_base, regoffset, data);
spin_unlock(&nghd->slock);
}
/*
* hal_check_reg_bits()
*/
static inline bool hal_check_reg_bits(void __iomem *regbase,
uint32_t regoffset,
uint32_t bitpos)
{
return (bitpos & hal_read_reg(regbase, regoffset)) != 0;
}
#endif /* __NSS_DP_HAL_IF_H__ */

View File

@@ -1,336 +0,0 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
#include "syn_data_plane.h"
#include "syn_reg.h"
#define SYN_DP_NAPI_BUDGET 64
/*
* GMAC Ring info
*/
struct syn_dp_info dp_info[NSS_DP_HAL_MAX_PORTS];
/*
* syn_dp_napi_poll()
* Scheduled by napi to process RX and TX complete
*/
static int syn_dp_napi_poll(struct napi_struct *napi, int budget)
{
struct nss_dp_dev *gmac_dev = container_of(napi, struct nss_dp_dev, napi);
struct syn_dp_info *dev_info = &dp_info[gmac_dev->macid - 1];
int work_done;
/*
* Update GMAC stats
*/
spin_lock_bh(&dev_info->stats_lock);
dev_info->stats.stats.rx_missed += syn_get_rx_missed(gmac_dev->gmac_hal_ctx);
dev_info->stats.stats.rx_missed += syn_get_fifo_overflows(gmac_dev->gmac_hal_ctx);
spin_unlock_bh(&dev_info->stats_lock);
syn_dp_process_tx_complete(gmac_dev, dev_info);
work_done = syn_dp_rx(gmac_dev, dev_info, budget);
syn_dp_rx_refill(gmac_dev, dev_info);
if (work_done < budget) {
napi_complete(napi);
syn_enable_dma_interrupt(gmac_dev->gmac_hal_ctx);
}
return work_done;
}
/*
* syn_dp_handle_irq()
* Process IRQ and schedule napi
*/
static irqreturn_t syn_dp_handle_irq(int irq, void *ctx)
{
struct nss_dp_dev *gmac_dev = (struct nss_dp_dev *)ctx;
struct nss_gmac_hal_dev *nghd = gmac_dev->gmac_hal_ctx;
syn_clear_dma_status(nghd);
syn_disable_dma_interrupt(nghd);
/*
* Schedule NAPI
*/
napi_schedule(&gmac_dev->napi);
return IRQ_HANDLED;
}
/*
* syn_dp_if_init()
* Initialize the GMAC data plane operations
*/
static int syn_dp_if_init(struct nss_dp_data_plane_ctx *dpc)
{
struct net_device *netdev = dpc->dev;
struct nss_dp_dev *gmac_dev = (struct nss_dp_dev *)netdev_priv(netdev);
uint32_t macid = gmac_dev->macid;
struct syn_dp_info *dev_info = &dp_info[macid - 1];
struct device *dev = &gmac_dev->pdev->dev;
int err;
if (!netdev) {
netdev_dbg(netdev, "nss_dp_gmac: Invalid netdev pointer %px\n", netdev);
return NSS_DP_FAILURE;
}
netdev_info(netdev, "nss_dp_gmac: Registering netdev %s(qcom-id:%d) with GMAC\n", netdev->name, macid);
if (!dev_info->napi_added) {
netif_napi_add(netdev, &gmac_dev->napi, syn_dp_napi_poll, SYN_DP_NAPI_BUDGET);
/*
* Requesting irq
*/
netdev->irq = platform_get_irq(gmac_dev->pdev, 0);
err = request_irq(netdev->irq, syn_dp_handle_irq, 0, "nss-dp-gmac", gmac_dev);
if (err) {
netdev_dbg(netdev, "err_code:%d, Mac %d IRQ %d request failed\n", err,
gmac_dev->macid, netdev->irq);
return NSS_DP_FAILURE;
}
gmac_dev->drv_flags |= NSS_DP_PRIV_FLAG(IRQ_REQUESTED);
dev_info->napi_added = 1;
}
/*
* Forcing the kernel to use 32-bit DMA addressing
*/
dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
/*
* Initialize the Tx/Rx ring
*/
if (syn_dp_setup_rings(gmac_dev, netdev, dev, dev_info)) {
netdev_dbg(netdev, "nss_dp_gmac: Error initializing GMAC rings %px\n", netdev);
return NSS_DP_FAILURE;
}
spin_lock_init(&dev_info->data_lock);
spin_lock_init(&dev_info->stats_lock);
netdev_dbg(netdev,"Synopsys GMAC dataplane initialized\n");
return NSS_DP_SUCCESS;
}
/*
* syn_dp_if_open()
* Open the GMAC data plane operations
*/
static int syn_dp_if_open(struct nss_dp_data_plane_ctx *dpc, uint32_t tx_desc_ring,
uint32_t rx_desc_ring, uint32_t mode)
{
struct net_device *netdev = dpc->dev;
struct nss_dp_dev *gmac_dev = (struct nss_dp_dev *)netdev_priv(netdev);
struct nss_gmac_hal_dev *nghd = gmac_dev->gmac_hal_ctx;
syn_enable_dma_rx(nghd);
syn_enable_dma_tx(nghd);
napi_enable(&gmac_dev->napi);
syn_enable_dma_interrupt(nghd);
netdev_dbg(netdev, "Synopsys GMAC dataplane opened\n");
return NSS_DP_SUCCESS;
}
/*
* syn_dp_if_close()
* Close the GMAC data plane operations
*/
static int syn_dp_if_close(struct nss_dp_data_plane_ctx *dpc)
{
struct net_device *netdev = dpc->dev;
struct nss_dp_dev *gmac_dev = (struct nss_dp_dev *)netdev_priv(netdev);
struct nss_gmac_hal_dev *nghd = gmac_dev->gmac_hal_ctx;
syn_disable_dma_rx(nghd);
syn_disable_dma_tx(nghd);
syn_disable_dma_interrupt(nghd);
napi_disable(&gmac_dev->napi);
netdev_dbg(netdev, "Synopsys GMAC dataplane closed\n");
return NSS_DP_SUCCESS;
}
/*
* syn_dp_if_link_state()
* Change of link for the dataplane
*/
static int syn_dp_if_link_state(struct nss_dp_data_plane_ctx *dpc, uint32_t link_state)
{
struct net_device *netdev = dpc->dev;
/*
* Switch interrupt based on the link state
*/
if (link_state) {
netdev_dbg(netdev, "Data plane link up\n");
} else {
netdev_dbg(netdev, "Data plane link down\n");
}
return NSS_DP_SUCCESS;
}
/*
* syn_dp_if_mac_addr()
*/
static int syn_dp_if_mac_addr(struct nss_dp_data_plane_ctx *dpc, uint8_t *addr)
{
return NSS_DP_SUCCESS;
}
/*
* syn_dp_if_change_mtu()
*/
static int syn_dp_if_change_mtu(struct nss_dp_data_plane_ctx *dpc, uint32_t mtu)
{
/*
* TODO: Work on MTU fix along with register update for frame length
*/
return NSS_DP_SUCCESS;
}
/*
* syn_dp_if_set_features()
* Set the supported net_device features
*/
static void syn_dp_if_set_features(struct nss_dp_data_plane_ctx *dpc)
{
struct net_device *netdev = dpc->dev;
netdev->features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
netdev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
netdev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
netdev->wanted_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
}
/*
* syn_dp_if_xmit()
* Dataplane method to transmit the packet
*/
static netdev_tx_t syn_dp_if_xmit(struct nss_dp_data_plane_ctx *dpc, struct sk_buff *skb)
{
struct net_device *netdev = dpc->dev;
struct nss_dp_dev *gmac_dev = (struct nss_dp_dev *)netdev_priv(netdev);
struct syn_dp_info *dev_info = &dp_info[gmac_dev->macid - 1];
int nfrags = skb_shinfo(skb)->nr_frags;
/*
* Most likely, it is not a fragmented pkt, optimize for that
*/
if (likely(nfrags == 0)) {
if (syn_dp_tx(gmac_dev, dev_info, skb)) {
goto drop;
}
return NETDEV_TX_OK;
}
drop:
dev_kfree_skb_any(skb);
dev_info->stats.stats.tx_dropped++;
return NETDEV_TX_OK;	/* skb was consumed (dropped), do not ask the stack to requeue it */
}
/*
* syn_dp_if_pause_on_off()
*/
static int syn_dp_if_pause_on_off(struct nss_dp_data_plane_ctx *dpc, uint32_t pause_on)
{
return NSS_DP_SUCCESS;
}
/*
* syn_dp_if_get_stats
* Get Synopsys GMAC data plane stats
*/
static void syn_dp_if_get_stats(struct nss_dp_data_plane_ctx *dpc, struct nss_dp_gmac_stats *stats)
{
struct net_device *netdev = dpc->dev;
struct nss_dp_dev *gmac_dev = (struct nss_dp_dev *)netdev_priv(netdev);
struct syn_dp_info *dev_info = &dp_info[gmac_dev->macid - 1];
spin_lock_bh(&dev_info->stats_lock);
netdev_dbg(netdev, "GETTING stats: rx_packets:%llu rx_bytes:%llu mmc_rx_crc_errors:%llu", dev_info->stats.stats.rx_packets,
dev_info->stats.stats.rx_bytes, dev_info->stats.stats.mmc_rx_crc_errors);
memcpy(stats, &dev_info->stats, sizeof(*stats));
spin_unlock_bh(&dev_info->stats_lock);
}
/*
* syn_dp_if_deinit()
* Free all the Synopsys GMAC resources
*/
static int syn_dp_if_deinit(struct nss_dp_data_plane_ctx *dpc)
{
struct net_device *netdev = dpc->dev;
struct nss_dp_dev *gmac_dev = (struct nss_dp_dev *)netdev_priv(netdev);
struct syn_dp_info *dev_info = &dp_info[gmac_dev->macid - 1];
if (dev_info->napi_added) {
/*
* Remove interrupt handlers and NAPI
*/
if (gmac_dev->drv_flags & NSS_DP_PRIV_FLAG(IRQ_REQUESTED)) {
netdev_dbg(netdev, "Freeing IRQ %d for Mac %d\n", netdev->irq, gmac_dev->macid);
synchronize_irq(netdev->irq);
free_irq(netdev->irq, gmac_dev);
gmac_dev->drv_flags &= ~NSS_DP_PRIV_FLAG(IRQ_REQUESTED);
}
netif_napi_del(&gmac_dev->napi);
dev_info->napi_added = 0;
}
/*
* Cleanup and free the rings
*/
syn_dp_cleanup_rings(gmac_dev, netdev, dev_info);
return NSS_DP_SUCCESS;
}
/*
* nss_dp_gmac_ops
* Data plane operations for Synopsys GMAC
*/
struct nss_dp_data_plane_ops nss_dp_gmac_ops = {
.init = syn_dp_if_init,
.open = syn_dp_if_open,
.close = syn_dp_if_close,
.link_state = syn_dp_if_link_state,
.mac_addr = syn_dp_if_mac_addr,
.change_mtu = syn_dp_if_change_mtu,
.xmit = syn_dp_if_xmit,
.set_features = syn_dp_if_set_features,
.pause_on_off = syn_dp_if_pause_on_off,
.get_stats = syn_dp_if_get_stats,
.deinit = syn_dp_if_deinit,
};
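/*
 * Illustrative sketch (not part of the original sources): the nss-dp core
 * consumes this ops table through nss_dp_dev->data_plane_ops; a caller that
 * owns a nss_dp_data_plane_ctx would drive it roughly as below. The function
 * name is hypothetical.
 */
static int __maybe_unused syn_dp_ops_bringup_example(struct nss_dp_data_plane_ctx *dpc)
{
/* Allocate rings, request the IRQ and add NAPI for this GMAC */
if (nss_dp_gmac_ops.init(dpc) != NSS_DP_SUCCESS)
return NSS_DP_FAILURE;
/* Enable DMA, NAPI and interrupts; the ring/mode arguments are ignored by syn_dp_if_open() */
return nss_dp_gmac_ops.open(dpc, 0, 0, 0);
}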


@@ -1,109 +0,0 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __NSS_DP_SYN_DATAPLANE__
#define __NSS_DP_SYN_DATAPLANE__
#include "nss_dp_dev.h"
#include "syn_dma_desc.h"
#define SYN_DP_TX_DESC_SIZE 128 /* Tx Descriptors needed in the descriptor pool/queue */
#define SYN_DP_RX_DESC_SIZE 128 /* Rx Descriptors needed in the descriptor pool/queue */
#define SYN_DP_MINI_JUMBO_FRAME_MTU 1978
#define SYN_DP_MAX_DESC_BUFF 0x1FFF /* Max size of buffer that can be programed into one field of desc */
/*
* syn_dp_info
* Synopsys GMAC Dataplane information
*/
struct syn_dp_info {
struct nss_dp_gmac_stats stats; /* GMAC driver stats */
struct sk_buff *rx_skb_list[SYN_DP_RX_DESC_SIZE]; /* Rx skb pool backing the Rx DMA descriptors */
dma_addr_t rx_desc_dma; /* DMA-able address of first rx descriptor
either in ring or chain mode, this is
used by the GMAC device */
struct dma_desc *rx_desc; /* start address of RX descriptors ring or
chain, this is used by the driver */
uint32_t busy_rx_desc; /* Number of Rx Descriptors owned by
DMA at any given time */
uint32_t rx_desc_count; /* number of rx descriptors in the
rx descriptor queue/pool */
uint32_t rx_busy; /* index of the rx descriptor owned by DMA,
obtained by nss_gmac_get_rx_qptr() */
uint32_t rx_next; /* index of the rx descriptor next available
with driver, given to DMA by
nss_gmac_set_rx_qptr()*/
struct dma_desc *rx_busy_desc; /* Rx Descriptor address corresponding
to the index rx_busy */
struct dma_desc *rx_next_desc; /* Rx Descriptor address corresponding
to the index rx_next */
struct sk_buff *tx_skb_list[SYN_DP_TX_DESC_SIZE]; /* Tx skb pool backing the Tx DMA descriptors */
dma_addr_t tx_desc_dma; /* Dma-able address of first tx descriptor
either in ring or chain mode, this is used
by the GMAC device */
struct dma_desc *tx_desc; /* start address of TX descriptors ring or
chain, this is used by the driver */
uint32_t busy_tx_desc; /* Number of Tx Descriptors owned by
DMA at any given time */
uint32_t tx_desc_count; /* number of tx descriptors in the
tx descriptor queue/pool */
uint32_t tx_busy; /* index of the tx descriptor owned by DMA,
is obtained by nss_gmac_get_tx_qptr() */
uint32_t tx_next; /* index of the tx descriptor next available
with driver, given to DMA by
nss_gmac_set_tx_qptr() */
struct dma_desc *tx_busy_desc; /* Tx Descriptor address corresponding
to the index tx_busy */
struct dma_desc *tx_next_desc; /* Tx Descriptor address corresponding
to the index tx_next */
spinlock_t data_lock; /* Lock to protect datapath */
spinlock_t stats_lock; /* Lock to protect stats */
int napi_added; /* flag to indicate napi add status */
};
/*
* GMAC Tx/Rx APIs
*/
int syn_dp_setup_rings(struct nss_dp_dev *gmac_dev, struct net_device *netdev, struct device *dev, struct syn_dp_info *dev_info);
int syn_dp_cleanup_rings(struct nss_dp_dev *gmac_dev, struct net_device *netdev, struct syn_dp_info *dev_info);
int syn_dp_rx(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info, int budget);
void syn_dp_rx_refill(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info);
int syn_dp_tx(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info, struct sk_buff *skb);
void syn_dp_process_tx_complete(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info);
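/*
 * Illustrative sketch (not part of the original sources): a minimal NAPI
 * poll body built on the Tx/Rx APIs above, assuming the caller resolves its
 * struct syn_dp_info the same way syn_dp_if_init() does. The name is
 * hypothetical; the real poll routine is syn_dp_napi_poll() in the .c file.
 */
static inline int syn_dp_poll_example(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info, int budget)
{
int work_done;
/* Reap completed transmissions, then receive at most 'budget' frames */
syn_dp_process_tx_complete(gmac_dev, dev_info);
work_done = syn_dp_rx(gmac_dev, dev_info, budget);
/* Hand fresh buffers back to the DMA engine for the Rx ring */
syn_dp_rx_refill(gmac_dev, dev_info);
return work_done;
}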
#endif /* __NSS_DP_SYN_DATAPLANE__ */


@@ -1,342 +0,0 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __SYN_DESC__
#define __SYN_DESC__
/**********************************************************
* DMA Engine descriptors
**********************************************************/
/*
******Enhanced Descriptor structure to support 8K buffer per buffer *******
dma_rx_base_addr = 0x000C, CSR3 - Receive Descriptor list base address
dma_rx_base_addr is the pointer to the first Rx Descriptors.
The Descriptor format in Little endian with a 32 bit Data bus is as shown below.
Similarly
dma_tx_base_addr = 0x0010, CSR4 - Transmit Descriptor list base address
dma_tx_base_addr is the pointer to the first Tx Descriptors.
The Descriptor format in Little endian with a 32 bit Data bus is as shown below.
-------------------------------------------------------------------------
RDES0 |OWN (31)| Status |
-------------------------------------------------------------------------
RDES1 | Ctrl | Res | Byte Count Buffer 2 | Ctrl | Res | Byte Count Buffer 1 |
-------------------------------------------------------------------------
RDES2 | Buffer 1 Address |
-------------------------------------------------------------------------
RDES3 | Buffer 2 Address / Next Descriptor Address |
-------------------------------------------------------------------------
RDES4 | Extended Status |
-------------------------------------------------------------------------
RDES5 | Reserved |
-------------------------------------------------------------------------
RDES6 | Receive Timestamp Low |
-------------------------------------------------------------------------
RDES7 | Receive Timestamp High |
-------------------------------------------------------------------------
------------------------------------------------------------------------
TDES0 |OWN (31)| Ctrl | Res | Ctrl | Res | Status |
------------------------------------------------------------------------
TDES1 | Res | Byte Count Buffer 2 | Res | Byte Count Buffer 1 |
------------------------------------------------------------------------
TDES2 | Buffer 1 Address |
------------------------------------------------------------------------
TDES3 | Buffer 2 Address / Next Descriptor Address |
------------------------------------------------------------------------
TDES4 | Reserved |
------------------------------------------------------------------------
TDES5 | Reserved |
------------------------------------------------------------------------
TDES6 | Transmit Timestamp Low |
------------------------------------------------------------------------
TDES7 | Transmit Timestamp Higher |
------------------------------------------------------------------------
*/
/*
* dma_descriptor_status
* status word of DMA descriptor
*/
enum dma_descriptor_status {
desc_own_by_dma = 0x80000000, /* (OWN)Descriptor is
owned by DMA engine */
desc_rx_da_filter_fail = 0x40000000, /* (AFM)Rx - DA Filter
Fail for the rx frame */
desc_rx_frame_length_mask = 0x3FFF0000, /* (FL)Receive descriptor
frame length */
desc_rx_frame_length_shift = 16,
desc_rx_error = 0x00008000, /* (ES)Error summary bit
- OR of the following bits:
DE || OE || IPC || GF || LC || RWT
|| RE || CE */
desc_rx_truncated = 0x00004000, /* (DE)Rx - no more descriptors
for receive frame */
desc_sa_filter_fail = 0x00002000, /* (SAF)Rx - SA Filter Fail for
the received frame */
desc_rx_length_error = 0x00001000, /* (LE)Rx - frm size not
matching with len field */
desc_rx_overflow = 0x00000800, /* (OE)Rx - frm was damaged due
to buffer overflow */
desc_rx_vlan_tag = 0x00000400, /* (VLAN)Rx - received frame
is a VLAN frame */
desc_rx_first = 0x00000200, /* (FS)Rx - first
descriptor of the frame */
desc_rx_last = 0x00000100, /* (LS)Rx - last
descriptor of the frame */
desc_rx_long_frame = 0x00000080, /* (Giant Frame)Rx - frame is
longer than 1518/1522 */
desc_rx_collision = 0x00000040, /* (LC)Rx - late collision
occurred during reception */
desc_rx_frame_ether = 0x00000020, /* (FT)Rx - Frame type - Ether,
otherwise 802.3 */
desc_rx_watchdog = 0x00000010, /* (RWT)Rx - watchdog timer
expired during reception */
desc_rx_mii_error = 0x00000008, /* (RE)Rx - error reported
by MII interface */
desc_rx_dribbling = 0x00000004, /* (DE)Rx - frame contains a
non-integer multiple of 8 bits */
desc_rx_crc = 0x00000002, /* (CE)Rx - CRC error */
desc_rx_ext_sts = 0x00000001, /* Extended Status Available
in RDES4 */
desc_tx_error = 0x00008000, /* (ES)Error summary Bits */
desc_tx_int_enable = 0x40000000, /* (IC)Tx - interrupt on
completion */
desc_tx_last = 0x20000000, /* (LS)Tx - Last segment of the
frame */
desc_tx_first = 0x10000000, /* (FS)Tx - First segment of the
frame */
desc_tx_disable_crc = 0x08000000, /* (DC)Tx - Add CRC disabled
(first segment only) */
desc_tx_disable_padd = 0x04000000, /* (DP)disable padding,
added by - reyaz */
desc_tx_cis_mask = 0x00c00000, /* Tx checksum offloading
control mask */
desc_tx_cis_bypass = 0x00000000, /* Checksum bypass */
desc_tx_cis_ipv4_hdr_cs = 0x00400000, /* IPv4 header checksum */
desc_tx_cis_tcp_only_cs = 0x00800000, /* TCP/UDP/ICMP checksum.
Pseudo header checksum
is assumed to be present */
desc_tx_cis_tcp_pseudo_cs = 0x00c00000, /* TCP/UDP/ICMP checksum fully
in hardware including
pseudo header */
desc_tx_desc_end_of_ring = 0x00200000, /* (TER)End of descriptor ring*/
desc_tx_desc_chain = 0x00100000, /* (TCH)Second buffer address
is chain address */
desc_rx_chk_bit0 = 0x00000001, /* Rx Payload Checksum Error */
desc_rx_chk_bit7 = 0x00000080, /* (IPC CS ERROR)Rx - Ipv4
header checksum error */
desc_rx_chk_bit5 = 0x00000020, /* (FT)Rx - Frame type - Ether,
otherwise 802.3 */
desc_rx_ts_avail = 0x00000080, /* Time stamp available */
desc_rx_frame_type = 0x00000020, /* (FT)Rx - Frame type - Ether,
otherwise 802.3 */
desc_tx_ipv4_chk_error = 0x00010000, /* (IHE) Tx Ip header error */
desc_tx_timeout = 0x00004000, /* (JT)Tx - Transmit
jabber timeout */
desc_tx_frame_flushed = 0x00002000, /* (FF)Tx - DMA/MTL flushed
the frame due to SW flush */
desc_tx_pay_chk_error = 0x00001000, /* (PCE) Tx Payload checksum
Error */
desc_tx_lost_carrier = 0x00000800, /* (LC)Tx - carrier lost
during transmission */
desc_tx_no_carrier = 0x00000400, /* (NC)Tx - no carrier signal
from the transceiver */
desc_tx_late_collision = 0x00000200, /* (LC)Tx - transmission aborted
due to collision */
desc_tx_exc_collisions = 0x00000100, /* (EC)Tx - transmission aborted
after 16 collisions */
desc_tx_vlan_frame = 0x00000080, /* (VF)Tx - VLAN-type frame */
desc_tx_coll_mask = 0x00000078, /* (CC)Tx - Collision count */
desc_tx_coll_shift = 3,
desc_tx_exc_deferral = 0x00000004, /* (ED)Tx - excessive deferral */
desc_tx_underflow = 0x00000002, /* (UF)Tx - late data arrival
from the memory */
desc_tx_deferred = 0x00000001, /* (DB)Tx - frame
transmission deferred */
/*
* This explains the RDES1/TDES1 bits layout
* ------------------------------------------------------
* RDES1/TDES1 | Control Bits | Byte Count Buf 2 | Byte Count Buf 1 |
* ------------------------------------------------------
*/
/* dma_descriptor_length */ /* length word of DMA descriptor */
desc_rx_dis_int_compl = 0x80000000, /* (Disable Rx int on completion) */
desc_rx_desc_end_of_ring = 0x00008000, /* (RER)End of descriptor ring */
desc_rx_desc_chain = 0x00004000, /* (RCH)Second buffer address
is chain address */
desc_size2_mask = 0x1FFF0000, /* (RBS2/TBS2) Buffer 2 size */
desc_size2_shift = 16,
desc_size1_mask = 0x00001FFF, /* (RBS1/TBS1) Buffer 1 size */
desc_size1_shift = 0,
/*
* This explains the RDES4 Extended Status bits layout
* --------------------------------------------------------
* RDES4 | Extended Status |
* --------------------------------------------------------
*/
desc_rx_ts_dropped = 0x00004000, /* PTP snapshot available */
desc_rx_ptp_ver = 0x00002000, /* When set indicates IEEE 1588
Version 2 (else Ver1) */
desc_rx_ptp_frame_type = 0x00001000, /* PTP frame type Indicates PTP
sent over ethernet */
desc_rx_ptp_message_type = 0x00000F00, /* Message Type */
desc_rx_ptp_no = 0x00000000, /* 0000 => No PTP message rcvd */
desc_rx_ptp_sync = 0x00000100, /* 0001 => Sync (all clock
types) received */
desc_rx_ptp_follow_up = 0x00000200, /* 0010 => Follow_Up (all clock
types) received */
desc_rx_ptp_delay_req = 0x00000300, /* 0011 => Delay_Req (all clock
types) received */
desc_rx_ptp_delay_resp = 0x00000400, /* 0100 => Delay_Resp (all clock
types) received */
desc_rx_ptp_pdelay_req = 0x00000500, /* 0101 => Pdelay_Req (in P
to P trans clk) or Announce
in Ord and Bound clk */
desc_rx_ptp_pdelay_resp = 0x00000600, /* 0110 => Pdelay_Resp (in P to
P trans clk) or Management in
Ord and Bound clk */
desc_rx_ptp_pdelay_resp_fp = 0x00000700,/* 0111 => Pdelay_Resp_Follow_Up
(in P to P trans clk) or
Signaling in Ord and Bound
clk */
desc_rx_ptp_ipv6 = 0x00000080, /* Received Packet is in IPV6 */
desc_rx_ptp_ipv4 = 0x00000040, /* Received Packet is in IPV4 */
desc_rx_chk_sum_bypass = 0x00000020, /* When set indicates checksum
offload engine is bypassed */
desc_rx_ip_payload_error = 0x00000010, /* When set indicates 16bit IP
payload CS is in error */
desc_rx_ip_header_error = 0x00000008, /* When set indicates 16bit IPV4
hdr CS is err or IP datagram
version is not consistent
with Ethernet type value */
desc_rx_ip_payload_type = 0x00000007, /* Indicate the type of payload
encapsulated in IPdatagram
processed by COE (Rx) */
desc_rx_ip_payload_unknown = 0x00000000,/* Unknown or did not process
IP payload */
desc_rx_ip_payload_udp = 0x00000001, /* UDP */
desc_rx_ip_payload_tcp = 0x00000002, /* TCP */
desc_rx_ip_payload_icmp = 0x00000003, /* ICMP */
};
/*
* dma_desc
* DMA Descriptor Structure
*
* The structure is common for both receive and transmit descriptors.
*/
struct dma_desc {
uint32_t status; /* Status */
uint32_t length; /* Buffer 1 and Buffer 2 length */
uint32_t buffer1; /* Network Buffer 1 pointer (DMA-able)*/
uint32_t data1; /* This holds virtual address of
buffer1, not used by DMA */
/* This data below is used only by driver */
uint32_t extstatus; /* Extended status of a Rx Descriptor */
uint32_t reserved1; /* Reserved word */
uint32_t timestamplow; /* Lower 32 bits of the 64
bit timestamp value */
uint32_t timestamphigh; /* Higher 32 bits of the 64
bit timestamp value */
};
/*
* syn_dp_gmac_tx_checksum_offload_tcp_pseudo
* The checksum offload engine is enabled to do complete checksum computation.
*/
static inline void syn_dp_gmac_tx_checksum_offload_tcp_pseudo(struct dma_desc *desc)
{
desc->status = ((desc->status & (~desc_tx_cis_mask)) | desc_tx_cis_tcp_pseudo_cs);
}
/*
* syn_dp_gmac_tx_desc_init_ring
* Initialize the tx descriptors for ring or chain mode operation.
*/
static inline void syn_dp_gmac_tx_desc_init_ring(struct dma_desc *desc, uint32_t no_of_desc)
{
struct dma_desc *last_desc = desc + no_of_desc - 1;
memset(desc, 0, no_of_desc * sizeof(struct dma_desc));
last_desc->status = desc_tx_desc_end_of_ring;
}
/*
* syn_dp_gmac_rx_desc_init_ring
* Initialize the rx descriptors for ring or chain mode operation.
*/
static inline void syn_dp_gmac_rx_desc_init_ring(struct dma_desc *desc, uint32_t no_of_desc)
{
struct dma_desc *last_desc = desc + no_of_desc - 1;
memset(desc, 0, no_of_desc * sizeof(struct dma_desc));
last_desc->length = desc_rx_desc_end_of_ring;
}
/*
* syn_dp_gmac_is_rx_desc_valid
* Checks whether the rx descriptor is valid.
*/
static inline bool syn_dp_gmac_is_rx_desc_valid(uint32_t status)
{
return (status & (desc_rx_error | desc_rx_first | desc_rx_last)) ==
(desc_rx_first | desc_rx_last);
}
/*
* syn_dp_gmac_get_rx_desc_frame_length
* Returns the byte length of received frame including CRC.
*/
static inline uint32_t syn_dp_gmac_get_rx_desc_frame_length(uint32_t status)
{
return (status & desc_rx_frame_length_mask) >> desc_rx_frame_length_shift;
}
/*
* syn_dp_gmac_is_desc_owned_by_dma
* Checks whether the descriptor is owned by DMA.
*/
static inline bool syn_dp_gmac_is_desc_owned_by_dma(struct dma_desc *desc)
{
return (desc->status & desc_own_by_dma) == desc_own_by_dma;
}
/*
* syn_dp_gmac_is_desc_empty
* Checks whether the descriptor is empty.
*/
static inline bool syn_dp_gmac_is_desc_empty(struct dma_desc *desc)
{
/*
* If length of both buffer1 & buffer2 are zero then desc is empty
*/
return (desc->length & desc_size1_mask) == 0;
}
/*
* syn_dp_gmac_get_tx_collision_count
* Gives the transmission collision count.
*/
static inline uint32_t syn_dp_gmac_get_tx_collision_count(uint32_t status)
{
return (status & desc_tx_coll_mask) >> desc_tx_coll_shift;
}
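/*
 * Illustrative sketch (not part of the original sources): how the helpers
 * above combine when reaping one Rx descriptor, mirroring what syn_dp_rx()
 * does. Returns the payload length without the 4-byte FCS, or 0 for an
 * errored or still-owned descriptor. The function name is hypothetical.
 */
static inline uint32_t syn_dp_gmac_rx_frame_payload_example(struct dma_desc *desc)
{
uint32_t status = desc->status;
/* Descriptor must already have been handed back by the DMA engine */
if (syn_dp_gmac_is_desc_owned_by_dma(desc))
return 0;
/* Error summary set, or frame not fully contained in one descriptor */
if (!syn_dp_gmac_is_rx_desc_valid(status))
return 0;
/* Frame length from RDES0 includes the 4-byte FCS */
return syn_dp_gmac_get_rx_desc_frame_length(status) - 4;
}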
#endif /* __SYN_DESC__ */


@@ -1,195 +0,0 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
#include "nss_dp_dev.h"
#include "syn_data_plane.h"
#include "syn_reg.h"
/*
* syn_dp_setup_rx_desc_queue
* This sets up the receive Descriptor queue in ring or chain mode.
*/
static int syn_dp_setup_rx_desc_queue(struct net_device *netdev, struct device *dev, struct syn_dp_info *dev_info,
uint32_t no_of_desc, uint32_t desc_mode)
{
struct dma_desc *first_desc = NULL;
dma_addr_t dma_addr;
dev_info->rx_desc_count = 0;
BUG_ON(desc_mode != RINGMODE);
BUG_ON((no_of_desc & (no_of_desc - 1)) != 0);
netdev_dbg(netdev, "total size of memory required for Rx Descriptors in Ring Mode = %u\n", (uint32_t)((sizeof(struct dma_desc) * no_of_desc)));
first_desc = dma_alloc_coherent(dev, sizeof(struct dma_desc) * no_of_desc, &dma_addr, GFP_KERNEL);
if (first_desc == NULL) {
netdev_dbg(netdev, "Error in Rx Descriptor Memory allocation in Ring mode\n");
return -ENOMEM;
}
dev_info->rx_desc_count = no_of_desc;
dev_info->rx_desc = first_desc;
dev_info->rx_desc_dma = dma_addr;
netdev_dbg(netdev, "Rx Descriptors in Ring Mode: No. of descriptors = %d base = 0x%px dma = 0x%px\n",
no_of_desc, first_desc, (void *)dma_addr);
syn_dp_gmac_rx_desc_init_ring(dev_info->rx_desc, no_of_desc);
dev_info->rx_next = 0;
dev_info->rx_busy = 0;
dev_info->rx_next_desc = first_desc;
dev_info->rx_busy_desc = first_desc;
dev_info->busy_rx_desc = 0;
return 0;
}
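/*
 * Note (illustrative, not in the original sources): the power-of-two BUG_ON()
 * above is what lets ring indices wrap with a mask instead of a modulo. With
 * 128 descriptors,
 *
 * next = (curr + 1) & (128 - 1);
 *
 * takes 127 back to 0 without a division, which is the pattern
 * syn_dp_set_rx_qptr()/syn_dp_reset_rx_qptr() use via (rx_desc_count - 1).
 */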
/*
* syn_dp_setup_tx_desc_queue
* This sets up the transmit Descriptor queue in ring or chain mode.
*/
static int syn_dp_setup_tx_desc_queue(struct net_device *netdev, struct device *dev, struct syn_dp_info *dev_info,
uint32_t no_of_desc, uint32_t desc_mode)
{
struct dma_desc *first_desc = NULL;
dma_addr_t dma_addr;
dev_info->tx_desc_count = 0;
BUG_ON(desc_mode != RINGMODE);
BUG_ON((no_of_desc & (no_of_desc - 1)) != 0);
netdev_dbg(netdev, "Total size of memory required for Tx Descriptors in Ring Mode = %u\n", (uint32_t)((sizeof(struct dma_desc) * no_of_desc)));
first_desc = dma_alloc_coherent(dev, sizeof(struct dma_desc) * no_of_desc, &dma_addr, GFP_KERNEL);
if (first_desc == NULL) {
netdev_dbg(netdev, "Error in Tx Descriptors memory allocation\n");
return -ENOMEM;
}
dev_info->tx_desc_count = no_of_desc;
dev_info->tx_desc = first_desc;
dev_info->tx_desc_dma = dma_addr;
netdev_dbg(netdev, "Tx Descriptors in Ring Mode: No. of descriptors = %d base = 0x%px dma = 0x%px\n"
, no_of_desc, first_desc, (void *)dma_addr);
syn_dp_gmac_tx_desc_init_ring(dev_info->tx_desc, dev_info->tx_desc_count);
dev_info->tx_next = 0;
dev_info->tx_busy = 0;
dev_info->tx_next_desc = first_desc;
dev_info->tx_busy_desc = first_desc;
dev_info->busy_tx_desc = 0;
return 0;
}
/*
* syn_dp_setup_rings
* Perform initial setup of Tx/Rx rings
*/
int syn_dp_setup_rings(struct nss_dp_dev *gmac_dev, struct net_device *netdev, struct device *dev, struct syn_dp_info *dev_info)
{
struct nss_gmac_hal_dev *nghd = gmac_dev->gmac_hal_ctx;
int err;
err = syn_dp_setup_rx_desc_queue(netdev, dev, dev_info, SYN_DP_RX_DESC_SIZE, RINGMODE);
if (err) {
netdev_dbg(netdev, "nss_dp_gmac: rx descriptor setup unsuccessfull, err code: %d", err);
return NSS_DP_FAILURE;
}
err = syn_dp_setup_tx_desc_queue(netdev, dev, dev_info, SYN_DP_TX_DESC_SIZE, RINGMODE);
if (err) {
netdev_dbg(netdev, "nss_dp_gmac: tx descriptor setup unsuccessfull, err code: %d", err);
return NSS_DP_FAILURE;
}
syn_dp_rx_refill(gmac_dev, dev_info);
syn_init_tx_desc_base(nghd, dev_info->tx_desc_dma);
syn_init_rx_desc_base(nghd, dev_info->rx_desc_dma);
return NSS_DP_SUCCESS;
}
/*
* syn_dp_cleanup_rings
* Cleanup Synopsys GMAC rings
*/
int syn_dp_cleanup_rings(struct nss_dp_dev *gmac_dev, struct net_device *netdev, struct syn_dp_info *dev_info)
{
uint32_t rx_skb_index;
struct dma_desc *rxdesc;
uint32_t tx_skb_index;
struct dma_desc *txdesc;
int i;
struct sk_buff *skb;
/*
* Rx Ring cleaning
* We are assuming that the NAPI poll was already completed.
* No need of a lock here since the NAPI and interrupts have been disabled now
*/
rx_skb_index = dev_info->rx_busy;
for (i = 0; i < dev_info->busy_rx_desc; i++) {
rx_skb_index = rx_skb_index & (dev_info->rx_desc_count - 1);
rxdesc = dev_info->rx_busy_desc;
dma_unmap_single(&(gmac_dev->netdev->dev), rxdesc->buffer1,
SYN_DP_MINI_JUMBO_FRAME_MTU, DMA_FROM_DEVICE);
skb = dev_info->rx_skb_list[rx_skb_index];
if (unlikely(skb != NULL)) {
dev_kfree_skb(skb);
dev_info->rx_skb_list[rx_skb_index] = NULL;
}
}
dma_free_coherent(&(gmac_dev->netdev->dev), (sizeof(struct dma_desc) * SYN_DP_RX_DESC_SIZE),
dev_info->rx_desc, dev_info->rx_desc_dma);
/*
* Tx Ring cleaning
*/
spin_lock_bh(&dev_info->data_lock);
tx_skb_index = dev_info->tx_busy;
for (i = 0; i < dev_info->busy_tx_desc; i++) {
tx_skb_index = tx_skb_index & (dev_info->tx_desc_count - 1);
txdesc = dev_info->tx_busy_desc;
dma_unmap_single(&(gmac_dev->netdev->dev), txdesc->buffer1,
SYN_DP_MINI_JUMBO_FRAME_MTU, DMA_FROM_DEVICE);
skb = dev_info->tx_skb_list[tx_skb_index];
if (unlikely(skb != NULL)) {
dev_kfree_skb(skb);
dev_info->tx_skb_list[tx_skb_index] = NULL;
}
}
spin_unlock_bh(&dev_info->data_lock);
dma_free_coherent(&(gmac_dev->netdev->dev), (sizeof(struct dma_desc) * SYN_DP_TX_DESC_SIZE),
dev_info->tx_desc, dev_info->tx_desc_dma);
return 0;
}


@@ -1,425 +0,0 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/debugfs.h>
#include "syn_data_plane.h"
#include "syn_reg.h"
/*
* syn_dp_reset_rx_qptr
* Reset the descriptor after Rx is over.
*/
static inline void syn_dp_reset_rx_qptr(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info)
{
/* Index of descriptor the DMA just completed.
* May be useful when data is spread over multiple buffers/descriptors
*/
uint32_t rxnext = dev_info->rx_busy;
struct dma_desc *rxdesc = dev_info->rx_busy_desc;
BUG_ON(rxdesc != (dev_info->rx_desc + rxnext));
dev_info->rx_busy = (rxnext + 1) & (dev_info->rx_desc_count - 1);
dev_info->rx_busy_desc = dev_info->rx_desc + dev_info->rx_busy;
dev_info->rx_skb_list[rxnext] = NULL;
rxdesc->status = 0;
rxdesc->length &= desc_rx_desc_end_of_ring;
rxdesc->buffer1 = 0;
rxdesc->data1 = 0;
rxdesc->reserved1 = 0;
/*
* This returns one descriptor to processor. So busy count will be decremented by one.
*/
dev_info->busy_rx_desc--;
}
/*
* syn_dp_set_rx_qptr
* Prepares the descriptor to receive packets.
*/
static inline int32_t syn_dp_set_rx_qptr(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info,
uint32_t Buffer1, uint32_t Length1, struct sk_buff *skb)
{
uint32_t rxnext = dev_info->rx_next;
struct dma_desc *rxdesc = dev_info->rx_next_desc;
uint32_t rx_skb_index = rxnext;
BUG_ON(dev_info->busy_rx_desc >= dev_info->rx_desc_count);
BUG_ON(rxdesc != (dev_info->rx_desc + rxnext));
BUG_ON(!syn_dp_gmac_is_desc_empty(rxdesc));
BUG_ON(syn_dp_gmac_is_desc_owned_by_dma(rxdesc));
if (Length1 > SYN_DP_MAX_DESC_BUFF) {
rxdesc->length |= (SYN_DP_MAX_DESC_BUFF << desc_size1_shift) & desc_size1_mask;
rxdesc->length |= ((Length1 - SYN_DP_MAX_DESC_BUFF) << desc_size2_shift) & desc_size2_mask;
} else {
rxdesc->length |= ((Length1 << desc_size1_shift) & desc_size1_mask);
}
rxdesc->buffer1 = Buffer1;
dev_info->rx_skb_list[rx_skb_index] = skb;
/* Program second buffer address if using two buffers. */
if (Length1 > SYN_DP_MAX_DESC_BUFF)
rxdesc->data1 = Buffer1 + SYN_DP_MAX_DESC_BUFF;
else
rxdesc->data1 = 0;
rxdesc->extstatus = 0;
rxdesc->timestamplow = 0;
rxdesc->timestamphigh = 0;
/*
* Ensure all write completed before setting own by dma bit so when gmac
* HW takeover this descriptor, all the fields are filled correctly
*/
wmb();
rxdesc->status = desc_own_by_dma;
dev_info->rx_next = (rxnext + 1) & (dev_info->rx_desc_count - 1);
dev_info->rx_next_desc = dev_info->rx_desc + dev_info->rx_next;
/*
* 1 descriptor will be given to HW. So busy count incremented by 1.
*/
dev_info->busy_rx_desc++;
return rxnext;
}
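/*
 * Worked example (illustrative, not in the original sources): with
 * SYN_DP_MAX_DESC_BUFF = 0x1FFF (8191 bytes), a 9000-byte buffer would be
 * programmed as 8191 bytes in the size1 field and 809 bytes in the size2
 * field. The 1978-byte mini-jumbo buffers passed in by syn_dp_rx_refill()
 * always fit in size1 alone, so the split branch above is normally idle.
 */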
/*
* syn_dp_rx_refill
* Refill the Rx descriptors
*/
void syn_dp_rx_refill(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info)
{
struct net_device *netdev = gmac_dev->netdev;
struct device *dev = &gmac_dev->pdev->dev;
int empty_count = SYN_DP_RX_DESC_SIZE - dev_info->busy_rx_desc;
dma_addr_t dma_addr;
int i;
struct sk_buff *skb;
for (i = 0; i < empty_count; i++) {
skb = __netdev_alloc_skb(netdev, SYN_DP_MINI_JUMBO_FRAME_MTU, GFP_ATOMIC);
if (unlikely(skb == NULL)) {
netdev_dbg(netdev, "Unable to allocate skb, will try next time\n");
break;
}
skb_reserve(skb, NET_IP_ALIGN);
dma_addr = dma_map_single(dev, skb->data, SYN_DP_MINI_JUMBO_FRAME_MTU, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(dev, dma_addr))) {
dev_kfree_skb(skb);
netdev_dbg(netdev, "DMA mapping failed for empty buffer\n");
break;
}
syn_dp_set_rx_qptr(gmac_dev, dev_info, dma_addr, SYN_DP_MINI_JUMBO_FRAME_MTU, skb);
}
}
/*
* syn_dp_rx()
* Process RX packets
*/
int syn_dp_rx(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info, int budget)
{
struct dma_desc *desc = NULL;
int frame_length, busy;
uint32_t status;
struct sk_buff *rx_skb;
uint32_t rx_skb_index;
if (!dev_info->busy_rx_desc) {
/* no desc are held by gmac dma, we are done */
return 0;
}
busy = dev_info->busy_rx_desc;
if (busy > budget)
busy = budget;
do {
desc = dev_info->rx_busy_desc;
if (syn_dp_gmac_is_desc_owned_by_dma(desc)) {
/* desc still held by gmac dma, so we are done */
break;
}
status = desc->status;
rx_skb_index = dev_info->rx_busy;
rx_skb = dev_info->rx_skb_list[rx_skb_index];
dma_unmap_single(&(gmac_dev->netdev->dev), desc->buffer1,
SYN_DP_MINI_JUMBO_FRAME_MTU, DMA_FROM_DEVICE);
spin_lock_bh(&dev_info->stats_lock);
if (likely(syn_dp_gmac_is_rx_desc_valid(status))) {
/* We have a pkt to process get the frame length */
frame_length = syn_dp_gmac_get_rx_desc_frame_length(status);
/* Get rid of FCS: 4 */
frame_length -= ETH_FCS_LEN;
/* Valid packet, collect stats */
dev_info->stats.stats.rx_packets++;
dev_info->stats.stats.rx_bytes += frame_length;
/* type_trans and deliver to linux */
skb_put(rx_skb, frame_length);
rx_skb->protocol = eth_type_trans(rx_skb, gmac_dev->netdev);
rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
napi_gro_receive(&gmac_dev->napi, rx_skb);
} else {
dev_info->stats.stats.rx_errors++;
dev_kfree_skb(rx_skb);
if (status & (desc_rx_crc | desc_rx_collision |
desc_rx_overflow | desc_rx_dribbling |
desc_rx_length_error)) {
dev_info->stats.stats.mmc_rx_crc_errors += (status & desc_rx_crc) ? 1 : 0;
dev_info->stats.stats.rx_late_collision_errors += (status & desc_rx_collision) ? 1 : 0;
dev_info->stats.stats.mmc_rx_overflow_errors += (status & desc_rx_overflow) ? 1 : 0;
dev_info->stats.stats.rx_dribble_bit_errors += (status & desc_rx_dribbling) ? 1 : 0;
dev_info->stats.stats.rx_length_errors += (status & desc_rx_length_error) ? 1 : 0;
}
}
spin_unlock_bh(&dev_info->stats_lock);
syn_dp_reset_rx_qptr(gmac_dev, dev_info);
busy--;
} while (busy > 0);
return budget - busy;
}
/*
* syn_dp_reset_tx_qptr
* Reset the descriptor after Tx is over.
*/
static inline void syn_dp_reset_tx_qptr(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info)
{
uint32_t txover = dev_info->tx_busy;
struct dma_desc *txdesc = dev_info->tx_busy_desc;
BUG_ON(txdesc != (dev_info->tx_desc + txover));
dev_info->tx_busy = (txover + 1) & (dev_info->tx_desc_count - 1);
dev_info->tx_busy_desc = dev_info->tx_desc + dev_info->tx_busy;
dev_info->tx_skb_list[txover] = NULL;
txdesc->status &= desc_tx_desc_end_of_ring;
txdesc->length = 0;
txdesc->buffer1 = 0;
txdesc->data1 = 0;
txdesc->reserved1 = 0;
/*
* Busy tx descriptor is reduced by one as
* it will be handed over to Processor now.
*/
dev_info->busy_tx_desc--;
}
/*
* syn_dp_set_tx_qptr
* Populate the tx desc structure with the buffer address.
*/
static inline struct dma_desc *syn_dp_set_tx_qptr(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info,
uint32_t Buffer1, uint32_t Length1, struct sk_buff *skb, uint32_t offload_needed,
uint32_t tx_cntl, uint32_t set_dma)
{
uint32_t txnext = dev_info->tx_next;
struct dma_desc *txdesc = dev_info->tx_next_desc;
uint32_t tx_skb_index = txnext;
BUG_ON(dev_info->busy_tx_desc > dev_info->tx_desc_count);
BUG_ON(txdesc != (dev_info->tx_desc + txnext));
BUG_ON(!syn_dp_gmac_is_desc_empty(txdesc));
BUG_ON(syn_dp_gmac_is_desc_owned_by_dma(txdesc));
if (Length1 > SYN_DP_MAX_DESC_BUFF) {
txdesc->length |= (SYN_DP_MAX_DESC_BUFF << desc_size1_shift) & desc_size1_mask;
txdesc->length |=
((Length1 - SYN_DP_MAX_DESC_BUFF) << desc_size2_shift) & desc_size2_mask;
} else {
txdesc->length |= ((Length1 << desc_size1_shift) & desc_size1_mask);
}
txdesc->status |= tx_cntl;
txdesc->buffer1 = Buffer1;
dev_info->tx_skb_list[tx_skb_index] = skb;
/* Program second buffer address if using two buffers. */
if (Length1 > SYN_DP_MAX_DESC_BUFF)
txdesc->data1 = Buffer1 + SYN_DP_MAX_DESC_BUFF;
else
txdesc->data1 = 0;
if (likely(offload_needed)) {
syn_dp_gmac_tx_checksum_offload_tcp_pseudo(txdesc);
}
/*
* Ensure all write completed before setting own by dma bit so when gmac
* HW takeover this descriptor, all the fields are filled correctly
*/
wmb();
txdesc->status |= set_dma;
dev_info->tx_next = (txnext + 1) & (dev_info->tx_desc_count - 1);
dev_info->tx_next_desc = dev_info->tx_desc + dev_info->tx_next;
return txdesc;
}
/*
* syn_dp_tx_desc_queue
* Queue TX descriptor to the TX ring
*/
static void syn_dp_tx_desc_queue(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info, struct sk_buff *skb, dma_addr_t dma_addr)
{
unsigned int len = skb->len;
spin_lock_bh(&dev_info->data_lock);
syn_dp_set_tx_qptr(gmac_dev, dev_info, dma_addr, len, skb, (skb->ip_summed == CHECKSUM_PARTIAL),
(desc_tx_last | desc_tx_first | desc_tx_int_enable), desc_own_by_dma);
dev_info->busy_tx_desc++;
spin_unlock_bh(&dev_info->data_lock);
}
/*
* syn_dp_process_tx_complete
* Xmit complete, clear descriptor and free the skb
*/
void syn_dp_process_tx_complete(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info)
{
int busy, len;
uint32_t status;
struct dma_desc *desc = NULL;
struct sk_buff *skb;
uint32_t tx_skb_index;
spin_lock_bh(&dev_info->data_lock);
busy = dev_info->busy_tx_desc;
if (!busy) {
/* No desc are held by gmac dma, we are done */
spin_unlock_bh(&dev_info->data_lock);
return;
}
do {
desc = dev_info->tx_busy_desc;
if (syn_dp_gmac_is_desc_owned_by_dma(desc)) {
/* desc still held by gmac dma, so we are done */
break;
}
len = (desc->length & desc_size1_mask) >> desc_size1_shift;
dma_unmap_single(&(gmac_dev->pdev->dev), desc->buffer1, len, DMA_TO_DEVICE);
status = desc->status;
if (status & desc_tx_last) {
/* TX is done for this whole skb, we can free it */
/* Get the skb from the tx skb pool */
tx_skb_index = dev_info->tx_busy;
skb = dev_info->tx_skb_list[tx_skb_index];
BUG_ON(!skb);
dev_kfree_skb(skb);
spin_lock_bh(&dev_info->stats_lock);
if (unlikely(status & desc_tx_error)) {
/* Some error happen, collect statistics */
dev_info->stats.stats.tx_errors++;
dev_info->stats.stats.tx_jabber_timeout_errors += (status & desc_tx_timeout) ? 1 : 0;
dev_info->stats.stats.tx_frame_flushed_errors += (status & desc_tx_frame_flushed) ? 1 : 0;
dev_info->stats.stats.tx_loss_of_carrier_errors += (status & desc_tx_lost_carrier) ? 1 : 0;
dev_info->stats.stats.tx_no_carrier_errors += (status & desc_tx_no_carrier) ? 1 : 0;
dev_info->stats.stats.tx_late_collision_errors += (status & desc_tx_late_collision) ? 1 : 0;
dev_info->stats.stats.tx_excessive_collision_errors += (status & desc_tx_exc_collisions) ? 1 : 0;
dev_info->stats.stats.tx_excessive_deferral_errors += (status & desc_tx_exc_deferral) ? 1 : 0;
dev_info->stats.stats.tx_underflow_errors += (status & desc_tx_underflow) ? 1 : 0;
dev_info->stats.stats.tx_ip_header_errors += (status & desc_tx_ipv4_chk_error) ? 1 : 0;
dev_info->stats.stats.tx_ip_payload_errors += (status & desc_tx_pay_chk_error) ? 1 : 0;
} else {
/* No error, record tx pkts/bytes and
* collision
*/
dev_info->stats.stats.tx_packets++;
dev_info->stats.stats.tx_collisions += syn_dp_gmac_get_tx_collision_count(status);
dev_info->stats.stats.tx_bytes += len;
}
spin_unlock_bh(&dev_info->stats_lock);
}
syn_dp_reset_tx_qptr(gmac_dev, dev_info);
busy--;
} while (busy > 0);
spin_unlock_bh(&dev_info->data_lock);
}
/*
* syn_dp_tx
* TX routine for Synopsys GMAC
*/
int syn_dp_tx(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info, struct sk_buff *skb)
{
struct net_device *netdev = gmac_dev->netdev;
struct nss_gmac_hal_dev *nghd = gmac_dev->gmac_hal_ctx;
unsigned len = skb->len;
dma_addr_t dma_addr;
/*
* If we don't have enough tx descriptor for this pkt, return busy.
*/
if ((SYN_DP_TX_DESC_SIZE - dev_info->busy_tx_desc) < 1) {
netdev_dbg(netdev, "Not enough descriptors available");
return -1;
}
dma_addr = dma_map_single(&gmac_dev->pdev->dev, skb->data, len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&gmac_dev->pdev->dev, dma_addr))) {
netdev_dbg(netdev, "DMA mapping failed for empty buffer\n");
return -1;
}
/*
* Queue packet to the GMAC rings
*/
syn_dp_tx_desc_queue(gmac_dev, dev_info, skb, dma_addr);
syn_resume_dma_tx(nghd);
return 0;
}


@@ -1,132 +0,0 @@
/*
**************************************************************************
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
#ifndef __NSS_DP_DEV_H__
#define __NSS_DP_DEV_H__
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/switch.h>
#include "nss_dp_api_if.h"
#include "nss_dp_hal_if.h"
#define NSS_DP_ACL_DEV_ID 0
struct nss_dp_global_ctx;
/*
* nss data plane device structure
*/
struct nss_dp_dev {
uint32_t macid; /* Sequence# of Mac on the platform */
uint32_t vsi; /* vsi number */
unsigned long flags; /* Status flags */
unsigned long drv_flags; /* Driver specific feature flags */
/* Phy related stuff */
struct phy_device *phydev; /* Phy device */
struct mii_bus *miibus; /* MII bus */
uint32_t phy_mii_type; /* RGMII/SGMII/QSGMII */
uint32_t phy_mdio_addr; /* Mdio address */
bool link_poll; /* Link polling enable? */
uint32_t forced_speed; /* Forced speed? */
uint32_t forced_duplex; /* Forced duplex? */
uint32_t link_state; /* Current link state */
uint32_t pause; /* Current flow control settings */
struct net_device *netdev;
struct platform_device *pdev;
struct napi_struct napi;
struct nss_dp_data_plane_ctx *dpc;
/* context when NSS owns GMACs */
struct nss_dp_data_plane_ops *data_plane_ops;
/* ops for each data plane */
struct nss_dp_global_ctx *ctx; /* Global NSS DP context */
struct nss_gmac_hal_dev *gmac_hal_ctx; /* context of gmac hal */
struct nss_gmac_hal_ops *gmac_hal_ops; /* GMAC HAL OPS */
/* switchdev related attributes */
#ifdef CONFIG_NET_SWITCHDEV
u8 stp_state; /* STP state of this physical port */
unsigned long brport_flags; /* bridge port flags */
#endif
};
/*
* nss data plane global context
*/
struct nss_dp_global_ctx {
struct nss_dp_dev *nss_dp[NSS_DP_HAL_MAX_PORTS];
struct nss_gmac_hal_ops *gmac_hal_ops[GMAC_HAL_TYPE_MAX];
/* GMAC HAL OPS */
bool common_init_done; /* Flag to hold common init state */
uint8_t slowproto_acl_bm; /* Port bitmap to allow slow protocol packets */
};
/* Global data */
extern struct nss_dp_global_ctx dp_global_ctx;
extern struct nss_dp_data_plane_ctx dp_global_data_plane_ctx[NSS_DP_HAL_MAX_PORTS];
/*
* nss data plane link state
*/
enum nss_dp_link_state {
__NSS_DP_LINK_UP, /* Indicate link is UP */
__NSS_DP_LINK_DOWN /* Indicate link is down */
};
/*
* nss data plane status
*/
enum nss_dp_state {
__NSS_DP_UP, /* set to indicate the interface is UP */
__NSS_DP_RXCSUM, /* Rx checksum enabled */
__NSS_DP_AUTONEG, /* Autonegotiation Enabled */
__NSS_DP_LINKPOLL, /* Poll link status */
};
/*
* nss data plane private flags
*/
enum nss_dp_priv_flags {
__NSS_DP_PRIV_FLAG_INIT_DONE,
__NSS_DP_PRIV_FLAG_IRQ_REQUESTED,
__NSS_DP_PRIV_FLAG_MAX,
};
#define NSS_DP_PRIV_FLAG(x) (1 << __NSS_DP_PRIV_FLAG_ ## x)
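/*
 * Illustrative expansion (not in the original sources):
 * NSS_DP_PRIV_FLAG(IRQ_REQUESTED) becomes (1 << __NSS_DP_PRIV_FLAG_IRQ_REQUESTED),
 * which is how the driver tests and sets bits in nss_dp_dev.drv_flags.
 */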
/*
* nss_dp_set_ethtool_ops()
*/
void nss_dp_set_ethtool_ops(struct net_device *netdev);
/*
* nss data plane switchdev helpers
*/
#ifdef CONFIG_NET_SWITCHDEV
void nss_dp_switchdev_setup(struct net_device *dev);
bool nss_dp_is_phy_dev(struct net_device *dev);
#endif
#endif /* __NSS_DP_DEV_H__ */


@@ -1,192 +0,0 @@
/*
**************************************************************************
* Copyright (c) 2016-2017, 2019-2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
#include <linux/version.h>
#include "nss_dp_hal.h"
/*
* nss_dp_reset_netdev_features()
* Resets the netdev features
*/
static inline void nss_dp_reset_netdev_features(struct net_device *netdev)
{
netdev->features = 0;
netdev->hw_features = 0;
netdev->vlan_features = 0;
netdev->wanted_features = 0;
}
/*
* nss_dp_receive()
* Called by overlay drivers to deliver packets to nss-dp
*/
void nss_dp_receive(struct net_device *netdev, struct sk_buff *skb,
struct napi_struct *napi)
{
struct nss_dp_dev *dp_dev = netdev_priv(netdev);
skb->dev = netdev;
skb->protocol = eth_type_trans(skb, netdev);
netdev_dbg(netdev, "Rx on port%d, packet len %d, CSUM %d\n",
dp_dev->macid, skb->len, skb->ip_summed);
#ifdef CONFIG_NET_SWITCHDEV
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
skb->offload_fwd_mark = netdev->offload_fwd_mark;
#else
/*
* TODO: Implement ndo_get_devlink_port()
*/
skb->offload_fwd_mark = 0;
#endif
#endif
napi_gro_receive(napi, skb);
}
EXPORT_SYMBOL(nss_dp_receive);
/*
* nss_dp_is_in_open_state()
* Return if a data plane is opened or not
*/
bool nss_dp_is_in_open_state(struct net_device *netdev)
{
struct nss_dp_dev *dp_dev = (struct nss_dp_dev *)netdev_priv(netdev);
if (test_bit(__NSS_DP_UP, &dp_dev->flags))
return true;
return false;
}
EXPORT_SYMBOL(nss_dp_is_in_open_state);
/*
* nss_dp_override_data_plane()
* API to allow overlay drivers to override the data plane
*/
int nss_dp_override_data_plane(struct net_device *netdev,
struct nss_dp_data_plane_ops *dp_ops,
struct nss_dp_data_plane_ctx *dpc)
{
struct nss_dp_dev *dp_dev = (struct nss_dp_dev *)netdev_priv(netdev);
if (!dp_ops->open || !dp_ops->close || !dp_ops->link_state
|| !dp_ops->mac_addr || !dp_ops->change_mtu || !dp_ops->xmit
|| !dp_ops->set_features || !dp_ops->pause_on_off || !dp_ops->deinit) {
netdev_dbg(netdev, "All the op functions must be present, reject this registeration\n");
return NSS_DP_FAILURE;
}
/*
* If this data plane is up, close the netdev to force TX/RX stop, and
* also reset the features
*/
if (test_bit(__NSS_DP_UP, &dp_dev->flags)) {
netdev->netdev_ops->ndo_stop(netdev);
nss_dp_reset_netdev_features(netdev);
}
/*
* Free up the resources used by the data plane
*/
if (dp_dev->drv_flags & NSS_DP_PRIV_FLAG(INIT_DONE)) {
if (dp_dev->data_plane_ops->deinit(dpc)) {
netdev_dbg(netdev, "Data plane init failed\n");
return -ENOMEM;
}
dp_dev->drv_flags &= ~NSS_DP_PRIV_FLAG(INIT_DONE);
}
/*
* Override the data_plane_ctx, data_plane_ops
*/
dp_dev->dpc = dpc;
dp_dev->data_plane_ops = dp_ops;
return NSS_DP_SUCCESS;
}
EXPORT_SYMBOL(nss_dp_override_data_plane);
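/*
 * Illustrative sketch (not part of the original sources): how an overlay
 * driver might take over the data plane for a netdev it manages. The function
 * name and the ops/ctx arguments are hypothetical; every callback checked by
 * nss_dp_override_data_plane() above must be populated by the overlay.
 */
static int __maybe_unused nss_dp_overlay_attach_example(struct net_device *netdev,
struct nss_dp_data_plane_ops *overlay_ops,
struct nss_dp_data_plane_ctx *overlay_dpc)
{
if (nss_dp_override_data_plane(netdev, overlay_ops, overlay_dpc) != NSS_DP_SUCCESS)
return -EINVAL;
/* Tell nss-dp the overlay data plane is ready to pass traffic */
nss_dp_start_data_plane(netdev, overlay_dpc);
return 0;
}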
/*
* nss_dp_start_data_plane()
* Data plane to inform netdev it is ready to start
*/
void nss_dp_start_data_plane(struct net_device *netdev,
struct nss_dp_data_plane_ctx *dpc)
{
struct nss_dp_dev *dp_dev = (struct nss_dp_dev *)netdev_priv(netdev);
if (test_bit(__NSS_DP_UP, &dp_dev->flags)) {
netdev_dbg(netdev, "This netdev already up, something is wrong\n");
return;
}
if (dp_dev->dpc != dpc) {
netdev_dbg(netdev, "Cookie %px does not match, reject\n", dpc);
return;
}
netdev->netdev_ops->ndo_open(dp_dev->netdev);
}
EXPORT_SYMBOL(nss_dp_start_data_plane);
/*
* nss_dp_restore_data_plane()
* Called by overlay drivers to detach itself from nss-dp
*/
void nss_dp_restore_data_plane(struct net_device *netdev)
{
struct nss_dp_dev *dp_dev = (struct nss_dp_dev *)netdev_priv(netdev);
/*
* If this data plane is up, close the netdev to force TX/RX stop, and
* also reset the features
*/
if (test_bit(__NSS_DP_UP, &dp_dev->flags)) {
netdev->netdev_ops->ndo_stop(netdev);
nss_dp_reset_netdev_features(netdev);
}
dp_dev->data_plane_ops = nss_dp_hal_get_data_plane_ops();
dp_dev->dpc = &dp_global_data_plane_ctx[dp_dev->macid - NSS_DP_START_IFNUM];
/*
* TODO: Re-initialize EDMA dataplane
*/
}
EXPORT_SYMBOL(nss_dp_restore_data_plane);
/*
* nss_dp_get_netdev_by_nss_if_num()
* Return the net device of the corresponding id, if it exists
*/
struct net_device *nss_dp_get_netdev_by_nss_if_num(int if_num)
{
struct nss_dp_dev *dp_dev;
if ((if_num > NSS_DP_HAL_MAX_PORTS) || (if_num < NSS_DP_START_IFNUM)) {
pr_err("Invalid if_num %d\n", if_num);
return NULL;
}
dp_dev = dp_global_ctx.nss_dp[if_num - NSS_DP_START_IFNUM];
if (!dp_dev)
return NULL;
return dp_dev->netdev;
}
EXPORT_SYMBOL(nss_dp_get_netdev_by_nss_if_num);


@@ -1,378 +0,0 @@
/*
**************************************************************************
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include "nss_dp_dev.h"
#include "fal/fal_port_ctrl.h"
/*
* nss_dp_get_ethtool_stats()
*/
static void nss_dp_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, uint64_t *data)
{
struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);
dp_priv->gmac_hal_ops->getethtoolstats(dp_priv->gmac_hal_ctx, data);
}
/*
* nss_dp_get_strset_count()
*/
static int32_t nss_dp_get_strset_count(struct net_device *netdev, int32_t sset)
{
struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);
return dp_priv->gmac_hal_ops->getssetcount(dp_priv->gmac_hal_ctx, sset);
}
/*
* nss_dp_get_strings()
*/
static void nss_dp_get_strings(struct net_device *netdev, uint32_t stringset,
uint8_t *data)
{
struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);
dp_priv->gmac_hal_ops->getstrings(dp_priv->gmac_hal_ctx, stringset,
data);
}
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
/*
* nss_dp_get_settings()
*/
static int32_t nss_dp_get_settings(struct net_device *netdev,
struct ethtool_cmd *cmd)
{
struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);
/*
* If there is a PHY attached, get the status from Kernel helper
*/
if (dp_priv->phydev)
return phy_ethtool_gset(dp_priv->phydev, cmd);
return -EIO;
}
/*
* nss_dp_set_settings()
*/
static int32_t nss_dp_set_settings(struct net_device *netdev,
struct ethtool_cmd *cmd)
{
struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);
if (!dp_priv->phydev)
return -EIO;
return phy_ethtool_sset(dp_priv->phydev, cmd);
}
#endif
/*
* nss_dp_get_pauseparam()
*/
static void nss_dp_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);
pause->rx_pause = dp_priv->pause & FLOW_CTRL_RX ? 1 : 0;
pause->tx_pause = dp_priv->pause & FLOW_CTRL_TX ? 1 : 0;
pause->autoneg = AUTONEG_ENABLE;
}
/*
* nss_dp_set_pauseparam()
*/
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
static int32_t nss_dp_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);
/* set flow control settings */
dp_priv->pause = 0;
if (pause->rx_pause)
dp_priv->pause |= FLOW_CTRL_RX;
if (pause->tx_pause)
dp_priv->pause |= FLOW_CTRL_TX;
if (!dp_priv->phydev)
return 0;
/* Update flow control advertisement */
dp_priv->phydev->advertising &=
~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
if (pause->rx_pause)
dp_priv->phydev->advertising |=
(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
if (pause->tx_pause)
dp_priv->phydev->advertising |= ADVERTISED_Asym_Pause;
genphy_config_aneg(dp_priv->phydev);
return 0;
}
#else
static int32_t nss_dp_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);
__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
/* set flow control settings */
dp_priv->pause = 0;
if (pause->rx_pause)
dp_priv->pause |= FLOW_CTRL_RX;
if (pause->tx_pause)
dp_priv->pause |= FLOW_CTRL_TX;
if (!dp_priv->phydev)
return 0;
/* Update flow control advertisement */
linkmode_copy(advertising, dp_priv->phydev->advertising);
linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising);
linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising);
if (pause->rx_pause) {
linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising);
linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising);
}
if (pause->tx_pause)
linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising);
linkmode_copy(dp_priv->phydev->advertising, advertising);
genphy_config_aneg(dp_priv->phydev);
return 0;
}
#endif
/*
* nss_dp_fal_to_ethtool_linkmode_xlate()
* Translate linkmode from FAL type to ethtool type.
*/
static inline void nss_dp_fal_to_ethtool_linkmode_xlate(uint32_t *xlate_to, uint32_t *xlate_from)
{
uint32_t pos;
while (*xlate_from) {
pos = ffs(*xlate_from);
switch (1 << (pos - 1)) {
case FAL_PHY_EEE_10BASE_T:
*xlate_to |= SUPPORTED_10baseT_Full;
break;
case FAL_PHY_EEE_100BASE_T:
*xlate_to |= SUPPORTED_100baseT_Full;
break;
case FAL_PHY_EEE_1000BASE_T:
*xlate_to |= SUPPORTED_1000baseT_Full;
break;
case FAL_PHY_EEE_2500BASE_T:
*xlate_to |= SUPPORTED_2500baseX_Full;
break;
case FAL_PHY_EEE_5000BASE_T:
/*
* Ethtool does not support enumeration for 5G.
*/
break;
case FAL_PHY_EEE_10000BASE_T:
*xlate_to |= SUPPORTED_10000baseT_Full;
break;
}
*xlate_from &= (~(1 << (pos - 1)));
}
}
/*
* nss_dp_get_eee()
* Get EEE settings.
*/
static int32_t nss_dp_get_eee(struct net_device *netdev, struct ethtool_eee *eee)
{
struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);
fal_port_eee_cfg_t port_eee_cfg;
uint32_t port_id;
sw_error_t ret;
memset(&port_eee_cfg, 0, sizeof(fal_port_eee_cfg_t));
port_id = dp_priv->macid;
ret = fal_port_interface_eee_cfg_get(NSS_DP_ACL_DEV_ID, port_id, &port_eee_cfg);
if (ret != SW_OK) {
netdev_dbg(netdev, "Could not fetch EEE settings err = %d\n", ret);
return -EIO;
}
/*
* Translate the FAL linkmode types to ethtool linkmode types.
*/
nss_dp_fal_to_ethtool_linkmode_xlate(&eee->supported, &port_eee_cfg.capability);
nss_dp_fal_to_ethtool_linkmode_xlate(&eee->advertised, &port_eee_cfg.advertisement);
nss_dp_fal_to_ethtool_linkmode_xlate(&eee->lp_advertised, &port_eee_cfg.link_partner_advertisement);
eee->eee_enabled = port_eee_cfg.enable;
eee->eee_active = port_eee_cfg.eee_status;
eee->tx_lpi_enabled = port_eee_cfg.lpi_tx_enable;
eee->tx_lpi_timer = port_eee_cfg.lpi_sleep_timer;
return 0;
}
/*
* nss_dp_set_eee()
* Set EEE settings.
*/
static int32_t nss_dp_set_eee(struct net_device *netdev, struct ethtool_eee *eee)
{
struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);
fal_port_eee_cfg_t port_eee_cfg, port_eee_cur_cfg;
uint32_t port_id, pos;
sw_error_t ret;
memset(&port_eee_cfg, 0, sizeof(fal_port_eee_cfg_t));
memset(&port_eee_cur_cfg, 0, sizeof(fal_port_eee_cfg_t));
port_id = dp_priv->macid;
/*
* Get current EEE configuration.
*/
ret = fal_port_interface_eee_cfg_get(NSS_DP_ACL_DEV_ID, port_id, &port_eee_cur_cfg);
if (ret != SW_OK) {
netdev_dbg(netdev, "Could not fetch EEE settings err = %d\n", ret);
return -EIO;
}
port_eee_cfg.enable = eee->eee_enabled;
/*
* Translate the ethtool speed types to FAL speed types.
*/
while (eee->advertised) {
pos = ffs(eee->advertised);
switch (1 << (pos - 1)) {
case ADVERTISED_10baseT_Full:
if (port_eee_cur_cfg.capability & FAL_PHY_EEE_10BASE_T) {
port_eee_cfg.advertisement |= FAL_PHY_EEE_10BASE_T;
break;
}
netdev_dbg(netdev, "Advertised value 10baseT_Full is not supported\n");
return -EIO;
case ADVERTISED_100baseT_Full:
if (port_eee_cur_cfg.capability & FAL_PHY_EEE_100BASE_T) {
port_eee_cfg.advertisement |= FAL_PHY_EEE_100BASE_T;
break;
}
netdev_dbg(netdev, "Advertised value 100baseT_Full is not supported\n");
return -EIO;
case ADVERTISED_1000baseT_Full:
if (port_eee_cur_cfg.capability & FAL_PHY_EEE_1000BASE_T) {
port_eee_cfg.advertisement |= FAL_PHY_EEE_1000BASE_T;
break;
}
netdev_dbg(netdev, "Advertised value 1000baseT_Full is not supported\n");
return -EIO;
case ADVERTISED_2500baseX_Full:
if (port_eee_cur_cfg.capability & FAL_PHY_EEE_2500BASE_T) {
port_eee_cfg.advertisement |= FAL_PHY_EEE_2500BASE_T;
break;
}
netdev_dbg(netdev, "Advertised value 2500baseX_Full is not supported\n");
return -EIO;
case ADVERTISED_10000baseT_Full:
if (port_eee_cur_cfg.capability & FAL_PHY_EEE_10000BASE_T) {
port_eee_cfg.advertisement |= FAL_PHY_EEE_10000BASE_T;
break;
}
netdev_dbg(netdev, "Advertised value 10000baseT_Full is not supported\n");
return -EIO;
default:
netdev_dbg(netdev, "Advertised value is not supported\n");
return -EIO;
}
eee->advertised &= (~(1 << (pos - 1)));
}
port_eee_cfg.lpi_tx_enable = eee->tx_lpi_enabled;
port_eee_cfg.lpi_sleep_timer = eee->tx_lpi_timer;
ret = fal_port_interface_eee_cfg_set(NSS_DP_ACL_DEV_ID, port_id, &port_eee_cfg);
if (ret != SW_OK) {
netdev_dbg(netdev, "Could not configure EEE err = %d\n", ret);
return -EIO;
}
return 0;
}
/*
* Ethtool operations
*/
struct ethtool_ops nss_dp_ethtool_ops = {
.get_strings = &nss_dp_get_strings,
.get_sset_count = &nss_dp_get_strset_count,
.get_ethtool_stats = &nss_dp_get_ethtool_stats,
.get_link = &ethtool_op_get_link,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
.get_settings = &nss_dp_get_settings,
.set_settings = &nss_dp_set_settings,
#else
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
#endif
.get_pauseparam = &nss_dp_get_pauseparam,
.set_pauseparam = &nss_dp_set_pauseparam,
.get_eee = &nss_dp_get_eee,
.set_eee = &nss_dp_set_eee,
};
/*
* nss_dp_set_ethtool_ops()
* Set ethtool operations
*/
void nss_dp_set_ethtool_ops(struct net_device *netdev)
{
netdev->ethtool_ops = &nss_dp_ethtool_ops;
}

View File

@@ -1,830 +0,0 @@
/*
**************************************************************************
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#if defined(NSS_DP_PPE_SUPPORT)
#include <ref/ref_vsi.h>
#endif
#include <net/switchdev.h>
#include "nss_dp_hal.h"
/*
* Number of TX/RX queues supported is based on the number of host CPUs
*/
#define NSS_DP_NETDEV_TX_QUEUE_NUM NSS_DP_HAL_CPU_NUM
#define NSS_DP_NETDEV_RX_QUEUE_NUM NSS_DP_HAL_CPU_NUM
/* ipq40xx_mdio_data */
struct ipq40xx_mdio_data {
struct mii_bus *mii_bus;
void __iomem *membase;
int phy_irq[PHY_MAX_ADDR];
};
/* Global data */
struct nss_dp_global_ctx dp_global_ctx;
struct nss_dp_data_plane_ctx dp_global_data_plane_ctx[NSS_DP_HAL_MAX_PORTS];
/*
* nss_dp_do_ioctl()
*/
static int32_t nss_dp_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
int32_t cmd)
{
int ret = -EINVAL;
struct nss_dp_dev *dp_priv;
if (!netdev || !ifr)
return ret;
dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);
if (dp_priv->phydev)
return phy_mii_ioctl(dp_priv->phydev, ifr, cmd);
return ret;
}
/*
* nss_dp_change_mtu()
*/
static int32_t nss_dp_change_mtu(struct net_device *netdev, int32_t newmtu)
{
int ret = -EINVAL;
struct nss_dp_dev *dp_priv;
if (!netdev)
return ret;
dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);
/* Let the underlying data plane decide if the newmtu is applicable */
if (dp_priv->data_plane_ops->change_mtu(dp_priv->dpc, newmtu)) {
netdev_dbg(netdev, "Data plane change mtu failed\n");
return ret;
}
netdev->mtu = newmtu;
return 0;
}
/*
* nss_dp_set_mac_address()
*/
static int32_t nss_dp_set_mac_address(struct net_device *netdev, void *macaddr)
{
struct nss_dp_dev *dp_priv;
struct sockaddr *addr = (struct sockaddr *)macaddr;
int ret = 0;
if (!netdev)
return -EINVAL;
dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);
netdev_dbg(netdev, "AddrFamily: %d, %0x:%0x:%0x:%0x:%0x:%0x\n",
addr->sa_family, addr->sa_data[0], addr->sa_data[1],
addr->sa_data[2], addr->sa_data[3], addr->sa_data[4],
addr->sa_data[5]);
ret = eth_prepare_mac_addr_change(netdev, macaddr);
if (ret)
return ret;
if (dp_priv->data_plane_ops->mac_addr(dp_priv->dpc, macaddr)) {
netdev_dbg(netdev, "Data plane set MAC address failed\n");
return -EAGAIN;
}
eth_commit_mac_addr_change(netdev, macaddr);
dp_priv->gmac_hal_ops->setmacaddr(dp_priv->gmac_hal_ctx,
(uint8_t *)addr->sa_data);
return 0;
}
/*
* nss_dp_get_stats64()
*/
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
static struct rtnl_link_stats64 *nss_dp_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
struct nss_dp_dev *dp_priv;
if (!netdev)
return stats;
dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);
dp_priv->gmac_hal_ops->getndostats(dp_priv->gmac_hal_ctx, stats);
return stats;
}
#else
static void nss_dp_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
struct nss_dp_dev *dp_priv;
if (!netdev)
return;
dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);
dp_priv->gmac_hal_ops->getndostats(dp_priv->gmac_hal_ctx, stats);
}
#endif
/*
* nss_dp_xmit()
*/
static netdev_tx_t nss_dp_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct nss_dp_dev *dp_priv;
if (!skb || !netdev)
return NETDEV_TX_OK;
dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);
netdev_dbg(netdev, "Tx packet, len %d\n", skb->len);
return dp_priv->data_plane_ops->xmit(dp_priv->dpc, skb);
}
/*
* nss_dp_close()
*/
static int nss_dp_close(struct net_device *netdev)
{
struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);
if (!dp_priv)
return -EINVAL;
netif_stop_queue(netdev);
netif_carrier_off(netdev);
/* Notify data plane link is going down */
if (dp_priv->data_plane_ops->link_state(dp_priv->dpc, 0)) {
netdev_dbg(netdev, "Data plane set link failed\n");
return -EAGAIN;
}
if (dp_priv->phydev)
phy_stop(dp_priv->phydev);
dp_priv->link_state = __NSS_DP_LINK_DOWN;
#if defined(NSS_DP_PPE_SUPPORT)
/* Notify data plane to unassign VSI */
if (dp_priv->data_plane_ops->vsi_unassign(dp_priv->dpc, dp_priv->vsi)) {
netdev_dbg(netdev, "Data plane vsi unassign failed\n");
return -EAGAIN;
}
#endif
/*
* Notify data plane to close
*/
if (dp_priv->data_plane_ops->close(dp_priv->dpc)) {
netdev_dbg(netdev, "Data plane close failed\n");
return -EAGAIN;
}
clear_bit(__NSS_DP_UP, &dp_priv->flags);
return 0;
}
/*
* nss_dp_open()
*/
static int nss_dp_open(struct net_device *netdev)
{
struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);
if (!dp_priv)
return -EINVAL;
netif_carrier_off(netdev);
/*
* Call data plane init if it has not been done yet
*/
if (!(dp_priv->drv_flags & NSS_DP_PRIV_FLAG(INIT_DONE))) {
if (dp_priv->data_plane_ops->init(dp_priv->dpc)) {
netdev_dbg(netdev, "Data plane init failed\n");
return -ENOMEM;
}
dp_priv->drv_flags |= NSS_DP_PRIV_FLAG(INIT_DONE);
}
/*
* Inform the Linux networking stack about the hardware capability of
* checksum offloading and other features. Each data plane is
* responsible for maintaining the feature set it supports.
*/
dp_priv->data_plane_ops->set_features(dp_priv->dpc);
set_bit(__NSS_DP_UP, &dp_priv->flags);
#if defined(NSS_DP_PPE_SUPPORT)
if (dp_priv->data_plane_ops->vsi_assign(dp_priv->dpc, dp_priv->vsi)) {
netdev_dbg(netdev, "Data plane vsi assign failed\n");
return -EAGAIN;
}
#endif
if (dp_priv->data_plane_ops->mac_addr(dp_priv->dpc, netdev->dev_addr)) {
netdev_dbg(netdev, "Data plane set MAC address failed\n");
return -EAGAIN;
}
if (dp_priv->data_plane_ops->change_mtu(dp_priv->dpc, netdev->mtu)) {
netdev_dbg(netdev, "Data plane change mtu failed\n");
return -EAGAIN;
}
if (dp_priv->data_plane_ops->open(dp_priv->dpc, 0, 0, 0)) {
netdev_dbg(netdev, "Data plane open failed\n");
return -EAGAIN;
}
netif_start_queue(netdev);
if (!dp_priv->link_poll) {
/* Notify data plane link is up */
if (dp_priv->data_plane_ops->link_state(dp_priv->dpc, 1)) {
netdev_dbg(netdev, "Data plane set link failed\n");
return -EAGAIN;
}
dp_priv->link_state = __NSS_DP_LINK_UP;
netif_carrier_on(netdev);
} else {
dp_priv->link_state = __NSS_DP_LINK_DOWN;
phy_start(dp_priv->phydev);
phy_start_aneg(dp_priv->phydev);
}
return 0;
}
#ifdef CONFIG_RFS_ACCEL
/*
* nss_dp_rx_flow_steer()
* Steer the flow rule to NSS
*/
static int nss_dp_rx_flow_steer(struct net_device *netdev, const struct sk_buff *_skb,
uint16_t rxq, uint32_t flow)
{
struct nss_dp_dev *dp_priv;
struct netdev_rx_queue *rxqueue;
struct rps_sock_flow_table *sock_flow_table;
struct rps_dev_flow_table *flow_table;
struct rps_dev_flow *rxflow;
struct sk_buff *skb = (struct sk_buff *)_skb;
uint16_t index;
uint32_t hash;
uint32_t rfscpu;
uint32_t rxcpu;
if (!netdev)
return -EINVAL;
dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);
if (!dp_priv)
return -EINVAL;
rxqueue = netdev->_rx;
if (skb_rx_queue_recorded(skb)) {
index = skb_get_rx_queue(skb);
rxqueue += index;
}
flow_table = rcu_dereference(rxqueue->rps_flow_table);
if (!flow_table) {
netdev_dbg(netdev, "RX queue RPS flow table not found\n");
return -EINVAL;
}
hash = skb_get_hash(skb);
rxflow = &flow_table->flows[hash & flow_table->mask];
rxcpu = (uint32_t)rxflow->cpu;
sock_flow_table = rcu_dereference(rps_sock_flow_table);
if (!sock_flow_table) {
netdev_dbg(netdev, "Global RPS flow table not found\n");
return -EINVAL;
}
rfscpu = sock_flow_table->ents[hash & sock_flow_table->mask];
rfscpu &= rps_cpu_mask;
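/*
* Nothing to do if the flow is already steered to the CPU that
* processes the socket.
*/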
if (rxcpu == rfscpu)
return 0;
/*
* Check that rx_flow_steer is defined in the data plane ops.
*/
if (!dp_priv->data_plane_ops->rx_flow_steer) {
netdev_dbg(netdev, "Data plane ops not defined for flow steer\n");
return -EINVAL;
}
/*
* Delete the old flow rule
*/
if (dp_priv->data_plane_ops->rx_flow_steer(dp_priv->dpc, skb, rxcpu, false)) {
netdev_dbg(netdev, "Data plane delete flow rule failed\n");
return -EAGAIN;
}
/*
* Add the new flow rule
*/
if (dp_priv->data_plane_ops->rx_flow_steer(dp_priv->dpc, skb, rfscpu, true)) {
netdev_dbg(netdev, "Data plane add flow rule failed\n");
return -EAGAIN;
}
return 0;
}
#endif
/*
* nss_dp_select_queue()
* Select tx queue
*/
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
static u16 nss_dp_select_queue(struct net_device *netdev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
#else
static u16 nss_dp_select_queue(struct net_device *netdev, struct sk_buff *skb,
struct net_device *sb_dev)
#endif
{
int cpu = get_cpu();
put_cpu();
/*
* The number of queues matches the number of CPUs, so get_cpu() will
* always return a valid queue index.
*/
return cpu;
}
/*
* Netdevice operations
*/
static const struct net_device_ops nss_dp_netdev_ops = {
.ndo_open = nss_dp_open,
.ndo_stop = nss_dp_close,
.ndo_start_xmit = nss_dp_xmit,
.ndo_get_stats64 = nss_dp_get_stats64,
.ndo_set_mac_address = nss_dp_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = nss_dp_change_mtu,
.ndo_do_ioctl = nss_dp_do_ioctl,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
.ndo_bridge_setlink = switchdev_port_bridge_setlink,
.ndo_bridge_getlink = switchdev_port_bridge_getlink,
.ndo_bridge_dellink = switchdev_port_bridge_dellink,
#endif
.ndo_select_queue = nss_dp_select_queue,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = nss_dp_rx_flow_steer,
#endif
};
/*
* nss_dp_of_get_pdata()
*/
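/*
* Parses the per-port properties from the "qcom,nss-dp" node: "qcom,id",
* "qcom,mactype", the first "reg" range, "qcom,link-poll",
* "qcom,phy-mdio-addr", "qcom,forced-speed", "qcom,forced-duplex" and the
* standard MAC address property. A minimal illustrative node (names,
* addresses and values are placeholders, not taken from a real board
* file) could look like:
*
*	dp1 {
*		compatible = "qcom,nss-dp";
*		qcom,id = <1>;
*		reg = <0x3a001000 0x200>;
*		qcom,mactype = <0>;
*		qcom,link-poll = <1>;
*		qcom,phy-mdio-addr = <0>;
*	};
*/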
static int32_t nss_dp_of_get_pdata(struct device_node *np,
struct net_device *netdev,
struct gmac_hal_platform_data *hal_pdata)
{
uint8_t *maddr;
struct nss_dp_dev *dp_priv;
struct resource memres_devtree = {0};
dp_priv = netdev_priv(netdev);
if (of_property_read_u32(np, "qcom,id", &dp_priv->macid)) {
pr_err("%s: error reading id\n", np->name);
return -EFAULT;
}
if (dp_priv->macid > NSS_DP_HAL_MAX_PORTS || !dp_priv->macid) {
pr_err("%s: invalid macid %d\n", np->name, dp_priv->macid);
return -EFAULT;
}
if (of_property_read_u32(np, "qcom,mactype", &hal_pdata->mactype)) {
pr_err("%s: error reading mactype\n", np->name);
return -EFAULT;
}
if (of_address_to_resource(np, 0, &memres_devtree) != 0)
return -EFAULT;
netdev->base_addr = memres_devtree.start;
hal_pdata->reg_len = resource_size(&memres_devtree);
hal_pdata->netdev = netdev;
hal_pdata->macid = dp_priv->macid;
dp_priv->phy_mii_type = of_get_phy_mode(np);
dp_priv->link_poll = of_property_read_bool(np, "qcom,link-poll");
if (of_property_read_u32(np, "qcom,phy-mdio-addr",
&dp_priv->phy_mdio_addr) && dp_priv->link_poll) {
pr_err("%s: mdio addr required if link polling is enabled\n",
np->name);
return -EFAULT;
}
of_property_read_u32(np, "qcom,forced-speed", &dp_priv->forced_speed);
of_property_read_u32(np, "qcom,forced-duplex", &dp_priv->forced_duplex);
maddr = (uint8_t *)of_get_mac_address(np);
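/*
* On kernels newer than 5.4, of_get_mac_address() can return an ERR_PTR
* rather than NULL when no address is available, so normalise that to
* NULL before validating.
*/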
#if (LINUX_VERSION_CODE > KERNEL_VERSION(5, 4, 0))
if (IS_ERR((void *)maddr)) {
maddr = NULL;
}
#endif
if (maddr && is_valid_ether_addr(maddr)) {
ether_addr_copy(netdev->dev_addr, maddr);
} else {
random_ether_addr(netdev->dev_addr);
pr_info("GMAC%d(%px) Invalid MAC@ - using %pM\n", dp_priv->macid,
dp_priv, netdev->dev_addr);
}
return 0;
}
/*
* nss_dp_mdio_attach()
*/
static struct mii_bus *nss_dp_mdio_attach(struct platform_device *pdev)
{
struct device_node *mdio_node;
struct platform_device *mdio_plat;
struct ipq40xx_mdio_data *mdio_data;
/*
* Find mii_bus using the "mdio-bus" phandle.
*/
mdio_node = of_parse_phandle(pdev->dev.of_node, "mdio-bus", 0);
if (mdio_node) {
return of_mdio_find_bus(mdio_node);
}
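/*
* No "mdio-bus" phandle found: fall back to the legacy ipq40xx MDIO node.
*/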
mdio_node = of_find_compatible_node(NULL, NULL, "qcom,ipq40xx-mdio");
if (!mdio_node) {
dev_err(&pdev->dev, "cannot find mdio node by phandle\n");
return NULL;
}
mdio_plat = of_find_device_by_node(mdio_node);
if (!mdio_plat) {
dev_err(&pdev->dev, "cannot find platform device from mdio node\n");
of_node_put(mdio_node);
return NULL;
}
mdio_data = dev_get_drvdata(&mdio_plat->dev);
if (!mdio_data) {
dev_err(&pdev->dev, "cannot get mii bus reference from device data\n");
of_node_put(mdio_node);
return NULL;
}
return mdio_data->mii_bus;
}
#ifdef CONFIG_NET_SWITCHDEV
/*
* nss_dp_is_phy_dev()
* Check if the net device is an NSS DP device.
*/
bool nss_dp_is_phy_dev(struct net_device *dev)
{
return (dev->netdev_ops == &nss_dp_netdev_ops);
}
#endif
/*
* nss_dp_adjust_link()
*/
void nss_dp_adjust_link(struct net_device *netdev)
{
struct nss_dp_dev *dp_priv = netdev_priv(netdev);
int current_state = dp_priv->link_state;
if (!test_bit(__NSS_DP_UP, &dp_priv->flags))
return;
if (dp_priv->phydev->link && (current_state == __NSS_DP_LINK_UP))
return;
if (!dp_priv->phydev->link && (current_state == __NSS_DP_LINK_DOWN))
return;
if (current_state == __NSS_DP_LINK_DOWN) {
netdev_info(netdev, "PHY Link up speed: %d\n",
dp_priv->phydev->speed);
if (dp_priv->data_plane_ops->link_state(dp_priv->dpc, 1)) {
netdev_dbg(netdev, "Data plane set link up failed\n");
return;
}
dp_priv->link_state = __NSS_DP_LINK_UP;
netif_carrier_on(netdev);
} else {
netdev_info(netdev, "PHY Link is down\n");
if (dp_priv->data_plane_ops->link_state(dp_priv->dpc, 0)) {
netdev_dbg(netdev, "Data plane set link down failed\n");
return;
}
dp_priv->link_state = __NSS_DP_LINK_DOWN;
netif_carrier_off(netdev);
}
}
/*
* nss_dp_probe()
*/
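/*
* Probe sequence: allocate a multi-queue net_device, parse the DT node,
* bind the SoC data plane ops and GMAC HAL ops, optionally attach the
* PHY over MDIO when link polling is enabled, fetch the port's default
* VSI on PPE-enabled builds, and finally register the netdev.
*/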
static int32_t nss_dp_probe(struct platform_device *pdev)
{
struct net_device *netdev;
struct nss_dp_dev *dp_priv;
struct device_node *np = pdev->dev.of_node;
struct gmac_hal_platform_data gmac_hal_pdata;
int32_t ret = 0;
uint8_t phy_id[MII_BUS_ID_SIZE + 3];
#if defined(NSS_DP_PPE_SUPPORT)
uint32_t vsi_id;
fal_port_t port_id;
#endif
/* TODO: See if we need to do some SoC level common init */
netdev = alloc_etherdev_mqs(sizeof(struct nss_dp_dev),
NSS_DP_NETDEV_TX_QUEUE_NUM, NSS_DP_NETDEV_RX_QUEUE_NUM);
if (!netdev) {
pr_info("alloc_etherdev() failed\n");
return -ENOMEM;
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
/* max_mtu is set to 1500 in ether_setup() */
netdev->max_mtu = ETH_MAX_MTU;
#endif
dp_priv = netdev_priv(netdev);
memset((void *)dp_priv, 0, sizeof(struct nss_dp_dev));
dp_priv->pdev = pdev;
dp_priv->netdev = netdev;
netdev->watchdog_timeo = 5 * HZ;
netdev->netdev_ops = &nss_dp_netdev_ops;
nss_dp_set_ethtool_ops(netdev);
#ifdef CONFIG_NET_SWITCHDEV
nss_dp_switchdev_setup(netdev);
#endif
ret = nss_dp_of_get_pdata(np, netdev, &gmac_hal_pdata);
if (ret != 0) {
goto fail;
}
/* Use data plane ops as per the configured SoC */
dp_priv->data_plane_ops = nss_dp_hal_get_data_plane_ops();
if (!dp_priv->data_plane_ops) {
netdev_dbg(netdev, "Dataplane ops not found.\n");
goto fail;
}
dp_priv->dpc = &dp_global_data_plane_ctx[dp_priv->macid-1];
dp_priv->dpc->dev = netdev;
dp_priv->ctx = &dp_global_ctx;
/* TODO:locks init */
/*
* HAL's init function will return the pointer to the HAL context
* (private to hal), which dp will store in its data structures.
* The subsequent hal_ops calls expect the DP to pass the HAL
* context pointer as an argument
*/
dp_priv->gmac_hal_ops = nss_dp_hal_get_gmac_ops(gmac_hal_pdata.mactype);
if (!dp_priv->gmac_hal_ops) {
netdev_dbg(netdev, "Unsupported Mac type: %d\n", gmac_hal_pdata.mactype);
goto fail;
}
dp_priv->gmac_hal_ctx = dp_priv->gmac_hal_ops->init(&gmac_hal_pdata);
if (!(dp_priv->gmac_hal_ctx)) {
netdev_dbg(netdev, "gmac hal init failed\n");
goto fail;
}
if (dp_priv->link_poll) {
dp_priv->miibus = nss_dp_mdio_attach(pdev);
if (!dp_priv->miibus) {
netdev_dbg(netdev, "failed to find miibus\n");
goto fail;
}
snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
dp_priv->miibus->id, dp_priv->phy_mdio_addr);
SET_NETDEV_DEV(netdev, &pdev->dev);
dp_priv->phydev = phy_connect(netdev, phy_id,
&nss_dp_adjust_link,
dp_priv->phy_mii_type);
if (IS_ERR(dp_priv->phydev)) {
netdev_dbg(netdev, "failed to connect to phy device\n");
goto fail;
}
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
dp_priv->phydev->advertising |=
(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
dp_priv->phydev->supported |=
(SUPPORTED_Pause | SUPPORTED_Asym_Pause);
#else
linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, dp_priv->phydev->advertising);
linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, dp_priv->phydev->advertising);
linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, dp_priv->phydev->supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, dp_priv->phydev->supported);
#endif
}
#if defined(NSS_DP_PPE_SUPPORT)
/* Get port's default VSI */
port_id = dp_priv->macid;
if (ppe_port_vsi_get(0, port_id, &vsi_id)) {
netdev_dbg(netdev, "failed to get port's default VSI\n");
goto fail;
}
dp_priv->vsi = vsi_id;
#endif
/* TODO: Features: CSUM, tx/rx offload... configure */
/* Register the network interface */
ret = register_netdev(netdev);
if (ret) {
netdev_dbg(netdev, "Error registering netdevice %s\n",
netdev->name);
dp_priv->gmac_hal_ops->exit(dp_priv->gmac_hal_ctx);
goto fail;
}
dp_global_ctx.nss_dp[dp_priv->macid - 1] = dp_priv;
dp_global_ctx.slowproto_acl_bm = 0;
netdev_dbg(netdev, "Init NSS DP GMAC%d (base = 0x%lx)\n", dp_priv->macid, netdev->base_addr);
return 0;
fail:
free_netdev(netdev);
return -EFAULT;
}
/*
* nss_dp_remove()
*/
static int nss_dp_remove(struct platform_device *pdev)
{
uint32_t i;
struct nss_dp_dev *dp_priv;
struct nss_gmac_hal_ops *hal_ops;
for (i = 0; i < NSS_DP_HAL_MAX_PORTS; i++) {
dp_priv = dp_global_ctx.nss_dp[i];
if (!dp_priv)
continue;
hal_ops = dp_priv->gmac_hal_ops;
if (dp_priv->phydev)
phy_disconnect(dp_priv->phydev);
unregister_netdev(dp_priv->netdev);
hal_ops->exit(dp_priv->gmac_hal_ctx);
free_netdev(dp_priv->netdev);
dp_global_ctx.nss_dp[i] = NULL;
}
return 0;
}
static struct of_device_id nss_dp_dt_ids[] = {
{ .compatible = "qcom,nss-dp" },
{},
};
MODULE_DEVICE_TABLE(of, nss_dp_dt_ids);
static struct platform_driver nss_dp_drv = {
.probe = nss_dp_probe,
.remove = nss_dp_remove,
.driver = {
.name = "nss-dp",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(nss_dp_dt_ids),
},
};
/*
* nss_dp_init()
*/
int __init nss_dp_init(void)
{
int ret;
/*
* Bail out on unsupported platforms.
* TODO: Handle this properly with SoC ops
*/
if (!of_machine_is_compatible("qcom,ipq807x") &&
!of_machine_is_compatible("qcom,ipq8074") &&
!of_machine_is_compatible("qcom,ipq6018") &&
!of_machine_is_compatible("qcom,ipq5018"))
return 0;
/*
* TODO Move this to soc_ops
*/
dp_global_ctx.common_init_done = false;
if (!nss_dp_hal_init()) {
pr_err("DP hal init failed.\n");
return -EFAULT;
}
ret = platform_driver_register(&nss_dp_drv);
if (ret)
pr_info("NSS DP platform drv register failed\n");
dp_global_ctx.common_init_done = true;
pr_info("**********************************************************\n");
pr_info("* NSS Data Plane driver\n");
pr_info("**********************************************************\n");
return ret;
}
/*
* nss_dp_exit()
*/
void __exit nss_dp_exit(void)
{
/*
* TODO Move this to soc_ops
*/
if (dp_global_ctx.common_init_done) {
nss_dp_hal_cleanup();
dp_global_ctx.common_init_done = false;
}
platform_driver_unregister(&nss_dp_drv);
}
module_init(nss_dp_init);
module_exit(nss_dp_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("NSS Data Plane Network Driver");

View File

@@ -1,367 +0,0 @@
/*
**************************************************************************
* Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
#include <linux/version.h>
#include <net/switchdev.h>
#include <linux/if_bridge.h>
#include <net/switchdev.h>
#include "nss_dp_dev.h"
#include "fal/fal_stp.h"
#include "fal/fal_ctrlpkt.h"
#define NSS_DP_SWITCH_ID 0
#define NSS_DP_SW_ETHTYPE_PID 0 /* PPE ethtype profile ID for slow protocols */
#define ETH_P_NONE 0
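/*
* ETH_P_SLOW (0x8809) is the IEEE 802.3 Slow Protocols ethertype used by
* LACP; ETH_P_NONE is used below to clear the ethertype profile again.
*/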
/*
* nss_dp_set_slow_proto_filter()
* Enable/disable the filter that allows Ethernet slow-protocol packets.
*/
static void nss_dp_set_slow_proto_filter(struct nss_dp_dev *dp_priv, bool filter_enable)
{
sw_error_t ret = 0;
fal_ctrlpkt_profile_t profile;
fal_ctrlpkt_action_t action;
memset(&profile, 0, sizeof(profile));
/*
* Action is redirect cpu
*/
action.action = FAL_MAC_RDT_TO_CPU;
action.sg_bypass = A_FALSE;
/*
* Bypass stp
*/
action.in_stp_bypass = A_TRUE;
action.in_vlan_fltr_bypass = A_FALSE;
action.l2_filter_bypass = A_FALSE;
profile.action = action;
profile.ethtype_profile_bitmap = 0x1;
/*
* Set port map
*/
profile.port_map = (1 << dp_priv->macid);
if (filter_enable) {
ret = fal_mgmtctrl_ctrlpkt_profile_add(NSS_DP_SWITCH_ID, &profile);
if (ret != SW_OK) {
netdev_dbg(dp_priv->netdev, "failed to add profile for port_map: 0x%x, ret: %d\n", profile.port_map, ret);
return;
}
/*
* Enable the filter to allow Ethernet slow-protocol packets
* if this is the first port being disabled by STP.
*/
if (!dp_priv->ctx->slowproto_acl_bm) {
ret = fal_mgmtctrl_ethtype_profile_set(NSS_DP_SWITCH_ID, NSS_DP_SW_ETHTYPE_PID, ETH_P_SLOW);
if (ret != SW_OK) {
netdev_dbg(dp_priv->netdev, "failed to set ethertype profile: 0x%x, ret: %d\n", ETH_P_SLOW, ret);
ret = fal_mgmtctrl_ctrlpkt_profile_del(NSS_DP_SWITCH_ID, &profile);
if (ret != SW_OK) {
netdev_dbg(dp_priv->netdev, "failed to delete profile for port_map: 0x%x, ret: %d\n", profile.port_map, ret);
}
return;
}
}
/*
* Add port to port bitmap
*/
dp_priv->ctx->slowproto_acl_bm = dp_priv->ctx->slowproto_acl_bm | (1 << dp_priv->macid);
} else {
ret = fal_mgmtctrl_ctrlpkt_profile_del(NSS_DP_SWITCH_ID, &profile);
if (ret != SW_OK) {
netdev_dbg(dp_priv->netdev, "failed to delete profile for port_map: 0x%x, ret: %d\n", profile.port_map, ret);
return;
}
/*
* Delete port from port bitmap
*/
dp_priv->ctx->slowproto_acl_bm = dp_priv->ctx->slowproto_acl_bm & (~(1 << dp_priv->macid));
/*
* If all ports are in STP-enabled state, then we do not need
* the filter to allow ethernet slow protocol packets
*/
if (!dp_priv->ctx->slowproto_acl_bm) {
ret = fal_mgmtctrl_ethtype_profile_set(NSS_DP_SWITCH_ID, NSS_DP_SW_ETHTYPE_PID, ETH_P_NONE);
if (ret != SW_OK) {
netdev_dbg(dp_priv->netdev, "failed to reset ethertype profile: 0x%x ret: %d\n", ETH_P_NONE, ret);
}
}
}
}
/*
* nss_dp_stp_state_set()
* Set bridge port STP state to the port of NSS data plane.
*/
static int nss_dp_stp_state_set(struct nss_dp_dev *dp_priv, u8 state)
{
sw_error_t err;
fal_stp_state_t stp_state;
switch (state) {
case BR_STATE_DISABLED:
stp_state = FAL_STP_DISABLED;
/*
* Dynamic bond interfaces that are bridge slaves need to receive
* Ethernet slow-protocol packets for LACP even in the STP
* disabled state.
*/
nss_dp_set_slow_proto_filter(dp_priv, true);
break;
case BR_STATE_LISTENING:
stp_state = FAL_STP_LISTENING;
break;
case BR_STATE_BLOCKING:
stp_state = FAL_STP_BLOCKING;
break;
case BR_STATE_LEARNING:
stp_state = FAL_STP_LEARNING;
break;
case BR_STATE_FORWARDING:
stp_state = FAL_STP_FORWARDING;
/*
* Remove the filter for allowing ethernet slow protocol packets
* for bond interfaces
*/
nss_dp_set_slow_proto_filter(dp_priv, false);
break;
default:
return -EOPNOTSUPP;
}
err = fal_stp_port_state_set(NSS_DP_SWITCH_ID, 0, dp_priv->macid,
stp_state);
if (err) {
netdev_dbg(dp_priv->netdev, "failed to set ftp state\n");
/*
* Restore the slow proto filters
*/
if (state == BR_STATE_DISABLED)
nss_dp_set_slow_proto_filter(dp_priv, false);
else if (state == BR_STATE_FORWARDING)
nss_dp_set_slow_proto_filter(dp_priv, true);
return -EINVAL;
}
return 0;
}
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
/*
* nss_dp_attr_get()
* Get port information to update switchdev attribute for NSS data plane.
*/
static int nss_dp_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(dev);
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
attr->u.ppid.id_len = 1;
attr->u.ppid.id[0] = NSS_DP_SWITCH_ID;
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
attr->u.brport_flags = dp_priv->brport_flags;
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
/*
* nss_dp_attr_set()
* Get switchdev attribute and set to the device of NSS data plane.
*/
static int nss_dp_attr_set(struct net_device *dev,
const struct switchdev_attr *attr,
struct switchdev_trans *trans)
{
struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(dev);
struct net_device *upper_dev;
struct vlan_dev_priv *vlan;
struct list_head *iter;
uint32_t stp_state = attr->u.stp_state;
if (switchdev_trans_ph_prepare(trans))
return 0;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
dp_priv->brport_flags = attr->u.brport_flags;
netdev_dbg(dev, "set brport_flags %lu\n", attr->u.brport_flags);
return 0;
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
/*
* The stp state is not changed to FAL_STP_DISABLED if
* the net_device (dev) has any vlan configured. Otherwise
* traffic on other vlan(s) will not work.
*
* Note: STP for VLANs is not supported by PPE.
*/
if ((stp_state == BR_STATE_DISABLED) ||
(stp_state == BR_STATE_BLOCKING)) {
rcu_read_lock();
netdev_for_each_upper_dev_rcu(dev, upper_dev, iter) {
if (!is_vlan_dev(upper_dev))
continue;
vlan = vlan_dev_priv(upper_dev);
if (vlan->real_dev == dev) {
rcu_read_unlock();
netdev_dbg(dev, "Do not update stp state to: %u since vlan id: %d is configured on netdevice: %s\n",
stp_state, vlan->vlan_id, vlan->real_dev->name);
return 0;
}
}
rcu_read_unlock();
}
return nss_dp_stp_state_set(dp_priv, stp_state);
default:
return -EOPNOTSUPP;
}
}
/*
* nss_dp_switchdev_ops
* Switchdev operations of NSS data plane.
*/
static const struct switchdev_ops nss_dp_switchdev_ops = {
.switchdev_port_attr_get = nss_dp_attr_get,
.switchdev_port_attr_set = nss_dp_attr_set,
};
/*
* nss_dp_switchdev_setup()
* Set up NSS data plane switchdev operations.
*/
void nss_dp_switchdev_setup(struct net_device *dev)
{
dev->switchdev_ops = &nss_dp_switchdev_ops;
switchdev_port_fwd_mark_set(dev, NULL, false);
}
#else
/*
* nss_dp_port_attr_set()
* Sets attributes
*/
static int nss_dp_port_attr_set(struct net_device *dev,
const struct switchdev_attr *attr,
struct switchdev_trans *trans)
{
struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(dev);
if (switchdev_trans_ph_prepare(trans))
return 0;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
dp_priv->brport_flags = attr->u.brport_flags;
netdev_dbg(dev, "set brport_flags %lu\n", attr->u.brport_flags);
return 0;
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
return nss_dp_stp_state_set(dp_priv, attr->u.stp_state);
default:
return -EOPNOTSUPP;
}
}
/*
* nss_dp_switchdev_port_attr_set_event()
* Attribute set event
*/
static int nss_dp_switchdev_port_attr_set_event(struct net_device *netdev,
struct switchdev_notifier_port_attr_info *port_attr_info)
{
int err;
err = nss_dp_port_attr_set(netdev, port_attr_info->attr,
port_attr_info->trans);
port_attr_info->handled = true;
return notifier_from_errno(err);
}
/*
* nss_dp_switchdev_event()
* Switch dev event on netdevice
*/
static int nss_dp_switchdev_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
/*
* Handle switchdev event only for physical devices
*/
if (!nss_dp_is_phy_dev(dev)) {
return NOTIFY_DONE;
}
if (event == SWITCHDEV_PORT_ATTR_SET)
nss_dp_switchdev_port_attr_set_event(dev, ptr);
return NOTIFY_DONE;
}
static struct notifier_block nss_dp_switchdev_notifier = {
.notifier_call = nss_dp_switchdev_event,
};
static bool switch_init_done;
/*
* nss_dp_switchdev_setup()
* Setup switch dev
*/
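/*
* The blocking switchdev notifier is shared by all DP ports, so it is
* registered only once (guarded by switch_init_done).
*/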
void nss_dp_switchdev_setup(struct net_device *dev)
{
int err;
if (switch_init_done) {
return;
}
err = register_switchdev_blocking_notifier(&nss_dp_switchdev_notifier);
if (err) {
netdev_dbg(dev, "%px:Failed to register switchdev notifier\n", dev);
}
switch_init_done = true;
}
#endif