Revert "mediatek-sdk: update SDK to mp2.3 final"

This reverts commit 5f1d562f61.

Signed-off-by: John Crispin <john@phrozen.org>
This commit is contained in:
John Crispin
2024-12-19 06:42:41 +01:00
parent ee80309fc3
commit 3d43f4fc95
14 changed files with 684 additions and 1088 deletions

View File

@@ -1,56 +0,0 @@
From de4d3e25a555dedd70793d0362b1e501ed1a77f1 Mon Sep 17 00:00:00 2001
From: Benjamin Lin <benjamin-jw.lin@mediatek.com>
Date: Tue, 30 Apr 2024 10:28:29 +0800
Subject: [PATCH] mac80211: mtk: fix inconsistent QoS mapping between AP and
AP_VLAN VIFs
Fix inconsistent QoS mapping between AP and AP_VLAN IFs.
Specifically, when WDS AP IF is connected by a WDS STA, the QoS map of the AP_VLAN VIF is NULL.
So the QoS types of packets to the WDS STA will be determined using the default mapping rule.
However, SoftMAC driver uses the QoS map of the AP VIF, which may already be set.
Therefore, it is possible that the QoS mappings of SW and HW are inconsistent.
Thus, sync QoS map of AP VIF to that of AP_VLAN VIF.
Signed-off-by: Benjamin Lin <benjamin-jw.lin@mediatek.com>
---
net/mac80211/iface.c | 23 ++++++++++++++++++++++-
1 file changed, 22 insertions(+), 1 deletion(-)
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index ef32d53..138ad79 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -297,8 +297,29 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
* can only add VLANs to enabled APs
*/
if (iftype == NL80211_IFTYPE_AP_VLAN &&
- nsdata->vif.type == NL80211_IFTYPE_AP)
+ nsdata->vif.type == NL80211_IFTYPE_AP) {
+ struct mac80211_qos_map *old_qos_map, *new_qos_map = NULL;
+
sdata->bss = &nsdata->u.ap;
+
+ rcu_read_lock();
+ old_qos_map = rcu_dereference(nsdata->qos_map);
+ if (old_qos_map) {
+ new_qos_map = kzalloc(sizeof(*new_qos_map), GFP_KERNEL);
+ if (!new_qos_map) {
+ rcu_read_unlock();
+ return -ENOMEM;
+ }
+ memcpy(&new_qos_map->qos_map, &old_qos_map->qos_map,
+ sizeof(new_qos_map->qos_map));
+ }
+ rcu_read_unlock();
+
+ old_qos_map = sdata_dereference(sdata->qos_map, sdata);
+ rcu_assign_pointer(sdata->qos_map, new_qos_map);
+ if (old_qos_map)
+ kfree_rcu(old_qos_map, rcu_head);
+ }
}
}
--
2.18.0

View File

@@ -456,190 +456,6 @@ static ssize_t pppq_toggle_write(struct file *file, const char __user *ptr,
return len;
}
/*
 * pse_info_usage() - print usage help for the pse_info debugfs node.
 * @m:       seq_file handle (unused; required by single_open())
 * @private: opaque inode private data (unused)
 *
 * Return: always 0.
 */
int pse_info_usage(struct seq_file *m, void *private)
{
	static const char * const help[] = {
		"====================Advanced Settings====================\n",
		"Usage: echo [port] [option] > /sys/kernel/debug/mtketh/pse_info\n",
		" 0~15 0~2 Show PSE info\n",
		" 3 Show PSE legacy info\n",
	};
	unsigned int line;

	for (line = 0; line < sizeof(help) / sizeof(help[0]); line++)
		pr_info("%s", help[line]);

	return 0;
}
/* debugfs open hook: route reads of pse_info through pse_info_usage(). */
static int pse_info_open(struct inode *inode, struct file *file)
{
	return single_open(file, pse_info_usage, inode->i_private);
}
/*
 * pse_lgc_info_get_v2() - decode and print PSE legacy FS info (v2 layout).
 * @eth:  driver context used for register access via mtk_r32()
 * @port: PSE port number (used in the log header only)
 * @dbg1: register offset holding the low 32 bits of the FS info word
 * @dbg2: register offset holding the high 32 bits
 *
 * Combines the two 32-bit debug registers into one 64-bit word and views
 * it through the struct mtk_pse_fs_lgc_info_v2 bit-fields.
 * NOTE(review): this assumes the compiler's bit-field layout matches the
 * hardware word — presumably little-endian only; confirm before porting.
 */
void pse_lgc_info_get_v2(struct mtk_eth *eth, int port, u32 dbg1, u32 dbg2)
{
	struct mtk_pse_fs_lgc_info_v2 *info;
	u64 val;

	/* info aliases val; val must be fully written before the prints. */
	info = (struct mtk_pse_fs_lgc_info_v2 *)&val;
	val = mtk_r32(eth, dbg1);
	val |= ((u64)mtk_r32(eth, dbg2) << 32);

	pr_info("PSE port%d legacy FS info (v2)\n", port);
	pr_info("=========================================\n");
	pr_info("sport=%d, fport=%d, dr_idx=%d, ppe_crsn=%d\n",
		info->sport, info->fport, info->dr_idx, info->ppe_crsn);
	pr_info("l2_len=%d, l3_len=%d, l3_pidx=%d\n",
		info->l2_len, info->l3_len, info->l3_pidx);
	pr_info("is_l4f=%d, is_l4vld=%d, is_tack=%d\n",
		info->is_l4f, info->is_l4vld, info->is_tack);
	pr_info("is_ip4f=%d, is_ip4=%d, is_ip6=%d\n",
		info->is_ip4f, info->is_ip4, info->is_ip6);
	pr_info("=========================================\n");
}
/*
 * pse_lgc_info_get_v3() - decode and print PSE legacy FS info (v3 layout).
 * @eth:  driver context used for register access via mtk_r32()
 * @port: PSE port number (used in the log header only)
 * @dbg1: register offset holding the low 32 bits of the FS info word
 * @dbg2: register offset holding the high 32 bits
 *
 * Same register-pair decode as the v2 variant, but the v3 bit-field layout
 * additionally carries SNAP/VLAN-offset, payload-end and error-packet fields.
 * NOTE(review): relies on compiler bit-field layout matching the hardware
 * word — presumably little-endian only; confirm before porting.
 */
void pse_lgc_info_get_v3(struct mtk_eth *eth, int port, u32 dbg1, u32 dbg2)
{
	struct mtk_pse_fs_lgc_info_v3 *info;
	u64 val;

	/* info aliases val; val must be fully written before the prints. */
	info = (struct mtk_pse_fs_lgc_info_v3 *)&val;
	val = mtk_r32(eth, dbg1);
	val |= ((u64)mtk_r32(eth, dbg2) << 32);

	pr_info("PSE port%d legacy FS info (v3)\n", port);
	pr_info("=========================================\n");
	pr_info("sport=%d, fport=%d, ppe_crsn=%d\n",
		info->sport, info->fport, info->ppe_crsn);
	pr_info("l2_len=%d, l3_len=%d, l3_pidx=%d\n",
		info->l2_len, info->l3_len, info->l3_pidx);
	pr_info("is_l4f=%d, is_l4vld=%d, is_tack=%d\n",
		info->is_l4f, info->is_l4vld, info->is_tack);
	pr_info("is_ip4f=%d, is_ip4=%d, is_ip6=%d\n",
		info->is_ip4f, info->is_ip4, info->is_ip6);
	pr_info("is_snap=%d, vofst=%d, pl_end=%d\n",
		info->is_snap, info->vofst, info->pl_end);
	pr_info("is_err_pkt=%d, err_pkt_action=%d\n",
		info->is_err_pkt, info->err_pkt_action);
	pr_info("=========================================\n");
}
/*
 * pse_info_get_gdm() - dump PSE forwarding-state info for a GDM block.
 * @eth:     driver context
 * @port:    PSE port number (log header only)
 * @index:   GDM instance: 0 = GDM1, 1 = GDM2
 * @options: 3 = dump legacy FS info (v2 layout); anything else unsupported
 *
 * Out-of-range @index is silently ignored, matching the CDM variant.
 */
void pse_info_get_gdm(struct mtk_eth *eth, int port, int index, int options)
{
	/* Register offsets are fixed per instance; keep them in rodata. */
	static const u32 dbg1[2] = {MTK_FE_GDM1_DBG1, MTK_FE_GDM2_DBG1};
	static const u32 dbg2[2] = {MTK_FE_GDM1_DBG2, MTK_FE_GDM2_DBG2};

	if (index < 0 || index > 1)
		return;

	if (options == 3)
		pse_lgc_info_get_v2(eth, port, dbg1[index], dbg2[index]);
	else
		pr_info("Not supported\n");
}
/*
 * pse_info_get_cdm() - dump PSE forwarding-state info for a CDM block.
 * @eth:     driver context
 * @port:    PSE port number (log header only)
 * @index:   CDM instance, 0..6 (CDM1..CDM7)
 * @options: 3 = dump legacy FS info; anything else unsupported
 *
 * On NETSYS v3, CDM3..CDM7 report the v3 layout; CDM1/CDM2 (and all
 * instances on older SoCs) use the v2 layout.
 */
void pse_info_get_cdm(struct mtk_eth *eth, int port, int index, int options)
{
	/* Register offsets are fixed per instance; keep them in rodata. */
	static const u32 dbg1[7] = {MTK_FE_CDM1_DBG1, MTK_FE_CDM2_DBG1,
				    MTK_FE_CDM3_DBG1, MTK_FE_CDM4_DBG1,
				    MTK_FE_CDM5_DBG1, MTK_FE_CDM6_DBG1,
				    MTK_FE_CDM7_DBG1};
	static const u32 dbg2[7] = {MTK_FE_CDM1_DBG2, MTK_FE_CDM2_DBG2,
				    MTK_FE_CDM3_DBG2, MTK_FE_CDM4_DBG2,
				    MTK_FE_CDM5_DBG2, MTK_FE_CDM6_DBG2,
				    MTK_FE_CDM7_DBG2};

	if (index < 0 || index > 6)
		return;

	if (options == 3) {
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
		if (index > 1 && index < 7) {
			/*
			 * Was "return pse_lgc_info_get_v3(...);" — returning a
			 * void expression from a void function is a C
			 * constraint violation; call then return instead.
			 */
			pse_lgc_info_get_v3(eth, port, dbg1[index], dbg2[index]);
			return;
		}
#endif
		pse_lgc_info_get_v2(eth, port, dbg1[index], dbg2[index]);
	} else {
		pr_info("Not supported\n");
	}
}
/*
 * pse_info_get() - dispatch a PSE info dump for one PSE port.
 * @eth:     driver context
 * @port:    PSE port id (PSE_GDM1_PORT .. PSE_EIP197_PORT)
 * @options: dump flavor, forwarded to the GDM/CDM helpers (3 = legacy)
 *
 * Maps each PSE port onto the GDM/CDM debug-register instance that
 * carries its forwarding-state info.  Unknown ports just log a message.
 *
 * Return: always 0.
 */
int pse_info_get(struct mtk_eth *eth, int port, int options)
{
	switch (port) {
	case PSE_GDM1_PORT:
		pse_info_get_gdm(eth, port, 0, options);
		break;
	case PSE_GDM2_PORT:
		pse_info_get_gdm(eth, port, 1, options);
		break;
	case PSE_ADMA_PORT:
		pse_info_get_cdm(eth, port, 0, options);
		break;
	case PSE_QDMA_TX_PORT:
	case PSE_QDMA_RX_PORT:
		/* Both QDMA directions share CDM2. */
		pse_info_get_cdm(eth, port, 1, options);
		break;
	case PSE_WDMA0_PORT:
		pse_info_get_cdm(eth, port, 2, options);
		break;
	case PSE_WDMA1_PORT:
		pse_info_get_cdm(eth, port, 3, options);
		break;
	case PSE_WDMA2_PORT:
		pse_info_get_cdm(eth, port, 4, options);
		break;
	case PSE_TDMA_PORT:
		pse_info_get_cdm(eth, port, 5, options);
		break;
	case PSE_EIP197_PORT:
		pse_info_get_cdm(eth, port, 6, options);
		break;		/* was followed by a duplicated, dead "break;" */
	default:
		pr_info("Not supported\n");
		break;
	}

	return 0;
}
/*
 * pse_info_write() - debugfs write handler: "echo <port> <option> > pse_info".
 * @file:   debugfs file (private_data is the seq_file from single_open())
 * @buffer: userspace buffer with "<port> [option]" in decimal
 * @count:  number of bytes written
 * @off:    file offset (ignored)
 *
 * Return: @count on success, -EINVAL on oversized/malformed input,
 *         -EFAULT if the user buffer cannot be copied.
 */
static ssize_t pse_info_write(struct file *file, const char __user *buffer,
			      size_t count, loff_t *off)
{
	struct seq_file *m = file->private_data;
	struct mtk_eth *eth = m->private;
	long arg0 = 0, arg1 = 0;
	char buf[32];
	char *p_buf;
	char *p_token = NULL;
	const char *p_delimiter = " \t";
	/* size_t, not u32: a u32 copy of count could silently truncate and
	 * defeat the bounds check below.
	 */
	size_t len = count;
	int ret;

	if (len >= sizeof(buf)) {
		pr_info("input handling fail!\n");
		return -EINVAL;	/* was -1 (-EPERM), which is misleading */
	}

	if (copy_from_user(buf, buffer, len))
		return -EFAULT;

	buf[len] = '\0';

	p_buf = buf;
	p_token = strsep(&p_buf, p_delimiter);
	if (!p_token) {
		arg0 = 0;
	} else {
		/* Reject garbage instead of ignoring kstrtol()'s result. */
		ret = kstrtol(p_token, 10, &arg0);
		if (ret)
			return -EINVAL;
	}

	if (arg0 >= 0 && arg0 <= 15) {
		p_token = strsep(&p_buf, p_delimiter);
		if (!p_token) {
			arg1 = 0;
		} else {
			ret = kstrtol(p_token, 10, &arg1);
			if (ret)
				return -EINVAL;
		}
	}

	/* Out-of-range ports fall through to pse_info_get()'s default case. */
	pse_info_get(eth, arg0, arg1);

	return len;
}
static const struct file_operations fops_reg_w = {
.owner = THIS_MODULE,
.open = simple_open,
@@ -663,15 +479,6 @@ static const struct file_operations fops_pppq_toggle = {
.release = single_release,
};
/* debugfs ops for /sys/kernel/debug/mtketh/pse_info: read prints usage,
 * write triggers a PSE info dump (see pse_info_write()).
 */
static const struct file_operations fops_pse_info = {
	.owner = THIS_MODULE,
	.open = pse_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = pse_info_write,
	.release = single_release,
};
static const struct file_operations fops_mt7530sw_reg_w = {
.owner = THIS_MODULE,
.open = simple_open,
@@ -694,8 +501,6 @@ int mtketh_debugfs_init(struct mtk_eth *eth)
ret = -ENOMEM;
}
debugfs_create_file("pse_info", 0444,
eth_debug.root, eth, &fops_pse_info);
debugfs_create_file("pppq_toggle", 0444,
eth_debug.root, eth, &fops_pppq_toggle);
debugfs_create_file("phy_regs", S_IRUGO,

View File

@@ -30,24 +30,6 @@
#define MTK_FE_GDM1_FSM 0x228
#define MTK_FE_GDM2_FSM 0x22C
#define MTK_FE_GDM3_FSM 0x23C
#define MTK_FE_CDM1_DBG1 0x200
#define MTK_FE_CDM1_DBG2 0x204
#define MTK_FE_CDM2_DBG1 0x208
#define MTK_FE_CDM2_DBG2 0x20C
#define MTK_FE_CDM3_DBG1 0x230
#define MTK_FE_CDM3_DBG2 0x234
#define MTK_FE_CDM4_DBG1 0x290
#define MTK_FE_CDM4_DBG2 0x294
#define MTK_FE_CDM5_DBG1 0x310
#define MTK_FE_CDM5_DBG2 0x314
#define MTK_FE_CDM6_DBG1 0x320
#define MTK_FE_CDM6_DBG2 0x324
#define MTK_FE_CDM7_DBG1 0x330
#define MTK_FE_CDM7_DBG2 0x334
#define MTK_FE_GDM1_DBG1 0x210
#define MTK_FE_GDM1_DBG2 0x214
#define MTK_FE_GDM2_DBG1 0x218
#define MTK_FE_GDM2_DBG2 0x21C
#define MTK_FE_PSE_FREE 0x240
#define MTK_FE_DROP_FQ 0x244
#define MTK_FE_DROP_FC 0x248
@@ -193,46 +175,6 @@
mtk_w32(eth, reg_val, MTK_LRO_CTRL_DW2_CFG(x)); \
}
/* 64-bit PSE forwarding-state legacy info word, v2 layout.  Overlaid on the
 * value read from the GDM/CDM DBG1 (low) + DBG2 (high) register pair — see
 * pse_lgc_info_get_v2().  Field widths sum to exactly 64 bits.
 * NOTE(review): field meanings are inferred from the print strings in
 * pse_lgc_info_get_v2(); bit positions depend on little-endian bit-field
 * layout — confirm against the SoC datasheet.
 */
struct mtk_pse_fs_lgc_info_v2 {
	u32 rev3 : 14;		/* reserved */
	u32 ppe_crsn: 5;
	u32 sport : 4;		/* source port */
	u32 is_l4f: 1;
	u32 is_l4vld: 1;
	u32 is_tack : 1;
	u32 is_ip4f : 1;
	u32 is_ip4 : 1;
	u32 is_ip6 : 1;
	u32 dr_idx : 2;
	u32 rev2 : 4;		/* reserved */
	u32 l3_pidx : 2;
	u32 rev : 2;		/* reserved */
	u32 fport : 4;		/* forward port */
	u32 l2_len : 7;
	u32 l3_len : 14;
} __packed;
/* 64-bit PSE forwarding-state legacy info word, v3 layout (NETSYS v3
 * CDM3..CDM7).  Overlaid on the DBG1 (low) + DBG2 (high) register pair —
 * see pse_lgc_info_get_v3().  Field widths sum to exactly 64 bits.
 * NOTE(review): field meanings are inferred from the print strings in
 * pse_lgc_info_get_v3(); bit positions depend on little-endian bit-field
 * layout — confirm against the SoC datasheet.
 */
struct mtk_pse_fs_lgc_info_v3 {
	u32 is_snap : 1;
	u32 vofst : 3;
	u32 l3_pidx : 2;
	u32 pse_sport : 4;
	u32 fport : 4;		/* forward port */
	u32 ppe_crsn: 5;
	u32 sport : 4;		/* source port */
	u32 is_l4f: 1;
	u32 is_l4vld: 1;
	u32 is_tack : 1;
	u32 is_ip4f : 1;
	u32 is_ip4 : 1;
	u32 is_ip6 : 1;
	u32 is_err_pkt : 1;
	u32 err_pkt_action : 2;
	u32 pl_end : 11;
	u32 l2_len : 7;
	u32 l3_len : 14;
} __packed;
struct mtk_lro_alt_v1_info0 {
u32 dtp : 16;
u32 stp : 16;

View File

@@ -1158,9 +1158,10 @@ static void mtk_mac_link_up(struct phylink_config *config, unsigned int mode,
}
/* Configure duplex */
mcr |= MAC_MCR_FORCE_DPX;
if (duplex == DUPLEX_HALF &&
interface == PHY_INTERFACE_MODE_GMII)
if (duplex == DUPLEX_FULL ||
interface == PHY_INTERFACE_MODE_SGMII)
mcr |= MAC_MCR_FORCE_DPX;
else if (interface == PHY_INTERFACE_MODE_GMII)
mcr |= MAC_MCR_PRMBL_LMT_EN;
/* Configure pause modes -
@@ -3867,9 +3868,17 @@ static int mtk_start_dma(struct mtk_eth *eth)
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
val = mtk_r32(eth, reg_map->qdma.glo_cfg);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
val &= ~(MTK_RESV_BUF_MASK | MTK_DMA_SIZE_MASK);
mtk_w32(eth,
val | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
MTK_DMA_SIZE_16DWORDS | MTK_TX_WB_DDONE |
MTK_NDP_CO_PRO | MTK_MUTLI_CNT |
MTK_RESV_BUF | MTK_WCOMP_EN |
MTK_DMAD_WR_WDONE | MTK_CHK_DDONE_EN |
MTK_RX_2B_OFFSET, reg_map->qdma.glo_cfg);
} else if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
val &= ~MTK_RESV_BUF_MASK;
mtk_w32(eth,
val | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
MTK_DMA_SIZE_32DWORDS | MTK_TX_WB_DDONE |
@@ -4409,7 +4418,7 @@ static int mtk_hw_init(struct mtk_eth *eth, u32 type)
/* GDM and CDM Threshold */
mtk_w32(eth, 0x00000004, MTK_CDM2_THRES);
mtk_w32(eth, 0x08000707, MTK_CDMW0_THRES);
mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
/* Disable GDM1 RX CRC stripping */

View File

@@ -119,7 +119,7 @@ void cr_set_field(void __iomem *reg, u32 field, u32 val)
}
/*boundary entry can't be used to accelerate data flow*/
void exclude_boundary_entry(struct foe_entry *foe_table_cpu)
static void exclude_boundary_entry(struct foe_entry *foe_table_cpu)
{
int entry_base = 0;
int bad_entry, i, j;

View File

@@ -145,7 +145,6 @@
#define HASH_MODE (0x3 << 14) /* RW */
#define SCAN_MODE (0x3 << 16) /* RW */
#define XMODE (0x3 << 18) /* RW */
#define HASH_DBG (0x3 << 21) /* RW */
#define TICK_SEL (0x1 << 24) /* RW */
#define DSCP_TRFC_ECN_EN (0x1 << 25) /* RW */
@@ -1147,7 +1146,6 @@ enum FoeIpAct {
#define NR_WDMA1_PORT 9
#define NR_WDMA2_PORT 13
#define NR_GMAC3_PORT 15
#define NR_QDMA_TPORT 1
#define LAN_DEV_NAME hnat_priv->lan
#define LAN2_DEV_NAME hnat_priv->lan2
#define IS_WAN(dev) \
@@ -1312,7 +1310,6 @@ int hnat_enable_hook(void);
int hnat_disable_hook(void);
void hnat_cache_ebl(int enable);
void hnat_qos_shaper_ebl(u32 id, u32 enable);
void exclude_boundary_entry(struct foe_entry *foe_table_cpu);
void set_gmac_ppe_fwd(int gmac_no, int enable);
int entry_detail(u32 ppe_id, int index);
int entry_delete_by_mac(u8 *mac);

View File

@@ -755,7 +755,6 @@ int cr_set_usage(int level)
pr_info(" 5 0~255 Set TCP keep alive interval\n");
pr_info(" 6 0~255 Set UDP keep alive interval\n");
pr_info(" 7 0~1 Set hnat counter update to nf_conntrack\n");
pr_info(" 8 0~6 Set PPE hash debug mode\n");
return 0;
}
@@ -867,56 +866,6 @@ int set_nf_update_toggle(int toggle)
return 0;
}
/*
 * set_hash_dbg_mode() - switch the PPE hash debug mode and reset flow state.
 * @dbg_mode: 0..6, index into hash_dbg_mode[] (0 = Normal).
 *
 * Changing the hash mode invalidates existing FoE entries, so traffic is
 * first detoured away from the PPE, the FoE (and optional accounting)
 * tables are wiped for every PPE instance, the HW cache is cleared, and
 * only then is PPE forwarding re-enabled.  The statement order matters;
 * do not reorder.
 *
 * Return: 0 on success, -EINVAL for an out-of-range mode.
 */
int set_hash_dbg_mode(int dbg_mode)
{
	/* Human-readable names for the seven debug modes, indexed by mode. */
	static const char * const hash_dbg_mode[] = {
		"Normal", "Source port[15:0]",
		"IPv4 source IP[15:0]", "IPv6 source IP[15:0]", "Destination port[15:0]",
		"IPv4 destination IP[15:0]", "IPv6 destination IP[15:0]" };
	unsigned int foe_table_sz, foe_acct_tb_sz, ppe_id, i;

	if (dbg_mode < 0 || dbg_mode > 6) {
		pr_info("Invalid hash debug mode %d\n", dbg_mode);
		pr_info("[debug mode]\n");
		for (i = 0; i <= 6; i++)
			pr_info(" %d %s\n", i, hash_dbg_mode[i]);
		return -EINVAL;
	}

	foe_table_sz = hnat_priv->foe_etry_num * sizeof(struct foe_entry);
	foe_acct_tb_sz = hnat_priv->foe_etry_num * sizeof(struct hnat_accounting);

	/* send all traffic back to the DMA engine */
	set_gmac_ppe_fwd(NR_GMAC1_PORT, 0);
	set_gmac_ppe_fwd(NR_GMAC2_PORT, 0);
	set_gmac_ppe_fwd(NR_GMAC3_PORT, 0);

	for (ppe_id = 0; ppe_id < CFG_PPE_NUM; ppe_id++) {
		/* Program the new mode, then wipe this PPE's FoE state. */
		cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG,
			     HASH_DBG, dbg_mode);
		memset(hnat_priv->foe_table_cpu[ppe_id], 0, foe_table_sz);
		/* V1.1 hardware cannot use entries on bin boundaries. */
		if (hnat_priv->data->version == MTK_HNAT_V1_1)
			exclude_boundary_entry(hnat_priv->foe_table_cpu[ppe_id]);
		if (hnat_priv->data->per_flow_accounting)
			memset(hnat_priv->acct[ppe_id], 0, foe_acct_tb_sz);
	}

	/* clear HWNAT cache */
	hnat_cache_ebl(1);

	set_gmac_ppe_fwd(NR_GMAC1_PORT, 1);
	set_gmac_ppe_fwd(NR_GMAC2_PORT, 1);
	set_gmac_ppe_fwd(NR_GMAC3_PORT, 1);

	pr_info("Hash debug mode enabled, set to %s mode\n", hash_dbg_mode[dbg_mode]);

	return 0;
}
static const debugfs_write_func hnat_set_func[] = {
[0] = hnat_set_usage,
[1] = hnat_cpu_reason,
@@ -938,7 +887,6 @@ static const debugfs_write_func cr_set_func[] = {
[2] = tcp_bind_lifetime, [3] = fin_bind_lifetime,
[4] = udp_bind_lifetime, [5] = tcp_keep_alive,
[6] = udp_keep_alive, [7] = set_nf_update_toggle,
[8] = set_hash_dbg_mode,
};
int read_mib(struct mtk_hnat *h, u32 ppe_id,
@@ -1949,7 +1897,6 @@ ssize_t hnat_setting_write(struct file *file, const char __user *buffer,
case 5:
case 6:
case 7:
case 8:
p_token = strsep(&p_buf, p_delimiter);
if (!p_token)
arg1 = 0;

View File

@@ -1614,7 +1614,7 @@ static unsigned int skb_to_hnat_info(struct sk_buff *skb,
IS_LAN_GRP(dev)) ||
(IS_PPPQ_MODE &&
IS_PPPQ_PATH(dev, skb)))
entry.ipv4_hnapt.tport_id = NR_QDMA_TPORT;
entry.ipv4_hnapt.tport_id = 1;
else
entry.ipv4_hnapt.tport_id = 0;
#else
@@ -1813,8 +1813,7 @@ static unsigned int skb_to_hnat_info(struct sk_buff *skb,
entry.ipv4_hnapt.iblk2.fqos = 0;
else
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
entry.ipv4_hnapt.tport_id = HQOS_FLAG(dev, skb, qid) ?
NR_QDMA_TPORT : 0;
entry.ipv4_hnapt.tport_id = HQOS_FLAG(dev, skb, qid) ? 1 : 0;
#else
entry.ipv4_hnapt.iblk2.fqos = HQOS_FLAG(dev, skb, qid) ? 1 : 0;
#endif
@@ -1854,16 +1853,16 @@ static unsigned int skb_to_hnat_info(struct sk_buff *skb,
case IPV4_MAP_E:
case IPV4_MAP_T:
entry.ipv4_mape.tport_id =
HQOS_FLAG(dev, skb, qid) ? NR_QDMA_TPORT : 0;
HQOS_FLAG(dev, skb, qid) ? 1 : 0;
break;
case IPV6_HNAPT:
case IPV6_HNAT:
entry.ipv6_hnapt.tport_id =
HQOS_FLAG(dev, skb, qid) ? NR_QDMA_TPORT : 0;
HQOS_FLAG(dev, skb, qid) ? 1 : 0;
break;
default:
entry.ipv6_5t_route.tport_id =
HQOS_FLAG(dev, skb, qid) ? NR_QDMA_TPORT : 0;
HQOS_FLAG(dev, skb, qid) ? 1 : 0;
break;
}
#else
@@ -2002,7 +2001,7 @@ int mtk_sw_nat_hook_tx(struct sk_buff *skb, int gmac_no)
entry.ipv4_hnapt.winfo.bssid = skb_hnat_bss_id(skb);
entry.ipv4_hnapt.winfo.wcid = skb_hnat_wc_id(skb);
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
entry.ipv4_hnapt.tport_id = IS_HQOS_DL_MODE ? NR_QDMA_TPORT : 0;
entry.ipv4_hnapt.tport_id = IS_HQOS_DL_MODE ? 1 : 0;
entry.ipv4_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
entry.ipv4_hnapt.iblk2.winfoi = 1;
entry.ipv4_hnapt.winfo_pao.usr_info =
@@ -2060,7 +2059,7 @@ int mtk_sw_nat_hook_tx(struct sk_buff *skb, int gmac_no)
entry.ipv6_hnapt.winfo_pao.is_sp = skb_hnat_is_sp(skb);
entry.ipv6_hnapt.winfo_pao.hf = skb_hnat_hf(skb);
entry.ipv6_hnapt.winfo_pao.amsdu = skb_hnat_amsdu(skb);
entry.ipv6_hnapt.tport_id = IS_HQOS_DL_MODE ? NR_QDMA_TPORT : 0;
entry.ipv6_hnapt.tport_id = IS_HQOS_DL_MODE ? 1 : 0;
#endif
} else {
entry.ipv6_5t_route.iblk2.fqos = 0;
@@ -2076,7 +2075,7 @@ int mtk_sw_nat_hook_tx(struct sk_buff *skb, int gmac_no)
case IPV4_MAP_T:
entry.ipv4_mape.winfo.bssid = skb_hnat_bss_id(skb);
entry.ipv4_mape.winfo.wcid = skb_hnat_wc_id(skb);
entry.ipv4_mape.tport_id = IS_HQOS_DL_MODE ? NR_QDMA_TPORT : 0;
entry.ipv4_mape.tport_id = IS_HQOS_DL_MODE ? 1 : 0;
entry.ipv4_mape.iblk2.rxid = skb_hnat_rx_id(skb);
entry.ipv4_mape.iblk2.winfoi = 1;
entry.ipv4_mape.winfo_pao.usr_info =
@@ -2097,7 +2096,7 @@ int mtk_sw_nat_hook_tx(struct sk_buff *skb, int gmac_no)
default:
entry.ipv6_5t_route.winfo.bssid = skb_hnat_bss_id(skb);
entry.ipv6_5t_route.winfo.wcid = skb_hnat_wc_id(skb);
entry.ipv6_5t_route.tport_id = IS_HQOS_DL_MODE ? NR_QDMA_TPORT : 0;
entry.ipv6_5t_route.tport_id = IS_HQOS_DL_MODE ? 1 : 0;
entry.ipv6_5t_route.iblk2.rxid = skb_hnat_rx_id(skb);
entry.ipv6_5t_route.iblk2.winfoi = 1;
entry.ipv6_5t_route.winfo_pao.usr_info =

View File

@@ -1,9 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/* FILE NAME: an8801.h
* PURPOSE:
* Define Airoha phy driver function
/*SPDX-License-Identifier: GPL-2.0*/
/*FILE NAME: an8801.h
*PURPOSE:
*Define Airoha phy driver function
*
* NOTES:
*NOTES:
*
*/
@@ -12,12 +12,13 @@
/* NAMING DECLARATIONS
*/
#define AN8801_DRIVER_VERSION "1.1.0"
#define AN8801_DRIVER_VERSION "1.1.4"
#define DEBUGFS_COUNTER "counter"
#define DEBUGFS_DRIVER_INFO "driver_info"
#define DEBUGFS_INFO "driver_info"
#define DEBUGFS_PBUS_OP "pbus_op"
#define DEBUGFS_POLARITY "polarity"
#define DEBUGFS_MDIO "mdio"
#define AN8801_MDIO_PHY_ID 0x1
#define AN8801_PHY_ID1 0xc0ff
@@ -97,6 +98,7 @@
#define PHY_PRE_SPEED_REG (0x2b)
#define MMD_DEV_VSPEC1 (0x1E)
#define MMD_DEV_VSPEC2 (0x1F)
#define RGMII_DELAY_STEP_MASK 0x7
@@ -202,6 +204,8 @@ struct an8801_priv {
#ifdef AN8801SB_DEBUGFS
struct dentry *debugfs_root;
#endif
int pol;
int surge;
};
enum an8801_polarity {
@@ -211,4 +215,10 @@ enum an8801_polarity {
AIR_POL_TX_REV_RX_NOR,
};
enum air_surge {
AIR_SURGE_0R,
AIR_SURGE_5R,
AIR_SURGE_LAST = 0xff
};
#endif /* End of __AN8801_H */

View File

@@ -25,8 +25,8 @@
#define PHY_AUX_SPEED_MASK GENMASK(4, 2)
/* Registers on MDIO_MMD_VEND1 */
#define MTK_PHY_LINK_STATUS_MISC (0xa2)
#define MTK_PHY_FDX_ENABLE BIT(5)
#define MTK_PHY_LINK_STATUS_MISC (0xa2)
#define MTK_PHY_FDX_ENABLE BIT(5)
#define MTK_PHY_LPI_PCS_DSP_CTRL (0x121)
#define MTK_PHY_LPI_SIG_EN_LO_THRESH100_MASK GENMASK(12, 8)
@@ -46,20 +46,10 @@
#define MTK_EXT_PAGE_ACCESS 0x1f
#define MTK_PHY_PAGE_STANDARD 0x0000
#define MTK_PHY_PAGE_EXTENDED_1 0x1
#define MTK_PHY_AUX_CTRL_AND_STATUS (0x14)
#define MTK_PHY_ENABLE_DOWNSHIFT BIT(4)
/* Registers on Token Ring debug nodes */
#define MTK_PHY_PAGE_EXTENDED_52B5 0x52b5
/* ch_addr = 0x0, node_addr = 0xf, data_addr = 0x3c */
#define AUTO_NP_10XEN BIT(6)
/* Per-PHY driver state for the MT7988 built-in 2.5G PHY. */
struct mtk_i2p5ge_phy_priv {
	bool fw_loaded;		/* firmware already loaded into the PHY MCU */
	u16 tr_low;		/* last Token-Ring read, low word (reg 0x11) */
	u16 tr_high;		/* last Token-Ring read, high word (reg 0x12) */
};
enum {
@@ -69,67 +59,6 @@ enum {
PHY_AUX_SPD_2500,
};
/*
 * tr_access() - issue a Token-Ring debug-node command on register 0x10.
 * @phydev:    PHY device (caller must already be on page 0x52b5)
 * @read:      true for a read command, false for a write command
 * @ch_addr:   channel address (2 bits)
 * @node_addr: node address (4 bits)
 * @data_addr: data address (6 bits)
 *
 * For writes, the data words must already be in registers 0x11/0x12
 * before this command is issued (see __tr_write()).
 */
static void tr_access(struct phy_device *phydev, bool read, u8 ch_addr, u8 node_addr, u8 data_addr)
{
	u16 tr_cmd = BIT(15); /* bit 14 & 0 are reserved */

	if (read)
		tr_cmd |= BIT(13);

	tr_cmd |= (((ch_addr & 0x3) << 11) |
		   ((node_addr & 0xf) << 7) |
		   ((data_addr & 0x3f) << 1));
	dev_dbg(&phydev->mdio.dev, "tr_cmd: 0x%x\n", tr_cmd);
	__phy_write(phydev, 0x10, tr_cmd);
}
static void __tr_read(struct phy_device *phydev, u8 ch_addr, u8 node_addr, u8 data_addr)
{
struct mtk_i2p5ge_phy_priv *priv = phydev->priv;
tr_access(phydev, true, ch_addr, node_addr, data_addr);
priv->tr_low = __phy_read(phydev, 0x11);
priv->tr_high = __phy_read(phydev, 0x12);
dev_dbg(&phydev->mdio.dev, "tr_high read: 0x%x, tr_low read: 0x%x\n",
priv->tr_high, priv->tr_low);
}
/* Page-safe Token-Ring read: selects page 0x52b5, latches the result into
 * priv->tr_low/tr_high via __tr_read(), then restores the standard page.
 */
static void tr_read(struct phy_device *phydev, u8 ch_addr, u8 node_addr, u8 data_addr)
{
	phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
	__tr_read(phydev, ch_addr, node_addr, data_addr);
	phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
}
/* Raw Token-Ring write: stage the 32-bit payload in regs 0x11 (low) and
 * 0x12 (high) first, then issue the write command — this order is required
 * by the TR protocol.  Caller must already be on page 0x52b5.
 */
static void __tr_write(struct phy_device *phydev, u8 ch_addr, u8 node_addr, u8 data_addr,
		       u32 tr_data)
{
	__phy_write(phydev, 0x11, tr_data & 0xffff);
	__phy_write(phydev, 0x12, tr_data >> 16);
	tr_access(phydev, false, ch_addr, node_addr, data_addr);
}
/* Page-safe Token-Ring write: wraps __tr_write() in page select/restore. */
static void tr_write(struct phy_device *phydev, u8 ch_addr, u8 node_addr, u8 data_addr, u32 tr_data)
{
	phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
	__tr_write(phydev, ch_addr, node_addr, data_addr, tr_data);
	phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
}
/*
 * tr_modify() - read-modify-write a 32-bit Token-Ring debug-node value.
 * @phydev:    PHY device
 * @ch_addr:   channel address (2 bits)
 * @node_addr: node address (4 bits)
 * @data_addr: data address (6 bits)
 * @mask:      bits to clear
 * @set:       bits to set (after clearing @mask)
 *
 * Read and write happen under a single page selection so the sequence is
 * not interleaved with other page accesses on this PHY.
 */
static void tr_modify(struct phy_device *phydev, u8 ch_addr, u8 node_addr, u8 data_addr,
		      u32 mask, u32 set)
{
	u32 tr_data;
	struct mtk_i2p5ge_phy_priv *priv = phydev->priv;

	phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
	__tr_read(phydev, ch_addr, node_addr, data_addr);
	/* __tr_read() left the current value in priv->tr_high/tr_low. */
	tr_data = (priv->tr_high << 16) | priv->tr_low;
	tr_data = (tr_data & ~mask) | set;
	__tr_write(phydev, ch_addr, node_addr, data_addr, tr_data);
	phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
}
static int mtk_2p5ge_phy_read_page(struct phy_device *phydev)
{
return __phy_read(phydev, MTK_EXT_PAGE_ACCESS);
@@ -142,14 +71,14 @@ static int mtk_2p5ge_phy_write_page(struct phy_device *phydev, int page)
static int mt7988_2p5ge_phy_probe(struct phy_device *phydev)
{
struct mtk_i2p5ge_phy_priv *priv;
struct mtk_i2p5ge_phy_priv *phy_priv;
priv = devm_kzalloc(&phydev->mdio.dev,
phy_priv = devm_kzalloc(&phydev->mdio.dev,
sizeof(struct mtk_i2p5ge_phy_priv), GFP_KERNEL);
if (!priv)
if (!phy_priv)
return -ENOMEM;
phydev->priv = priv;
phydev->priv = phy_priv;
return 0;
}
@@ -162,11 +91,11 @@ static int mt7988_2p5ge_phy_config_init(struct phy_device *phydev)
struct device_node *np;
void __iomem *pmb_addr;
void __iomem *md32_en_cfg_base;
struct mtk_i2p5ge_phy_priv *priv = phydev->priv;
struct mtk_i2p5ge_phy_priv *phy_priv = phydev->priv;
u16 reg;
struct pinctrl *pinctrl;
if (!priv->fw_loaded) {
if (!phy_priv->fw_loaded) {
np = of_find_compatible_node(NULL, NULL, "mediatek,2p5gphy-fw");
if (!np)
return -ENOENT;
@@ -202,11 +131,9 @@ static int mt7988_2p5ge_phy_config_init(struct phy_device *phydev)
writew(reg & ~MD32_EN, md32_en_cfg_base);
writew(reg | MD32_EN, md32_en_cfg_base);
phy_set_bits(phydev, 0, BIT(15));
/* We need a delay here to stabilize initialization of MCU */
usleep_range(7000, 8000);
dev_info(dev, "Firmware loading/trigger ok.\n");
priv->fw_loaded = true;
phy_priv->fw_loaded = true;
}
/* Setup LED */
@@ -227,12 +154,10 @@ static int mt7988_2p5ge_phy_config_init(struct phy_device *phydev)
MTK_PHY_LPI_SIG_EN_LO_THRESH100_MASK, 0);
/* Enable 16-bit next page exchange bit if 1000-BT isn't advertizing */
tr_modify(phydev, 0x0, 0xf, 0x3c, AUTO_NP_10XEN,
FIELD_PREP(AUTO_NP_10XEN, 0x1));
/* Enable downshift */
phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_1);
__phy_set_bits(phydev, MTK_PHY_AUX_CTRL_AND_STATUS, MTK_PHY_ENABLE_DOWNSHIFT);
phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
__phy_write(phydev, 0x11, 0xfbfa);
__phy_write(phydev, 0x12, 0xc3);
__phy_write(phydev, 0x10, 0x87f8);
phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
return 0;
@@ -298,7 +223,6 @@ static int mt7988_2p5ge_phy_get_features(struct phy_device *phydev)
static int mt7988_2p5ge_phy_read_status(struct phy_device *phydev)
{
int ret;
u16 status;
ret = genphy_update_link(phydev);
if (ret)
@@ -323,35 +247,32 @@ static int mt7988_2p5ge_phy_read_status(struct phy_device *phydev)
linkmode_zero(phydev->lp_advertising);
}
status = phy_read(phydev, MII_BMSR);
if (status & BMSR_LSTATUS) {
ret = phy_read(phydev, PHY_AUX_CTRL_STATUS);
if (ret < 0)
return ret;
ret = phy_read(phydev, PHY_AUX_CTRL_STATUS);
if (ret < 0)
return ret;
switch (FIELD_GET(PHY_AUX_SPEED_MASK, ret)) {
case PHY_AUX_SPD_10:
phydev->speed = SPEED_10;
break;
case PHY_AUX_SPD_100:
phydev->speed = SPEED_100;
break;
case PHY_AUX_SPD_1000:
phydev->speed = SPEED_1000;
break;
case PHY_AUX_SPD_2500:
phydev->speed = SPEED_2500;
break;
}
ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_LINK_STATUS_MISC);
if (ret < 0)
return ret;
phydev->duplex = (ret & MTK_PHY_FDX_ENABLE) ? DUPLEX_FULL : DUPLEX_HALF;
/* FIXME: The current firmware always enables rate adaptation mode. */
phydev->rate_matching = RATE_MATCH_PAUSE;
switch (FIELD_GET(PHY_AUX_SPEED_MASK, ret)) {
case PHY_AUX_SPD_10:
phydev->speed = SPEED_10;
break;
case PHY_AUX_SPD_100:
phydev->speed = SPEED_100;
break;
case PHY_AUX_SPD_1000:
phydev->speed = SPEED_1000;
break;
case PHY_AUX_SPD_2500:
phydev->speed = SPEED_2500;
break;
}
ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_LINK_STATUS_MISC);
if (ret < 0)
return ret;
phydev->duplex = (ret & MTK_PHY_FDX_ENABLE) ? DUPLEX_FULL : DUPLEX_HALF;
/* FIXME: The current firmware always enables rate adaptation mode. */
phydev->rate_matching = RATE_MATCH_PAUSE;
return 0;
}

View File

@@ -109,6 +109,7 @@
#define CK_TOP_F26M 48
#define CK_TOP_AUD_L 49
#define CK_TOP_A_TUNER 50
#define CLK_TOP_CONN_MCUSYS_SEL 50
#define CK_TOP_U2U3_REF 51
#define CK_TOP_U2U3_SYS 52
#define CK_TOP_U2U3_XHCI 53
@@ -121,6 +122,7 @@
#define CK_TOP_PWM_SEL 60
#define CK_TOP_I2C_SEL 61
#define CK_TOP_PEXTP_TL_SEL 62
#define CLK_TOP_AP2CNN_HOST_SEL 62
#define CK_TOP_EMMC_250M_SEL 63
#define CK_TOP_EMMC_416M_SEL 64
#define CK_TOP_F_26M_ADC_SEL 65

View File

@@ -1,397 +0,0 @@
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1222,6 +1222,7 @@ static struct safexcel_alg_template *saf
&safexcel_alg_cfb_aes,
&safexcel_alg_ofb_aes,
&safexcel_alg_ctr_aes,
+ &safexcel_alg_basic_ctr_aes,
&safexcel_alg_md5,
&safexcel_alg_sha1,
&safexcel_alg_sha224,
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -930,6 +930,7 @@ extern struct safexcel_alg_template safe
extern struct safexcel_alg_template safexcel_alg_cfb_aes;
extern struct safexcel_alg_template safexcel_alg_ofb_aes;
extern struct safexcel_alg_template safexcel_alg_ctr_aes;
+extern struct safexcel_alg_template safexcel_alg_basic_ctr_aes;
extern struct safexcel_alg_template safexcel_alg_md5;
extern struct safexcel_alg_template safexcel_alg_sha1;
extern struct safexcel_alg_template safexcel_alg_sha224;
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -51,6 +51,8 @@ struct safexcel_cipher_ctx {
u8 xcm; /* 0=authenc, 1=GCM, 2 reserved for CCM */
u8 aadskip;
u8 blocksz;
+ bool basic_ctr;
+ u32 processed;
u32 ivmask;
u32 ctrinit;
@@ -79,7 +81,7 @@ struct safexcel_cipher_req {
static int safexcel_skcipher_iv(struct safexcel_cipher_ctx *ctx, u8 *iv,
struct safexcel_command_desc *cdesc)
{
- if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) {
+ if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD && !(ctx->basic_ctr)) {
cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
/* 32 bit nonce */
cdesc->control_data.token[0] = ctx->nonce;
@@ -513,8 +515,8 @@ static int safexcel_aead_setkey(struct c
memcpy(ctx->opad, &ostate.state, ctx->state_sz);
memzero_explicit(&keys, sizeof(keys));
- return 0;
+ return 0;
badkey:
memzero_explicit(&keys, sizeof(keys));
return err;
@@ -622,6 +624,43 @@ static int safexcel_context_control(stru
return 0;
}
+static int safexcel_queue_req(struct crypto_async_request *base,
+ struct safexcel_cipher_req *sreq,
+ enum safexcel_cipher_direction dir)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
+ struct safexcel_crypto_priv *priv = ctx->priv;
+ int ret, ring;
+
+ sreq->needs_inv = false;
+ sreq->direction = dir;
+
+ if (ctx->base.ctxr) {
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.needs_inv) {
+ sreq->needs_inv = true;
+ ctx->base.needs_inv = false;
+ }
+ } else {
+ ctx->base.ring = safexcel_select_ring(priv);
+ ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
+ EIP197_GFP_FLAGS(*base),
+ &ctx->base.ctxr_dma);
+ if (!ctx->base.ctxr)
+ return -ENOMEM;
+ }
+
+ ring = ctx->base.ring;
+
+ spin_lock_bh(&priv->ring[ring].queue_lock);
+ ret = crypto_enqueue_request(&priv->ring[ring].queue, base);
+ spin_unlock_bh(&priv->ring[ring].queue_lock);
+
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
+
+ return ret;
+}
+
static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
struct crypto_async_request *async,
struct scatterlist *src,
@@ -635,6 +674,7 @@ static int safexcel_handle_req_result(st
struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct safexcel_result_desc *rdesc;
int ndesc = 0;
+ int flag;
*ret = 0;
@@ -677,7 +717,13 @@ static int safexcel_handle_req_result(st
crypto_skcipher_ivsize(skcipher)));
}
- *should_complete = true;
+ if (ctx->basic_ctr && ctx->processed != cryptlen) {
+ *should_complete = false;
+ flag = safexcel_queue_req(async, sreq, sreq->direction);
+ } else {
+ *should_complete = true;
+ ctx->processed = 0;
+ }
return ndesc;
}
@@ -700,12 +746,16 @@ static int safexcel_send_req(struct cryp
unsigned int totlen;
unsigned int totlen_src = cryptlen + assoclen;
unsigned int totlen_dst = totlen_src;
+ unsigned int pass_byte = 0;
+ unsigned int pass;
struct safexcel_token *atoken;
int n_cdesc = 0, n_rdesc = 0;
int queued, i, ret = 0;
bool first = true;
- sreq->nr_src = sg_nents_for_len(src, totlen_src);
+ pass_byte = ctx->processed;
+ pass = pass_byte;
+ sreq->nr_src = sg_nents_for_len(src, totlen_src + pass_byte);
if (ctx->aead) {
/*
@@ -736,7 +786,7 @@ static int safexcel_send_req(struct cryp
crypto_skcipher_ivsize(skcipher)));
}
- sreq->nr_dst = sg_nents_for_len(dst, totlen_dst);
+ sreq->nr_dst = sg_nents_for_len(dst, totlen_dst + pass_byte);
/*
* Remember actual input length, source buffer length may be
@@ -798,14 +848,23 @@ static int safexcel_send_req(struct cryp
for_each_sg(src, sg, sreq->nr_src, i) {
int len = sg_dma_len(sg);
+ if (pass) {
+ if (pass >= len) {
+ pass -= len;
+ continue;
+ }
+ len = len - pass;
+ }
/* Do not overflow the request */
if (queued < len)
len = queued;
cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
- !(queued - len),
- sg_dma_address(sg), len, totlen,
- ctx->base.ctxr_dma, &atoken);
+ !(queued - len),
+ sg_dma_address(sg) + pass, len,
+ totlen, ctx->base.ctxr_dma, &atoken);
+ pass = 0;
+
if (IS_ERR(cdesc)) {
/* No space left in the command descriptor ring */
ret = PTR_ERR(cdesc);
@@ -820,6 +879,7 @@ static int safexcel_send_req(struct cryp
if (!queued)
break;
}
+
skip_cdesc:
/* Add context control words and token to first command descriptor */
safexcel_context_control(ctx, base, sreq, first_cdesc);
@@ -831,11 +891,20 @@ skip_cdesc:
safexcel_skcipher_token(ctx, iv, first_cdesc, atoken,
cryptlen);
+ pass = pass_byte;
/* result descriptors */
for_each_sg(dst, sg, sreq->nr_dst, i) {
bool last = (i == sreq->nr_dst - 1);
u32 len = sg_dma_len(sg);
+ if (pass) {
+ if (pass >= len) {
+ pass -= len;
+ continue;
+ }
+ len -= pass;
+ }
+
/* only allow the part of the buffer we know we need */
if (len > totlen_dst)
len = totlen_dst;
@@ -855,9 +924,11 @@ skip_cdesc:
len - assoclen);
assoclen = 0;
} else {
+
rdesc = safexcel_add_rdesc(priv, ring, first, last,
- sg_dma_address(sg),
- len);
+ sg_dma_address(sg) + pass,
+ len);
+ pass = 0;
}
if (IS_ERR(rdesc)) {
/* No space left in the result descriptor ring */
@@ -892,6 +963,7 @@ skip_cdesc:
*commands = n_cdesc;
*results = n_rdesc;
+
return 0;
rdesc_rollback:
@@ -1033,6 +1105,26 @@ static int safexcel_cipher_send_inv(stru
return 0;
}
+static void accum_iv(u8 *iv, u32 blocks)
+{
+ u32 *counter;
+ int i;
+
+ for (i = 12; i >= 0; i = i - 4) {
+ counter = (u32 *) &iv[i];
+ if (be32_to_cpu(*counter) + blocks >= be32_to_cpu(*counter)) {
+ *counter = cpu_to_be32(be32_to_cpu(*counter) + blocks);
+ blocks = 0;
+ } else {
+ *counter = cpu_to_be32(be32_to_cpu(*counter) + blocks);
+ blocks = 1;
+ }
+
+ if (blocks == 0)
+ break;
+ }
+}
+
static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
int *commands, int *results)
{
@@ -1049,6 +1141,8 @@ static int safexcel_skcipher_send(struct
} else {
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
u8 input_iv[AES_BLOCK_SIZE];
+ u32 blocks;
+ u32 *counter;
/*
* Save input IV in case of CBC decrypt mode
@@ -1056,9 +1150,29 @@ static int safexcel_skcipher_send(struct
*/
memcpy(input_iv, req->iv, crypto_skcipher_ivsize(skcipher));
- ret = safexcel_send_req(async, ring, sreq, req->src,
+ if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD && ctx->basic_ctr) {
+ counter = (u32 *) &req->iv[12];
+ blocks = (req->cryptlen / ctx->blocksz) - (ctx->processed / 16);
+ if (req->cryptlen % ctx->blocksz)
+ blocks++;
+ if (be32_to_cpu(*counter) + blocks < be32_to_cpu(*counter)) {
+ blocks = 0 - be32_to_cpu(*counter);
+ ret = safexcel_send_req(async, ring, sreq, req->src,
+ req->dst, min(blocks * AES_BLOCK_SIZE, req->cryptlen), 0, 0, input_iv,
+ commands, results);
+ ctx->processed += min(blocks * AES_BLOCK_SIZE, req->cryptlen);
+ } else {
+ ret = safexcel_send_req(async, ring, sreq, req->src,
+ req->dst, req->cryptlen - ctx->processed,
+ 0, 0, input_iv, commands, results);
+ ctx->processed = req->cryptlen;
+ }
+ accum_iv(req->iv, blocks);
+ } else {
+ ret = safexcel_send_req(async, ring, sreq, req->src,
req->dst, req->cryptlen, 0, 0, input_iv,
commands, results);
+ }
}
sreq->rdescs = *results;
@@ -1152,43 +1266,6 @@ static int safexcel_aead_exit_inv(struct
return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result);
}
-static int safexcel_queue_req(struct crypto_async_request *base,
- struct safexcel_cipher_req *sreq,
- enum safexcel_cipher_direction dir)
-{
- struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
- int ret, ring;
-
- sreq->needs_inv = false;
- sreq->direction = dir;
-
- if (ctx->base.ctxr) {
- if (priv->flags & EIP197_TRC_CACHE && ctx->base.needs_inv) {
- sreq->needs_inv = true;
- ctx->base.needs_inv = false;
- }
- } else {
- ctx->base.ring = safexcel_select_ring(priv);
- ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
- EIP197_GFP_FLAGS(*base),
- &ctx->base.ctxr_dma);
- if (!ctx->base.ctxr)
- return -ENOMEM;
- }
-
- ring = ctx->base.ring;
-
- spin_lock_bh(&priv->ring[ring].queue_lock);
- ret = crypto_enqueue_request(&priv->ring[ring].queue, base);
- spin_unlock_bh(&priv->ring[ring].queue_lock);
-
- queue_work(priv->ring[ring].workqueue,
- &priv->ring[ring].work_data.work);
-
- return ret;
-}
-
static int safexcel_encrypt(struct skcipher_request *req)
{
return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
@@ -1216,6 +1293,8 @@ static int safexcel_skcipher_cra_init(st
ctx->base.send = safexcel_skcipher_send;
ctx->base.handle_result = safexcel_skcipher_handle_result;
ctx->ivmask = EIP197_OPTION_4_TOKEN_IV_CMD;
+ ctx->basic_ctr = false;
+ ctx->processed = 0;
ctx->ctrinit = 1;
return 0;
}
@@ -1496,6 +1575,44 @@ struct safexcel_alg_template safexcel_al
},
};
+static int safexcel_skcipher_basic_aes_ctr_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_skcipher_cra_init(tfm);
+ ctx->alg = SAFEXCEL_AES;
+ ctx->blocksz = AES_BLOCK_SIZE;
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
+ ctx->basic_ctr = true;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_basic_ctr_aes = {
+ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+ .algo_mask = SAFEXCEL_ALG_AES,
+ .alg.skcipher = {
+ .setkey = safexcel_skcipher_aes_setkey,
+ .encrypt = safexcel_encrypt,
+ .decrypt = safexcel_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "safexcel-basic-ctr-aes",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_skcipher_basic_aes_ctr_cra_init,
+ .cra_exit = safexcel_skcipher_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key,
unsigned int len)
{
@@ -1724,6 +1841,9 @@ static int safexcel_aead_cra_init(struct
ctx->aead = true;
ctx->base.send = safexcel_aead_send;
ctx->base.handle_result = safexcel_aead_handle_result;
+ ctx->basic_ctr = false;
+ ctx->processed = 0;
+
return 0;
}

View File

@@ -1,58 +0,0 @@
From b241c6831557c3141801dc2f87e839269ef7bad1 Mon Sep 17 00:00:00 2001
From: Howard Hsu <howard-yh.hsu@mediatek.com>
Date: Fri, 19 Apr 2024 15:43:23 +0800
Subject: [PATCH] wifi: mt76: mt7915: adjust rx filter
Adjust rx filter setting to drop the packet that we do not need to
receive.
Fixes: e57b7901469f ("mt76: add mac80211 driver for MT7915 PCIe-based chipsets")
Signed-off-by: Howard Hsu <howard-yh.hsu@mediatek.com>
---
mt7915/main.c | 11 +++++++----
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/mt7915/main.c b/mt7915/main.c
index 26f9a5a..137e09a 100644
--- a/mt7915/main.c
+++ b/mt7915/main.c
@@ -489,7 +489,8 @@ static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
dev->monitor_mask &= ~BIT(band);
} else {
- rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
+ rxfilter &= ~(MT_WF_RFCR_DROP_A2_BSSID |
+ MT_WF_RFCR_DROP_OTHER_UC);
dev->monitor_mask |= BIT(band);
}
@@ -552,13 +553,14 @@ static void mt7915_configure_filter(struct ieee80211_hw *hw,
MT_WF_RFCR_DROP_MCAST |
MT_WF_RFCR_DROP_BCAST |
MT_WF_RFCR_DROP_DUPLICATE |
- MT_WF_RFCR_DROP_A2_BSSID |
MT_WF_RFCR_DROP_UNWANTED_CTL |
MT_WF_RFCR_DROP_STBC_MULTI);
+ phy->rxfilter |= MT_WF_RFCR_DROP_VERSION;
MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM |
MT_WF_RFCR_DROP_A3_MAC |
- MT_WF_RFCR_DROP_A3_BSSID);
+ MT_WF_RFCR_DROP_A3_BSSID |
+ MT_WF_RFCR_DROP_A2_BSSID);
MT76_FILTER(FCSFAIL, MT_WF_RFCR_DROP_FCSFAIL);
@@ -569,7 +571,8 @@ static void mt7915_configure_filter(struct ieee80211_hw *hw,
*total_flags = flags;
rxfilter = phy->rxfilter;
if (hw->conf.flags & IEEE80211_CONF_MONITOR)
- rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
+ rxfilter &= ~(MT_WF_RFCR_DROP_A2_BSSID |
+ MT_WF_RFCR_DROP_OTHER_UC);
else
rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
mt76_wr(dev, MT_WF_RFCR(band), rxfilter);
--
2.18.0