From: Wei Wang <weiwan@google.com>
Date: Mon, 8 Feb 2021 11:34:09 -0800
Subject: [PATCH] net: implement threaded-able napi poll loop support

This patch allows running each napi poll loop inside its own
kernel thread.
The kthread is created during netif_napi_add() if dev->threaded
is set, and threaded mode is enabled in napi_enable(). We will
provide a way to set dev->threaded and enable threaded mode
without a device up/down in the following patch.

Once threaded mode is enabled and the kthread is started,
napi_schedule() will wake up that thread instead of scheduling
the softirq.

The threaded poll loop behaves much like net_rx_action, but it
does not have to manipulate local irqs and uses an explicit
scheduling point based on netdev_budget.
(A brief driver-side usage sketch follows below, after the "---"
separator.)

Co-developed-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Co-developed-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Co-developed-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Wei Wang <weiwan@google.com>
Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
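Note (not part of the patch): the sketch below shows how a driver could
opt in to threaded NAPI once this change is applied. It assumes a
hypothetical driver; struct mydrv_priv, mydrv_poll(), mydrv_setup_napi()
and the commented-out mydrv_rx() are invented names, while dev->threaded,
netif_napi_add(), napi_enable(), napi_complete_done() and
NAPI_POLL_WEIGHT come from this patch or the existing kernel API.

#include <linux/netdevice.h>

/* Hypothetical driver state; only the napi_struct member matters here. */
struct mydrv_priv {
        struct napi_struct napi;
        /* ... device registers, rings, ... */
};

/* Standard NAPI poll callback; unchanged by this patch. */
static int mydrv_poll(struct napi_struct *napi, int budget)
{
        int work_done = 0;

        /* work_done = mydrv_rx(napi, budget);  -- process up to 'budget' packets */
        if (work_done < budget)
                napi_complete_done(napi, work_done);
        return work_done;
}

static void mydrv_setup_napi(struct net_device *dev)
{
        struct mydrv_priv *priv = netdev_priv(dev);

        /* Opt in before netif_napi_add(): with dev->threaded set, a
         * "napi/<dev>-<id>" kthread is created for this context. If
         * kthread creation fails, dev->threaded is cleared and the
         * context falls back to softirq polling.
         */
        dev->threaded = 1;
        netif_napi_add(dev, &priv->napi, mydrv_poll, NAPI_POLL_WEIGHT);

        /* napi_enable() latches NAPI_STATE_THREADED for this context. */
        napi_enable(&priv->napi);
}

Because NAPI_STATE_THREADED is only set in napi_enable() when both
dev->threaded and n->thread are non-zero, a failed kthread_run()
degrades gracefully to the classic softirq path rather than breaking
the device.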
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -319,6 +319,7 @@ struct napi_struct {
         struct list_head dev_list;
         struct hlist_node napi_hash_node;
         unsigned int napi_id;
+        struct task_struct *thread;
 };
 
 enum {
@@ -326,6 +327,7 @@ enum {
         NAPI_STATE_DISABLE, /* Disable pending */
         NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
         NAPI_STATE_HASHED, /* In NAPI hash */
+        NAPI_STATE_THREADED, /* The poll is performed inside its own thread*/
 };
 
 enum gro_result {
@@ -501,13 +503,7 @@ void napi_disable(struct napi_struct *n)
  * Resume NAPI from being scheduled on this context.
  * Must be paired with napi_disable.
  */
-static inline void napi_enable(struct napi_struct *n)
-{
-        BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
-        smp_mb__before_atomic();
-        clear_bit(NAPI_STATE_SCHED, &n->state);
-        clear_bit(NAPI_STATE_NPSVC, &n->state);
-}
+void napi_enable(struct napi_struct *n);
 
 /**
  *      napi_synchronize - wait until NAPI is not running
@@ -1573,6 +1569,8 @@ enum netdev_priv_flags_ext {
  *              switch driver and used to set the phys state of the
  *              switch port.
  *
+ *      @threaded:      napi threaded mode is enabled
+ *
  *      FIXME: cleanup struct net_device such that network protocol info
  *      moves out.
  */
@@ -1852,6 +1850,7 @@ struct net_device {
         struct phy_device *phydev;
         struct lock_class_key *qdisc_tx_busylock;
         bool proto_down;
+        unsigned threaded:1;
 };
 
 #define to_net_dev(d) container_of(d, struct net_device, dev)
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -94,6 +94,7 @@
 #include <linux/ethtool.h>
 #include <linux/notifier.h>
 #include <linux/skbuff.h>
+#include <linux/kthread.h>
 #include <net/net_namespace.h>
 #include <net/sock.h>
 #include <linux/rtnetlink.h>
@@ -1304,6 +1305,27 @@ void netdev_notify_peers(struct net_devi
 }
 EXPORT_SYMBOL(netdev_notify_peers);
 
+static int napi_threaded_poll(void *data);
+
+static int napi_kthread_create(struct napi_struct *n)
+{
+        int err = 0;
+
+        /* Create and wake up the kthread once to put it in
+         * TASK_INTERRUPTIBLE mode to avoid the blocked task
+         * warning and work with loadavg.
+         */
+        n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
+                                n->dev->name, n->napi_id);
+        if (IS_ERR(n->thread)) {
+                err = PTR_ERR(n->thread);
+                pr_err("kthread_run failed with err %d\n", err);
+                n->thread = NULL;
+        }
+
+        return err;
+}
+
 static int __dev_open(struct net_device *dev)
 {
         const struct net_device_ops *ops = dev->netdev_ops;
@@ -3248,6 +3270,21 @@ int weight_p __read_mostly = 64;
 static inline void ____napi_schedule(struct softnet_data *sd,
                                      struct napi_struct *napi)
 {
+        struct task_struct *thread;
+
+        if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
+                /* Paired with smp_mb__before_atomic() in
+                 * napi_enable(). Use READ_ONCE() to guarantee
+                 * a complete read on napi->thread. Only call
+                 * wake_up_process() when it's not NULL.
+                 */
+                thread = READ_ONCE(napi->thread);
+                if (thread) {
+                        wake_up_process(thread);
+                        return;
+                }
+        }
+
         list_add_tail(&napi->poll_list, &sd->poll_list);
         __raise_softirq_irqoff(NET_RX_SOFTIRQ);
 }
@@ -4828,9 +4865,33 @@ void netif_napi_add(struct net_device *d
         napi->poll_owner = -1;
 #endif
         set_bit(NAPI_STATE_SCHED, &napi->state);
+        /* Create kthread for this napi if dev->threaded is set.
+         * Clear dev->threaded if kthread creation failed so that
+         * threaded mode will not be enabled in napi_enable().
+         */
+        if (dev->threaded && napi_kthread_create(napi))
+                dev->threaded = 0;
 }
 EXPORT_SYMBOL(netif_napi_add);
 
+/**
+ *      napi_enable - enable NAPI scheduling
+ *      @n: NAPI context
+ *
+ * Resume NAPI from being scheduled on this context.
+ * Must be paired with napi_disable.
+ */
+void napi_enable(struct napi_struct *n)
+{
+        BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
+        smp_mb__before_atomic();
+        clear_bit(NAPI_STATE_SCHED, &n->state);
+        clear_bit(NAPI_STATE_NPSVC, &n->state);
+        if (n->dev->threaded && n->thread)
+                set_bit(NAPI_STATE_THREADED, &n->state);
+}
+EXPORT_SYMBOL(napi_enable);
+
 void napi_disable(struct napi_struct *n)
 {
         might_sleep();
@@ -4844,6 +4905,7 @@ void napi_disable(struct napi_struct *n)
         hrtimer_cancel(&n->timer);
 
         clear_bit(NAPI_STATE_DISABLE, &n->state);
+        clear_bit(NAPI_STATE_THREADED, &n->state);
 }
 EXPORT_SYMBOL(napi_disable);
 
@@ -4855,6 +4917,11 @@ void netif_napi_del(struct napi_struct *
         kfree_skb_list(napi->gro_list);
         napi->gro_list = NULL;
         napi->gro_count = 0;
+
+        if (napi->thread) {
+                kthread_stop(napi->thread);
+                napi->thread = NULL;
+        }
 }
 EXPORT_SYMBOL(netif_napi_del);
 
@@ -4940,6 +5007,50 @@ static int napi_poll(struct napi_struct
         return work;
 }
 
+static int napi_thread_wait(struct napi_struct *napi)
+{
+        set_current_state(TASK_INTERRUPTIBLE);
+
+        while (!kthread_should_stop() && !napi_disable_pending(napi)) {
+                if (test_bit(NAPI_STATE_SCHED, &napi->state)) {
+                        WARN_ON(!list_empty(&napi->poll_list));
+                        __set_current_state(TASK_RUNNING);
+                        return 0;
+                }
+
+                schedule();
+                set_current_state(TASK_INTERRUPTIBLE);
+        }
+        __set_current_state(TASK_RUNNING);
+        return -1;
+}
+
+static int napi_threaded_poll(void *data)
+{
+        struct napi_struct *napi = data;
+        void *have;
+
+        while (!napi_thread_wait(napi)) {
+                for (;;) {
+                        bool repoll = false;
+
+                        local_bh_disable();
+
+                        have = netpoll_poll_lock(napi);
+                        __napi_poll(napi, &repoll);
+                        netpoll_poll_unlock(have);
+
+                        local_bh_enable();
+
+                        if (!repoll)
+                                break;
+
+                        cond_resched();
+                }
+        }
+        return 0;
+}
+
 static void net_rx_action(struct softirq_action *h)
 {
         struct softnet_data *sd = this_cpu_ptr(&softnet_data);
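
For completeness, a sketch of the interrupt side, reusing the invented
mydrv names from the note after the "---" separator (mydrv_interrupt()
and the commented-out mydrv_disable_rx_irq() are likewise hypothetical).
The point is that drivers keep calling napi_schedule() exactly as they
do for softirq-based polling:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Hypothetical IRQ handler for the same imaginary mydrv device. With
 * NAPI_STATE_THREADED set, ____napi_schedule() wakes the per-NAPI
 * kthread via wake_up_process() instead of raising NET_RX_SOFTIRQ.
 */
static irqreturn_t mydrv_interrupt(int irq, void *dev_id)
{
        struct mydrv_priv *priv = dev_id;

        /* mydrv_disable_rx_irq(priv);  -- mask RX interrupts until polling is done */
        napi_schedule(&priv->napi);

        return IRQ_HANDLED;
}

Because the poll loop now runs in an ordinary kthread named
"napi/<dev>-<id>", it can in principle be pinned or reprioritized with
standard scheduler tooling, and napi_threaded_poll() yields through
cond_resched() between repolls instead of juggling local irqs the way
net_rx_action() does.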