XDP hardware hints discussion mail archive
From: Alexander Lobakin <alexandr.lobakin@intel.com>
To: Alexei Starovoitov <ast@kernel.org>,
	Daniel Borkmann <daniel@iogearbox.net>,
	Andrii Nakryiko <andrii@kernel.org>
Cc: "Alexander Lobakin" <alexandr.lobakin@intel.com>,
	"Larysa Zaremba" <larysa.zaremba@intel.com>,
	"Michal Swiatkowski" <michal.swiatkowski@linux.intel.com>,
	"Jesper Dangaard Brouer" <hawk@kernel.org>,
	"Björn Töpel" <bjorn@kernel.org>,
	"Magnus Karlsson" <magnus.karlsson@intel.com>,
	"Maciej Fijalkowski" <maciej.fijalkowski@intel.com>,
	"Jonathan Lemon" <jonathan.lemon@gmail.com>,
	"Toke Hoiland-Jorgensen" <toke@redhat.com>,
	"Lorenzo Bianconi" <lorenzo@kernel.org>,
	"David S. Miller" <davem@davemloft.net>,
	"Eric Dumazet" <edumazet@google.com>,
	"Jakub Kicinski" <kuba@kernel.org>,
	"Paolo Abeni" <pabeni@redhat.com>,
	"Jesse Brandeburg" <jesse.brandeburg@intel.com>,
	"John Fastabend" <john.fastabend@gmail.com>,
	"Yajun Deng" <yajun.deng@linux.dev>,
	"Willem de Bruijn" <willemb@google.com>,
	bpf@vger.kernel.org, netdev@vger.kernel.org,
	linux-kernel@vger.kernel.org, xdp-hints@xdp-project.net
Subject: [xdp-hints] [PATCH RFC bpf-next 30/52] net, gro: decouple GRO from the NAPI layer
Date: Tue, 28 Jun 2022 21:47:50 +0200
Message-ID: <20220628194812.1453059-31-alexandr.lobakin@intel.com>
In-Reply-To: <20220628194812.1453059-1-alexandr.lobakin@intel.com>

In fact, these two are not tied closely to each other. The only
requirements for GRO are to run it in the BH context and to have some
sane limit on the packet batches, e.g. NAPI bounds them by its budget
(64/8/etc.).
Factor the purely-GRO fields out into a new structure, &gro_node.
Embed it into &napi_struct and adjust all the references. ::timer is
moved as well, since it is tied more to GRO than to NAPI: GRO relies
on it to decide whether to do a full or only a partial flush.
This does not yet make GRO ready to use outside of the NAPI
context.

Signed-off-by: Alexander Lobakin <alexandr.lobakin@intel.com>
---
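For illustration only (not part of this patch), a minimal sketch of
how a non-NAPI BH-context user could drive a standalone &gro_node
once a receive entry point is exported. example_dequeue_rx() and
example_gro_receive() are hypothetical placeholders; as noted above,
this patch still keeps dev_gro_receive() static and NAPI-only, while
gro_flush() and gro_normal_list() are the helpers it introduces:

	/* Sketch: a poll loop feeding a bare &gro_node in BH context.
	 * example_dequeue_rx() / example_gro_receive() are made up;
	 * only gro_flush() and gro_normal_list() exist after this
	 * patch.
	 */
	static void example_poll(struct gro_node *gro)
	{
		struct sk_buff *skb;

		while ((skb = example_dequeue_rx()) != NULL)
			example_gro_receive(gro, skb);

		/* Flush too-old packets; if HZ < 1000, flush all. */
		gro_flush(gro, HZ >= 1000);
		/* Pass the batched GRO_NORMAL skbs up the stack. */
		gro_normal_list(gro);
	}
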
 drivers/net/ethernet/brocade/bna/bnad.c |  1 +
 drivers/net/ethernet/cortina/gemini.c   |  1 +
 include/linux/netdevice.h               | 19 ++++---
 include/net/gro.h                       | 35 ++++++++----
 net/core/dev.c                          | 75 +++++++++++--------------
 net/core/gro.c                          | 63 ++++++++++-----------
 6 files changed, 103 insertions(+), 91 deletions(-)

diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index f6fe08df568b..8bcae1616b15 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -19,6 +19,7 @@
 #include <linux/ip.h>
 #include <linux/prefetch.h>
 #include <linux/module.h>
+#include <net/gro.h>
 
 #include "bnad.h"
 #include "bna.h"
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 9e6de2f968fa..6f208ce457dd 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -40,6 +40,7 @@
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
+#include <net/gro.h>
 
 #include "gemini.h"
 
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index bc2d82a3d0de..60df42b3f116 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -318,11 +318,19 @@ struct gro_list {
 };
 
 /*
- * size of gro hash buckets, must less than bit number of
- * napi_struct::gro_bitmask
+ * size of gro hash buckets, must be <= the number of bits in
+ * gro_node::bitmask
  */
 #define GRO_HASH_BUCKETS	8
 
+struct gro_node {
+	unsigned long		bitmask;	/* Mask of used buckets */
+	struct gro_list		hash[GRO_HASH_BUCKETS]; /* Pending GRO skbs */
+	struct list_head	rx_list;	/* Pending GRO_NORMAL skbs */
+	int			rx_count;	/* Length of rx_list */
+	struct hrtimer		timer;		/* Timer for deferred flush */
+};
+
 /*
  * Structure for NAPI scheduling similar to tasklet but with weighting
  */
@@ -338,17 +346,13 @@ struct napi_struct {
 	unsigned long		state;
 	int			weight;
 	int			defer_hard_irqs_count;
-	unsigned long		gro_bitmask;
 	int			(*poll)(struct napi_struct *, int);
 #ifdef CONFIG_NETPOLL
 	int			poll_owner;
 #endif
 	struct net_device	*dev;
-	struct gro_list		gro_hash[GRO_HASH_BUCKETS];
+	struct gro_node		gro;
 	struct sk_buff		*skb;
-	struct list_head	rx_list; /* Pending GRO_NORMAL skbs */
-	int			rx_count; /* length of rx_list */
-	struct hrtimer		timer;
 	struct list_head	dev_list;
 	struct hlist_node	napi_hash_node;
 	unsigned int		napi_id;
@@ -3788,7 +3792,6 @@ int netif_receive_skb_core(struct sk_buff *skb);
 void netif_receive_skb_list_internal(struct list_head *head);
 void netif_receive_skb_list(struct list_head *head);
 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
-void napi_gro_flush(struct napi_struct *napi, bool flush_old);
 struct sk_buff *napi_get_frags(struct napi_struct *napi);
 gro_result_t napi_gro_frags(struct napi_struct *napi);
 struct packet_offload *gro_find_receive_by_type(__be16 type);
diff --git a/include/net/gro.h b/include/net/gro.h
index 867656b0739c..75211ebd8765 100644
--- a/include/net/gro.h
+++ b/include/net/gro.h
@@ -421,26 +421,41 @@ static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto)
 }
 
 int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
+void __gro_flush(struct gro_node *gro, bool flush_old);
+
+static inline void gro_flush(struct gro_node *gro, bool flush_old)
+{
+	if (!gro->bitmask)
+		return;
+
+	__gro_flush(gro, flush_old);
+}
+
+static inline void napi_gro_flush(struct napi_struct *napi, bool flush_old)
+{
+	gro_flush(&napi->gro, flush_old);
+}
 
 /* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
-static inline void gro_normal_list(struct napi_struct *napi)
+static inline void gro_normal_list(struct gro_node *gro)
 {
-	if (!napi->rx_count)
+	if (!gro->rx_count)
 		return;
-	netif_receive_skb_list_internal(&napi->rx_list);
-	INIT_LIST_HEAD(&napi->rx_list);
-	napi->rx_count = 0;
+	netif_receive_skb_list_internal(&gro->rx_list);
+	INIT_LIST_HEAD(&gro->rx_list);
+	gro->rx_count = 0;
 }
 
 /* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
  * pass the whole batch up to the stack.
  */
-static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
+static inline void gro_normal_one(struct gro_node *gro, struct sk_buff *skb,
+				  int segs)
 {
-	list_add_tail(&skb->list, &napi->rx_list);
-	napi->rx_count += segs;
-	if (napi->rx_count >= gro_normal_batch)
-		gro_normal_list(napi);
+	list_add_tail(&skb->list, &gro->rx_list);
+	gro->rx_count += segs;
+	if (gro->rx_count >= gro_normal_batch)
+		gro_normal_list(gro);
 }
 
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 52b64d24c439..8b334aa974c2 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5765,7 +5765,7 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
 		return false;
 
 	if (work_done) {
-		if (n->gro_bitmask)
+		if (n->gro.bitmask)
 			timeout = READ_ONCE(n->dev->gro_flush_timeout);
 		n->defer_hard_irqs_count = READ_ONCE(n->dev->napi_defer_hard_irqs);
 	}
@@ -5775,15 +5775,13 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
 		if (timeout)
 			ret = false;
 	}
-	if (n->gro_bitmask) {
-		/* When the NAPI instance uses a timeout and keeps postponing
-		 * it, we need to bound somehow the time packets are kept in
-		 * the GRO layer
-		 */
-		napi_gro_flush(n, !!timeout);
-	}
 
-	gro_normal_list(n);
+	/* When the NAPI instance uses a timeout and keeps postponing
+	 * it, we need to bound somehow the time packets are kept in
+	 * the GRO layer
+	 */
+	gro_flush(&n->gro, !!timeout);
+	gro_normal_list(&n->gro);
 
 	if (unlikely(!list_empty(&n->poll_list))) {
 		/* If n->poll_list is not empty, we need to mask irqs */
@@ -5815,7 +5813,7 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
 	}
 
 	if (timeout)
-		hrtimer_start(&n->timer, ns_to_ktime(timeout),
+		hrtimer_start(&n->gro.timer, ns_to_ktime(timeout),
 			      HRTIMER_MODE_REL_PINNED);
 	return ret;
 }
@@ -5839,19 +5837,17 @@ static struct napi_struct *napi_by_id(unsigned int napi_id)
 static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
 {
 	if (!skip_schedule) {
-		gro_normal_list(napi);
+		gro_normal_list(&napi->gro);
 		__napi_schedule(napi);
 		return;
 	}
 
-	if (napi->gro_bitmask) {
-		/* flush too old packets
-		 * If HZ < 1000, flush all packets.
-		 */
-		napi_gro_flush(napi, HZ >= 1000);
-	}
+	/* flush too old packets
+	 * If HZ < 1000, flush all packets.
+	 */
+	gro_flush(&napi->gro, HZ >= 1000);
+	gro_normal_list(&napi->gro);
 
-	gro_normal_list(napi);
 	clear_bit(NAPI_STATE_SCHED, &napi->state);
 }
 
@@ -5880,7 +5876,7 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, bool
 		napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs);
 		timeout = READ_ONCE(napi->dev->gro_flush_timeout);
 		if (napi->defer_hard_irqs_count && timeout) {
-			hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED);
+			hrtimer_start(&napi->gro.timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED);
 			skip_schedule = true;
 		}
 	}
@@ -5947,7 +5943,7 @@ void napi_busy_loop(unsigned int napi_id,
 		}
 		work = napi_poll(napi, budget);
 		trace_napi_poll(napi, work, budget);
-		gro_normal_list(napi);
+		gro_normal_list(&napi->gro);
 count:
 		if (work > 0)
 			__NET_ADD_STATS(dev_net(napi->dev),
@@ -6015,7 +6011,7 @@ static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
 {
 	struct napi_struct *napi;
 
-	napi = container_of(timer, struct napi_struct, timer);
+	napi = container_of(timer, struct napi_struct, gro.timer);
 
 	/* Note : we use a relaxed variant of napi_schedule_prep() not setting
 	 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
@@ -6034,10 +6030,10 @@ static void init_gro_hash(struct napi_struct *napi)
 	int i;
 
 	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
-		INIT_LIST_HEAD(&napi->gro_hash[i].list);
-		napi->gro_hash[i].count = 0;
+		INIT_LIST_HEAD(&napi->gro.hash[i].list);
+		napi->gro.hash[i].count = 0;
 	}
-	napi->gro_bitmask = 0;
+	napi->gro.bitmask = 0;
 }
 
 int dev_set_threaded(struct net_device *dev, bool threaded)
@@ -6109,12 +6105,12 @@ void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
 
 	INIT_LIST_HEAD(&napi->poll_list);
 	INIT_HLIST_NODE(&napi->napi_hash_node);
-	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
-	napi->timer.function = napi_watchdog;
+	hrtimer_init(&napi->gro.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
+	napi->gro.timer.function = napi_watchdog;
 	init_gro_hash(napi);
 	napi->skb = NULL;
-	INIT_LIST_HEAD(&napi->rx_list);
-	napi->rx_count = 0;
+	INIT_LIST_HEAD(&napi->gro.rx_list);
+	napi->gro.rx_count = 0;
 	napi->poll = poll;
 	if (weight > NAPI_POLL_WEIGHT)
 		netdev_err_once(dev, "%s() called with weight %d\n", __func__,
@@ -6159,7 +6155,7 @@ void napi_disable(struct napi_struct *n)
 			break;
 	}
 
-	hrtimer_cancel(&n->timer);
+	hrtimer_cancel(&n->gro.timer);
 
 	clear_bit(NAPI_STATE_DISABLE, &n->state);
 }
@@ -6194,9 +6190,9 @@ static void flush_gro_hash(struct napi_struct *napi)
 	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
 		struct sk_buff *skb, *n;
 
-		list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
+		list_for_each_entry_safe(skb, n, &napi->gro.hash[i].list, list)
 			kfree_skb(skb);
-		napi->gro_hash[i].count = 0;
+		napi->gro.hash[i].count = 0;
 	}
 }
 
@@ -6211,7 +6207,7 @@ void __netif_napi_del(struct napi_struct *napi)
 	napi_free_frags(napi);
 
 	flush_gro_hash(napi);
-	napi->gro_bitmask = 0;
+	napi->gro.bitmask = 0;
 
 	if (napi->thread) {
 		kthread_stop(napi->thread);
@@ -6268,14 +6264,11 @@ static int __napi_poll(struct napi_struct *n, bool *repoll)
 		return work;
 	}
 
-	if (n->gro_bitmask) {
-		/* flush too old packets
-		 * If HZ < 1000, flush all packets.
-		 */
-		napi_gro_flush(n, HZ >= 1000);
-	}
-
-	gro_normal_list(n);
+	/* flush too old packets
+	 * If HZ < 1000, flush all packets.
+	 */
+	gro_flush(&n->gro, HZ >= 1000);
+	gro_normal_list(&n->gro);
 
 	/* Some drivers may have called napi_schedule
 	 * prior to exhausting their budget.
@@ -10396,7 +10389,7 @@ static struct hlist_head * __net_init netdev_create_hash(void)
 static int __net_init netdev_init(struct net *net)
 {
 	BUILD_BUG_ON(GRO_HASH_BUCKETS >
-		     8 * sizeof_field(struct napi_struct, gro_bitmask));
+		     BITS_PER_BYTE * sizeof_field(struct gro_node, bitmask));
 
 	INIT_LIST_HEAD(&net->dev_base_head);
 
diff --git a/net/core/gro.c b/net/core/gro.c
index b4190eb08467..67fd587a87c9 100644
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -278,8 +278,7 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
 	return 0;
 }
 
-
-static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
+static void gro_complete(struct gro_node *gro, struct sk_buff *skb)
 {
 	struct packet_offload *ptype;
 	__be16 type = skb->protocol;
@@ -312,43 +311,42 @@ static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
 	}
 
 out:
-	gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
+	gro_normal_one(gro, skb, NAPI_GRO_CB(skb)->count);
 }
 
-static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
-				   bool flush_old)
+static void __gro_flush_chain(struct gro_node *gro, u32 index, bool flush_old)
 {
-	struct list_head *head = &napi->gro_hash[index].list;
+	struct list_head *head = &gro->hash[index].list;
 	struct sk_buff *skb, *p;
 
 	list_for_each_entry_safe_reverse(skb, p, head, list) {
 		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
 			return;
 		skb_list_del_init(skb);
-		napi_gro_complete(napi, skb);
-		napi->gro_hash[index].count--;
+		gro_complete(gro, skb);
+		gro->hash[index].count--;
 	}
 
-	if (!napi->gro_hash[index].count)
-		__clear_bit(index, &napi->gro_bitmask);
+	if (!gro->hash[index].count)
+		__clear_bit(index, &gro->bitmask);
 }
 
-/* napi->gro_hash[].list contains packets ordered by age.
+/* gro->hash[].list contains packets ordered by age.
  * youngest packets at the head of it.
  * Complete skbs in reverse order to reduce latencies.
  */
-void napi_gro_flush(struct napi_struct *napi, bool flush_old)
+void __gro_flush(struct gro_node *gro, bool flush_old)
 {
-	unsigned long bitmask = napi->gro_bitmask;
+	unsigned long bitmask = gro->bitmask;
 	unsigned int i, base = ~0U;
 
 	while ((i = ffs(bitmask)) != 0) {
 		bitmask >>= i;
 		base += i;
-		__napi_gro_flush_chain(napi, base, flush_old);
+		__gro_flush_chain(gro, base, flush_old);
 	}
 }
-EXPORT_SYMBOL(napi_gro_flush);
+EXPORT_SYMBOL(__gro_flush);
 
 static void gro_list_prepare(const struct list_head *head,
 			     const struct sk_buff *skb)
@@ -449,7 +447,7 @@ static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
 	}
 }
 
-static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
+static void gro_flush_oldest(struct gro_node *gro, struct list_head *head)
 {
 	struct sk_buff *oldest;
 
@@ -465,13 +463,14 @@ static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
 	 * SKB to the chain.
 	 */
 	skb_list_del_init(oldest);
-	napi_gro_complete(napi, oldest);
+	gro_complete(gro, oldest);
 }
 
-static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+static enum gro_result dev_gro_receive(struct gro_node *gro,
+				       struct sk_buff *skb)
 {
 	u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
-	struct gro_list *gro_list = &napi->gro_hash[bucket];
+	struct gro_list *gro_list = &gro->hash[bucket];
 	struct list_head *head = &offload_base;
 	struct packet_offload *ptype;
 	__be16 type = skb->protocol;
@@ -530,7 +529,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 
 	if (pp) {
 		skb_list_del_init(pp);
-		napi_gro_complete(napi, pp);
+		gro_complete(gro, pp);
 		gro_list->count--;
 	}
 
@@ -541,7 +540,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 		goto normal;
 
 	if (unlikely(gro_list->count >= MAX_GRO_SKBS))
-		gro_flush_oldest(napi, &gro_list->list);
+		gro_flush_oldest(gro, &gro_list->list);
 	else
 		gro_list->count++;
 
@@ -558,10 +557,10 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 		gro_pull_from_frag0(skb, grow);
 ok:
 	if (gro_list->count) {
-		if (!test_bit(bucket, &napi->gro_bitmask))
-			__set_bit(bucket, &napi->gro_bitmask);
-	} else if (test_bit(bucket, &napi->gro_bitmask)) {
-		__clear_bit(bucket, &napi->gro_bitmask);
+		if (!test_bit(bucket, &gro->bitmask))
+			__set_bit(bucket, &gro->bitmask);
+	} else if (test_bit(bucket, &gro->bitmask)) {
+		__clear_bit(bucket, &gro->bitmask);
 	}
 
 	return ret;
@@ -599,13 +598,12 @@ struct packet_offload *gro_find_complete_by_type(__be16 type)
 }
 EXPORT_SYMBOL(gro_find_complete_by_type);
 
-static gro_result_t napi_skb_finish(struct napi_struct *napi,
-				    struct sk_buff *skb,
-				    gro_result_t ret)
+static gro_result_t gro_skb_finish(struct gro_node *gro, struct sk_buff *skb,
+				   gro_result_t ret)
 {
 	switch (ret) {
 	case GRO_NORMAL:
-		gro_normal_one(napi, skb, 1);
+		gro_normal_one(gro, skb, 1);
 		break;
 
 	case GRO_MERGED_FREE:
@@ -628,6 +626,7 @@ static gro_result_t napi_skb_finish(struct napi_struct *napi,
 
 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
+	struct gro_node *gro = &napi->gro;
 	gro_result_t ret;
 
 	skb_mark_napi_id(skb, napi);
@@ -635,7 +634,7 @@ gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 
 	skb_gro_reset_offset(skb, 0);
 
-	ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
+	ret = gro_skb_finish(gro, skb, dev_gro_receive(gro, skb));
 	trace_napi_gro_receive_exit(ret);
 
 	return ret;
@@ -695,7 +694,7 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi,
 		__skb_push(skb, ETH_HLEN);
 		skb->protocol = eth_type_trans(skb, skb->dev);
 		if (ret == GRO_NORMAL)
-			gro_normal_one(napi, skb, 1);
+			gro_normal_one(&napi->gro, skb, 1);
 		break;
 
 	case GRO_MERGED_FREE:
@@ -761,7 +760,7 @@ gro_result_t napi_gro_frags(struct napi_struct *napi)
 
 	trace_napi_gro_frags_entry(skb);
 
-	ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
+	ret = napi_frags_finish(napi, skb, dev_gro_receive(&napi->gro, skb));
 	trace_napi_gro_frags_exit(ret);
 
 	return ret;
-- 
2.36.1


Thread overview: 98+ messages
2022-06-28 19:47 [xdp-hints] [PATCH RFC bpf-next 00/52] bpf, xdp: introduce and use Generic Hints/metadata Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 01/52] libbpf: factor out BTF loading from load_module_btfs() Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 02/52] libbpf: try to load vmlinux BTF from the kernel first Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 03/52] libbpf: add function to get the pair BTF ID + type ID for a given type Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 04/52] libbpf: patch module BTF ID into BPF insns Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 05/52] net, xdp: decouple XDP code from the core networking code Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 06/52] bpf: pass a pointer to union bpf_attr to bpf_link_ops::update_prog() Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 07/52] net, xdp: remove redundant arguments from dev_xdp_{at,de}tach_link() Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 08/52] net, xdp: factor out XDP install arguments to a separate structure Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 09/52] net, xdp: add ability to specify BTF ID for XDP metadata Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 10/52] net, xdp: add ability to specify frame size threshold " Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 11/52] libbpf: factor out __bpf_set_link_xdp_fd_replace() args into a struct Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 12/52] libbpf: add ability to set the BTF/type ID on setting XDP prog Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 13/52] libbpf: add ability to set the meta threshold " Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 14/52] libbpf: pass &bpf_link_create_opts directly to bpf_program__attach_fd() Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 15/52] libbpf: add bpf_program__attach_xdp_opts() Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 16/52] selftests/bpf: expand xdp_link to check that setting meta opts works Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 17/52] samples/bpf: pass a struct to sample_install_xdp() Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 18/52] samples/bpf: add ability to specify metadata threshold Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 19/52] stddef: make __struct_group() UAPI C++-friendly Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 20/52] net, xdp: move XDP metadata helpers into new xdp_meta.h Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 21/52] net, xdp: allow metadata > 32 Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 22/52] net, skbuff: add ability to skip skb metadata comparison Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 23/52] net, skbuff: constify the @skb argument of skb_hwtstamps() Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 24/52] bpf, xdp: declare generic XDP metadata structure Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 25/52] net, xdp: add basic generic metadata accessors Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 26/52] bpf, btf: add a pair of function to work with the BTF ID + type ID pair Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 27/52] net, xdp: add &sk_buff <-> &xdp_meta_generic converters Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 28/52] net, xdp: prefetch data a bit when building an skb from an &xdp_frame Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 29/52] net, xdp: try to fill skb fields when converting " Alexander Lobakin
2022-06-28 19:47 ` Alexander Lobakin [this message]
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 31/52] net, gro: expose some GRO API to use outside of NAPI Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 32/52] bpf, cpumap: switch to GRO from netif_receive_skb_list() Alexander Lobakin
2024-08-07 20:38   ` [xdp-hints] " Daniel Xu
2024-08-08  4:54     ` Lorenzo Bianconi
2024-08-08 11:57       ` Alexander Lobakin
2024-08-08 17:22         ` Lorenzo Bianconi
2024-08-08 20:52         ` Daniel Xu
2024-08-09 10:02           ` Jesper Dangaard Brouer
2024-08-09 12:20           ` Alexander Lobakin
2024-08-09 12:45             ` Toke Høiland-Jørgensen
2024-08-09 12:56               ` Alexander Lobakin
2024-08-09 13:42                 ` Toke Høiland-Jørgensen
2024-08-10  0:54                   ` Martin KaFai Lau
2024-08-10  8:02                   ` Lorenzo Bianconi
2024-08-13  1:33             ` Jakub Kicinski
2024-08-13  9:51               ` Jesper Dangaard Brouer
2024-08-10  8:00         ` Lorenzo Bianconi
2024-08-13 14:09         ` Alexander Lobakin
2024-08-13 14:54           ` Toke Høiland-Jørgensen
2024-08-13 15:57             ` Jesper Dangaard Brouer
2024-08-19 14:50               ` Alexander Lobakin
2024-08-21  0:29                 ` Daniel Xu
2024-08-21 13:16                   ` Alexander Lobakin
2024-08-21 16:36                     ` Daniel Xu
2024-08-13 16:14           ` Lorenzo Bianconi
2024-08-13 16:27           ` Lorenzo Bianconi
2024-08-13 16:31             ` Alexander Lobakin
2024-08-08 20:44       ` Daniel Xu
2024-08-09  9:32         ` Jesper Dangaard Brouer
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 33/52] bpf, cpumap: add option to set a timeout for deferred flush Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 34/52] samples/bpf: add 'timeout' option to xdp_redirect_cpu Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 35/52] net, skbuff: introduce napi_skb_cache_get_bulk() Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 36/52] bpf, cpumap: switch to napi_skb_cache_get_bulk() Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 37/52] rcupdate: fix access helpers for incomplete struct pointers on GCC < 10 Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 38/52] net, xdp: remove unused xdp_attachment_info::flags Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 39/52] net, xdp: make &xdp_attachment_info a bit more useful in drivers Alexander Lobakin
2022-06-28 19:48 ` [xdp-hints] [PATCH RFC bpf-next 40/52] net, xdp: add an RCU version of xdp_attachment_setup() Alexander Lobakin
2022-06-28 19:48 ` [xdp-hints] [PATCH RFC bpf-next 41/52] net, xdp: replace net_device::xdp_prog pointer with &xdp_attachment_info Alexander Lobakin
2022-06-28 19:48 ` [xdp-hints] [PATCH RFC bpf-next 42/52] net, xdp: shortcut skb->dev in bpf_prog_run_generic_xdp() Alexander Lobakin
2022-06-28 19:48 ` [xdp-hints] [PATCH RFC bpf-next 43/52] net, xdp: build XDP generic metadata on Generic (skb) XDP path Alexander Lobakin
2022-06-28 19:48 ` [xdp-hints] [PATCH RFC bpf-next 44/52] net, ice: allow XDP prog hot-swapping Alexander Lobakin
2022-06-28 19:48 ` [xdp-hints] [PATCH RFC bpf-next 45/52] net, ice: consolidate all skb fields processing Alexander Lobakin
2022-06-28 19:48 ` [xdp-hints] [PATCH RFC bpf-next 46/52] net, ice: use an onstack &xdp_meta_generic_rx to store HW frame info Alexander Lobakin
2022-06-28 19:48 ` [xdp-hints] [PATCH RFC bpf-next 47/52] net, ice: build XDP generic metadata Alexander Lobakin
2022-06-28 19:48 ` [xdp-hints] [PATCH RFC bpf-next 48/52] libbpf: compress Endianness ops with a macro Alexander Lobakin
2022-06-28 19:48 ` [xdp-hints] [PATCH RFC bpf-next 49/52] libbpf: add LE <--> CPU conversion helpers Alexander Lobakin
2022-06-28 19:48 ` [xdp-hints] [PATCH RFC bpf-next 50/52] libbpf: introduce a couple memory access helpers Alexander Lobakin
2022-06-28 19:48 ` [xdp-hints] [PATCH RFC bpf-next 51/52] selftests/bpf: fix using test_xdp_meta BPF prog via skeleton infra Alexander Lobakin
2022-06-28 19:48 ` [xdp-hints] [PATCH RFC bpf-next 52/52] selftests/bpf: add XDP Generic Hints selftest Alexander Lobakin
2022-06-29  6:15 ` [xdp-hints] Re: [PATCH RFC bpf-next 00/52] bpf, xdp: introduce and use Generic Hints/metadata John Fastabend
2022-06-29 13:43   ` Toke Høiland-Jørgensen
2022-07-04 15:44     ` Alexander Lobakin
2022-07-04 17:13       ` Jesper Dangaard Brouer
2022-07-05 14:38         ` Alexander Lobakin
2022-07-05 19:08           ` Daniel Borkmann
2022-07-04 17:14       ` Toke Høiland-Jørgensen
2022-07-05 15:41         ` Alexander Lobakin
2022-07-05 18:51           ` Toke Høiland-Jørgensen
2022-07-06 13:50             ` Alexander Lobakin
2022-07-06 23:22               ` Toke Høiland-Jørgensen
2022-07-07 11:41                 ` Jesper Dangaard Brouer
2022-07-12 10:33                 ` Magnus Karlsson
2022-07-12 14:14                   ` Jesper Dangaard Brouer
2022-07-15 11:11                     ` Magnus Karlsson
2022-06-29 17:56   ` Zvi Effron
2022-06-30  7:39     ` Magnus Karlsson
2022-07-04 15:31   ` Alexander Lobakin
