XDP hardware hints discussion mail archive
From: Alexander Lobakin <alexandr.lobakin@intel.com>
To: Alexei Starovoitov <ast@kernel.org>,
	Daniel Borkmann <daniel@iogearbox.net>,
	Andrii Nakryiko <andrii@kernel.org>
Cc: "Alexander Lobakin" <alexandr.lobakin@intel.com>,
	"Larysa Zaremba" <larysa.zaremba@intel.com>,
	"Michal Swiatkowski" <michal.swiatkowski@linux.intel.com>,
	"Jesper Dangaard Brouer" <hawk@kernel.org>,
	"Björn Töpel" <bjorn@kernel.org>,
	"Magnus Karlsson" <magnus.karlsson@intel.com>,
	"Maciej Fijalkowski" <maciej.fijalkowski@intel.com>,
	"Jonathan Lemon" <jonathan.lemon@gmail.com>,
	"Toke Hoiland-Jorgensen" <toke@redhat.com>,
	"Lorenzo Bianconi" <lorenzo@kernel.org>,
	"David S. Miller" <davem@davemloft.net>,
	"Eric Dumazet" <edumazet@google.com>,
	"Jakub Kicinski" <kuba@kernel.org>,
	"Paolo Abeni" <pabeni@redhat.com>,
	"Jesse Brandeburg" <jesse.brandeburg@intel.com>,
	"John Fastabend" <john.fastabend@gmail.com>,
	"Yajun Deng" <yajun.deng@linux.dev>,
	"Willem de Bruijn" <willemb@google.com>,
	bpf@vger.kernel.org, netdev@vger.kernel.org,
	linux-kernel@vger.kernel.org, xdp-hints@xdp-project.net
Subject: [xdp-hints] [PATCH RFC bpf-next 44/52] net, ice: allow XDP prog hot-swapping
Date: Tue, 28 Jun 2022 21:48:04 +0200	[thread overview]
Message-ID: <20220628194812.1453059-45-alexandr.lobakin@intel.com> (raw)
In-Reply-To: <20220628194812.1453059-1-alexandr.lobakin@intel.com>

Currently, the interface is always brought down on
%XDP_SETUP_PROG, no matter whether it is a global configuration
change (no prog -> prog, prog -> no prog) or just a hot-swap
(prog -> prog). That is suboptimal, especially when
old_prog == new_prog, which should effectively be a no-op.
It also makes it impossible to change auxiliary XDP options on
the fly, even though some of them could be designed to work
that way.
Store &xdp_attachment_info in a single copy inside the VSI
structure; the Rx rings only keep pointers to it. This way it
needs to be rewritten just once and xdp_attachment_setup_rcu()
can now be used. Guard the NAPI poll routines with RCU read
locks to make sure the BPF prog won't get freed in the middle
of a polling cycle. The old program is now freed only after all
of the rings have already switched to the new one. Finally, do
an ifdown -> ifup cycle in ::ndo_bpf() only when it is absolutely
needed (the global changes mentioned above); the rest is
completely safe to do on the fly.

Signed-off-by: Alexander Lobakin <alexandr.lobakin@intel.com>
---
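Below is a minimal, self-contained sketch of the RCU hot-swap
scheme described above. It is not the actual ice code: the
my_attachment/my_ring structures and the function names are made
up for illustration, and the writer side uses an explicit
synchronize_rcu() instead of the xdp_attachment_setup_rcu()
helper introduced earlier in the series.

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <net/xdp.h>

struct my_attachment {
	struct bpf_prog __rcu *prog;	/* single copy, lives in the VSI */
};

struct my_ring {
	const struct my_attachment *attach;	/* per-ring pointer only */
};

/* Reader side (NAPI poll): the prog can't be freed while we are
 * inside the RCU read-side critical section, even if the writer
 * replaces it concurrently.
 */
static u32 my_run_xdp(struct my_ring *ring, struct xdp_buff *xdp)
{
	struct bpf_prog *prog;
	u32 act = XDP_PASS;

	rcu_read_lock();

	prog = rcu_dereference(ring->attach->prog);
	if (prog)
		act = bpf_prog_run_xdp(prog, xdp);

	rcu_read_unlock();

	return act;
}

/* Writer side (::ndo_bpf(), called under rtnl_lock()): publish the
 * new prog, wait for all in-flight pollers to leave their read-side
 * sections, then drop the reference to the old one.
 */
static void my_swap_prog(struct my_attachment *attach, struct bpf_prog *new)
{
	struct bpf_prog *old;

	old = rcu_replace_pointer(attach->prog, new,
				  lockdep_rtnl_is_held());
	if (old) {
		synchronize_rcu();
		bpf_prog_put(old);
	}
}
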
 drivers/net/ethernet/intel/ice/ice.h      |  8 +--
 drivers/net/ethernet/intel/ice/ice_lib.c  |  4 +-
 drivers/net/ethernet/intel/ice/ice_main.c | 61 ++++++++++-------------
 drivers/net/ethernet/intel/ice/ice_txrx.c | 11 ++--
 drivers/net/ethernet/intel/ice/ice_txrx.h |  2 +-
 drivers/net/ethernet/intel/ice/ice_xsk.c  |  2 +-
 6 files changed, 40 insertions(+), 48 deletions(-)

diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 60453b3b8d23..402b71ab48e4 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -386,7 +386,7 @@ struct ice_vsi {
 	u16 num_tx_desc;
 	u16 qset_handle[ICE_MAX_TRAFFIC_CLASS];
 	struct ice_tc_cfg tc_cfg;
-	struct bpf_prog *xdp_prog;
+	struct xdp_attachment_info xdp_info;
 	struct ice_tx_ring **xdp_rings;	 /* XDP ring array */
 	unsigned long *af_xdp_zc_qps;	 /* tracks AF_XDP ZC enabled qps */
 	u16 num_xdp_txq;		 /* Used XDP queues */
@@ -672,7 +672,7 @@ static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev)
 
 static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
 {
-	return !!READ_ONCE(vsi->xdp_prog);
+	return !!rcu_access_pointer(vsi->xdp_info.prog_rcu);
 }
 
 static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
@@ -857,8 +857,8 @@ int ice_down(struct ice_vsi *vsi);
 int ice_vsi_cfg(struct ice_vsi *vsi);
 struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
-int ice_destroy_xdp_rings(struct ice_vsi *vsi);
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct netdev_bpf *xdp);
+int ice_destroy_xdp_rings(struct ice_vsi *vsi, struct netdev_bpf *xdp);
 int
 ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 	     u32 flags);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index b28fb8eacffb..3db1271b5176 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -3200,7 +3200,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
 		/* return value check can be skipped here, it always returns
 		 * 0 if reset is in progress
 		 */
-		ice_destroy_xdp_rings(vsi);
+		ice_destroy_xdp_rings(vsi, NULL);
 	ice_vsi_put_qs(vsi);
 	ice_vsi_clear_rings(vsi);
 	ice_vsi_free_arrays(vsi);
@@ -3248,7 +3248,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
 			ret = ice_vsi_determine_xdp_res(vsi);
 			if (ret)
 				goto err_vectors;
-			ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
+			ret = ice_prepare_xdp_rings(vsi, NULL);
 			if (ret)
 				goto err_vectors;
 		}
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index c1ac2f746714..7d049930a0a8 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2603,32 +2603,14 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
 	return -ENOMEM;
 }
 
-/**
- * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
- * @vsi: VSI to set the bpf prog on
- * @prog: the bpf prog pointer
- */
-static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
-{
-	struct bpf_prog *old_prog;
-	int i;
-
-	old_prog = xchg(&vsi->xdp_prog, prog);
-	if (old_prog)
-		bpf_prog_put(old_prog);
-
-	ice_for_each_rxq(vsi, i)
-		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
-}
-
 /**
  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
  * @vsi: VSI to bring up Tx rings used by XDP
- * @prog: bpf program that will be assigned to VSI
+ * @xdp: &netdev_bpf with XDP program and additional data passed from the stack
  *
  * Return 0 on success and negative value on error
  */
-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct netdev_bpf *xdp)
 {
 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
 	int xdp_rings_rem = vsi->num_xdp_txq;
@@ -2713,8 +2695,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
 	 * this is not harmful as dev_xdp_install bumps the refcount
 	 * before calling the op exposed by the driver;
 	 */
-	if (!ice_is_xdp_ena_vsi(vsi))
-		ice_vsi_assign_bpf_prog(vsi, prog);
+	if (xdp)
+		xdp_attachment_setup_rcu(&vsi->xdp_info, xdp);
 
 	return 0;
 clear_xdp_rings:
@@ -2739,11 +2721,12 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
 /**
  * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
  * @vsi: VSI to remove XDP rings
+ * @xdp: &netdev_bpf with XDP program and additional data passed from the stack
  *
  * Detach XDP rings from irq vectors, clean up the PF bitmap and free
  * resources
  */
-int ice_destroy_xdp_rings(struct ice_vsi *vsi)
+int ice_destroy_xdp_rings(struct ice_vsi *vsi, struct netdev_bpf *xdp)
 {
 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
 	struct ice_pf *pf = vsi->back;
@@ -2796,7 +2779,11 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi)
 	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
 		return 0;
 
-	ice_vsi_assign_bpf_prog(vsi, NULL);
+	/* Symmetrically to ice_prepare_xdp_rings(), touch XDP program only
+	 * when called from ::ndo_bpf().
+	 */
+	if (xdp)
+		xdp_attachment_setup_rcu(&vsi->xdp_info, xdp);
 
 	/* notify Tx scheduler that we destroyed XDP queues and bring
 	 * back the old number of child nodes
@@ -2853,15 +2840,14 @@ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
 /**
  * ice_xdp_setup_prog - Add or remove XDP eBPF program
  * @vsi: VSI to setup XDP for
- * @prog: XDP program
- * @extack: netlink extended ack
+ * @xdp: &netdev_bpf with XDP program and additional data passed from the stack
  */
 static int
-ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
-		   struct netlink_ext_ack *extack)
+ice_xdp_setup_prog(struct ice_vsi *vsi, struct netdev_bpf *xdp)
 {
 	int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
-	bool if_running = netif_running(vsi->netdev);
+	struct netlink_ext_ack *extack = xdp->extack;
+	bool restart = false, prog = !!xdp->prog;
 	int ret = 0, xdp_ring_err = 0;
 
 	if (frame_size > vsi->rx_buf_len) {
@@ -2870,12 +2856,15 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
 	}
 
 	/* need to stop netdev while setting up the program for Rx rings */
-	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
+	if (ice_is_xdp_ena_vsi(vsi) != prog && netif_running(vsi->netdev) &&
+	    !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
 		ret = ice_down(vsi);
 		if (ret) {
 			NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
 			return ret;
 		}
+
+		restart = true;
 	}
 
 	if (!ice_is_xdp_ena_vsi(vsi) && prog) {
@@ -2883,24 +2872,24 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
 		if (xdp_ring_err) {
 			NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
 		} else {
-			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
+			xdp_ring_err = ice_prepare_xdp_rings(vsi, xdp);
 			if (xdp_ring_err)
 				NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
 		}
 	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
-		xdp_ring_err = ice_destroy_xdp_rings(vsi);
+		xdp_ring_err = ice_destroy_xdp_rings(vsi, xdp);
 		if (xdp_ring_err)
 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
 	} else {
-		/* safe to call even when prog == vsi->xdp_prog as
+		/* safe to call even when prog == vsi->xdp_info.prog as
 		 * dev_xdp_install in net/core/dev.c incremented prog's
 		 * refcount so corresponding bpf_prog_put won't cause
 		 * underflow
 		 */
-		ice_vsi_assign_bpf_prog(vsi, prog);
+		xdp_attachment_setup_rcu(&vsi->xdp_info, xdp);
 	}
 
-	if (if_running)
+	if (restart)
 		ret = ice_up(vsi);
 
 	if (!ret && prog)
@@ -2940,7 +2929,7 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 
 	switch (xdp->command) {
 	case XDP_SETUP_PROG:
-		return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
+		return ice_xdp_setup_prog(vsi, xdp);
 	case XDP_SETUP_XSK_POOL:
 		return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
 					  xdp->xsk.queue_id);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 3f8b7274ed2f..25383bbf8245 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -454,7 +454,7 @@ void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
 	if (rx_ring->vsi->type == ICE_VSI_PF)
 		if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
 			xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
-	rx_ring->xdp_prog = NULL;
+
 	if (rx_ring->xsk_pool) {
 		kfree(rx_ring->xdp_buf);
 		rx_ring->xdp_buf = NULL;
@@ -507,8 +507,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
 	rx_ring->next_to_use = 0;
 	rx_ring->next_to_clean = 0;
 
-	if (ice_is_xdp_ena_vsi(rx_ring->vsi))
-		WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
+	rx_ring->xdp_info = &rx_ring->vsi->xdp_info;
 
 	if (rx_ring->vsi->type == ICE_VSI_PF &&
 	    !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
@@ -1123,7 +1122,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 #endif
 	xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
 
-	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+	xdp_prog = rcu_dereference(rx_ring->xdp_info->prog_rcu);
 	if (xdp_prog)
 		xdp_ring = rx_ring->xdp_ring;
 
@@ -1489,6 +1488,8 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
 		/* Max of 1 Rx ring in this q_vector so give it the budget */
 		budget_per_ring = budget;
 
+	rcu_read_lock();
+
 	ice_for_each_rx_ring(rx_ring, q_vector->rx) {
 		int cleaned;
 
@@ -1505,6 +1506,8 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
 			clean_complete = false;
 	}
 
+	rcu_read_unlock();
+
 	/* If work not completed, return budget and polling will return */
 	if (!clean_complete) {
 		/* Set the writeback on ITR so partial completions of
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index ca902af54bb4..1fc31ab0bf33 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -290,7 +290,7 @@ struct ice_rx_ring {
 	struct rcu_head rcu;		/* to avoid race on free */
 	/* CL4 - 3rd cacheline starts here */
 	struct ice_channel *ch;
-	struct bpf_prog *xdp_prog;
+	const struct xdp_attachment_info *xdp_info;
 	struct ice_tx_ring *xdp_ring;
 	struct xsk_buff_pool *xsk_pool;
 	struct sk_buff *skb;
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 49ba8bfdbf04..eb994cf68ff4 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -597,7 +597,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
 	/* ZC patch is enabled only when XDP program is set,
 	 * so here it can not be NULL
 	 */
-	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+	xdp_prog = rcu_dereference(rx_ring->xdp_info->prog_rcu);
 	xdp_ring = rx_ring->xdp_ring;
 
 	while (likely(total_rx_packets < (unsigned int)budget)) {
-- 
2.36.1


Thread overview: 98+ messages
2022-06-28 19:47 [xdp-hints] [PATCH RFC bpf-next 00/52] bpf, xdp: introduce and use Generic Hints/metadata Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 01/52] libbpf: factor out BTF loading from load_module_btfs() Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 02/52] libbpf: try to load vmlinux BTF from the kernel first Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 03/52] libbpf: add function to get the pair BTF ID + type ID for a given type Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 04/52] libbpf: patch module BTF ID into BPF insns Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 05/52] net, xdp: decouple XDP code from the core networking code Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 06/52] bpf: pass a pointer to union bpf_attr to bpf_link_ops::update_prog() Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 07/52] net, xdp: remove redundant arguments from dev_xdp_{at,de}tach_link() Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 08/52] net, xdp: factor out XDP install arguments to a separate structure Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 09/52] net, xdp: add ability to specify BTF ID for XDP metadata Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 10/52] net, xdp: add ability to specify frame size threshold " Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 11/52] libbpf: factor out __bpf_set_link_xdp_fd_replace() args into a struct Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 12/52] libbpf: add ability to set the BTF/type ID on setting XDP prog Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 13/52] libbpf: add ability to set the meta threshold " Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 14/52] libbpf: pass &bpf_link_create_opts directly to bpf_program__attach_fd() Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 15/52] libbpf: add bpf_program__attach_xdp_opts() Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 16/52] selftests/bpf: expand xdp_link to check that setting meta opts works Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 17/52] samples/bpf: pass a struct to sample_install_xdp() Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 18/52] samples/bpf: add ability to specify metadata threshold Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 19/52] stddef: make __struct_group() UAPI C++-friendly Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 20/52] net, xdp: move XDP metadata helpers into new xdp_meta.h Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 21/52] net, xdp: allow metadata > 32 Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 22/52] net, skbuff: add ability to skip skb metadata comparison Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 23/52] net, skbuff: constify the @skb argument of skb_hwtstamps() Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 24/52] bpf, xdp: declare generic XDP metadata structure Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 25/52] net, xdp: add basic generic metadata accessors Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 26/52] bpf, btf: add a pair of function to work with the BTF ID + type ID pair Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 27/52] net, xdp: add &sk_buff <-> &xdp_meta_generic converters Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 28/52] net, xdp: prefetch data a bit when building an skb from an &xdp_frame Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 29/52] net, xdp: try to fill skb fields when converting " Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 30/52] net, gro: decouple GRO from the NAPI layer Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 31/52] net, gro: expose some GRO API to use outside of NAPI Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 32/52] bpf, cpumap: switch to GRO from netif_receive_skb_list() Alexander Lobakin
2024-08-07 20:38   ` [xdp-hints] " Daniel Xu
2024-08-08  4:54     ` Lorenzo Bianconi
2024-08-08 11:57       ` Alexander Lobakin
2024-08-08 17:22         ` Lorenzo Bianconi
2024-08-08 20:52         ` Daniel Xu
2024-08-09 10:02           ` Jesper Dangaard Brouer
2024-08-09 12:20           ` Alexander Lobakin
2024-08-09 12:45             ` Toke Høiland-Jørgensen
2024-08-09 12:56               ` Alexander Lobakin
2024-08-09 13:42                 ` Toke Høiland-Jørgensen
2024-08-10  0:54                   ` Martin KaFai Lau
2024-08-10  8:02                   ` Lorenzo Bianconi
2024-08-13  1:33             ` Jakub Kicinski
2024-08-13  9:51               ` Jesper Dangaard Brouer
2024-08-10  8:00         ` Lorenzo Bianconi
2024-08-13 14:09         ` Alexander Lobakin
2024-08-13 14:54           ` Toke Høiland-Jørgensen
2024-08-13 15:57             ` Jesper Dangaard Brouer
2024-08-19 14:50               ` Alexander Lobakin
2024-08-21  0:29                 ` Daniel Xu
2024-08-21 13:16                   ` Alexander Lobakin
2024-08-21 16:36                     ` Daniel Xu
2024-08-13 16:14           ` Lorenzo Bianconi
2024-08-13 16:27           ` Lorenzo Bianconi
2024-08-13 16:31             ` Alexander Lobakin
2024-08-08 20:44       ` Daniel Xu
2024-08-09  9:32         ` Jesper Dangaard Brouer
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 33/52] bpf, cpumap: add option to set a timeout for deferred flush Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 34/52] samples/bpf: add 'timeout' option to xdp_redirect_cpu Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 35/52] net, skbuff: introduce napi_skb_cache_get_bulk() Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 36/52] bpf, cpumap: switch to napi_skb_cache_get_bulk() Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 37/52] rcupdate: fix access helpers for incomplete struct pointers on GCC < 10 Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 38/52] net, xdp: remove unused xdp_attachment_info::flags Alexander Lobakin
2022-06-28 19:47 ` [xdp-hints] [PATCH RFC bpf-next 39/52] net, xdp: make &xdp_attachment_info a bit more useful in drivers Alexander Lobakin
2022-06-28 19:48 ` [xdp-hints] [PATCH RFC bpf-next 40/52] net, xdp: add an RCU version of xdp_attachment_setup() Alexander Lobakin
2022-06-28 19:48 ` [xdp-hints] [PATCH RFC bpf-next 41/52] net, xdp: replace net_device::xdp_prog pointer with &xdp_attachment_info Alexander Lobakin
2022-06-28 19:48 ` [xdp-hints] [PATCH RFC bpf-next 42/52] net, xdp: shortcut skb->dev in bpf_prog_run_generic_xdp() Alexander Lobakin
2022-06-28 19:48 ` [xdp-hints] [PATCH RFC bpf-next 43/52] net, xdp: build XDP generic metadata on Generic (skb) XDP path Alexander Lobakin
2022-06-28 19:48 ` Alexander Lobakin [this message]
2022-06-28 19:48 ` [xdp-hints] [PATCH RFC bpf-next 45/52] net, ice: consolidate all skb fields processing Alexander Lobakin
2022-06-28 19:48 ` [xdp-hints] [PATCH RFC bpf-next 46/52] net, ice: use an onstack &xdp_meta_generic_rx to store HW frame info Alexander Lobakin
2022-06-28 19:48 ` [xdp-hints] [PATCH RFC bpf-next 47/52] net, ice: build XDP generic metadata Alexander Lobakin
2022-06-28 19:48 ` [xdp-hints] [PATCH RFC bpf-next 48/52] libbpf: compress Endianness ops with a macro Alexander Lobakin
2022-06-28 19:48 ` [xdp-hints] [PATCH RFC bpf-next 49/52] libbpf: add LE <--> CPU conversion helpers Alexander Lobakin
2022-06-28 19:48 ` [xdp-hints] [PATCH RFC bpf-next 50/52] libbpf: introduce a couple memory access helpers Alexander Lobakin
2022-06-28 19:48 ` [xdp-hints] [PATCH RFC bpf-next 51/52] selftests/bpf: fix using test_xdp_meta BPF prog via skeleton infra Alexander Lobakin
2022-06-28 19:48 ` [xdp-hints] [PATCH RFC bpf-next 52/52] selftests/bpf: add XDP Generic Hints selftest Alexander Lobakin
2022-06-29  6:15 ` [xdp-hints] Re: [PATCH RFC bpf-next 00/52] bpf, xdp: introduce and use Generic Hints/metadata John Fastabend
2022-06-29 13:43   ` Toke Høiland-Jørgensen
2022-07-04 15:44     ` Alexander Lobakin
2022-07-04 17:13       ` Jesper Dangaard Brouer
2022-07-05 14:38         ` Alexander Lobakin
2022-07-05 19:08           ` Daniel Borkmann
2022-07-04 17:14       ` Toke Høiland-Jørgensen
2022-07-05 15:41         ` Alexander Lobakin
2022-07-05 18:51           ` Toke Høiland-Jørgensen
2022-07-06 13:50             ` Alexander Lobakin
2022-07-06 23:22               ` Toke Høiland-Jørgensen
2022-07-07 11:41                 ` Jesper Dangaard Brouer
2022-07-12 10:33                 ` Magnus Karlsson
2022-07-12 14:14                   ` Jesper Dangaard Brouer
2022-07-15 11:11                     ` Magnus Karlsson
2022-06-29 17:56   ` Zvi Effron
2022-06-30  7:39     ` Magnus Karlsson
2022-07-04 15:31   ` Alexander Lobakin
