From: Stanislav Fomichev <sdf@google.com>
To: bpf@vger.kernel.org
Cc: ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org,
martin.lau@linux.dev, song@kernel.org, yhs@fb.com,
john.fastabend@gmail.com, kpsingh@kernel.org, sdf@google.com,
haoluo@google.com, jolsa@kernel.org,
"Toke Høiland-Jørgensen" <toke@redhat.com>,
"Saeed Mahameed" <saeedm@nvidia.com>,
"David Ahern" <dsahern@gmail.com>,
"Jakub Kicinski" <kuba@kernel.org>,
"Willem de Bruijn" <willemb@google.com>,
"Jesper Dangaard Brouer" <brouer@redhat.com>,
"Anatoly Burakov" <anatoly.burakov@intel.com>,
"Alexander Lobakin" <alexandr.lobakin@intel.com>,
"Magnus Karlsson" <magnus.karlsson@gmail.com>,
"Maryam Tahhan" <mtahhan@redhat.com>,
xdp-hints@xdp-project.net, netdev@vger.kernel.org
Subject: [xdp-hints] [PATCH bpf-next v3 10/12] mlx5: Introduce mlx5_xdp_buff wrapper for xdp_buff
Date: Mon, 5 Dec 2022 18:45:52 -0800
Message-ID: <20221206024554.3826186-11-sdf@google.com>
In-Reply-To: <20221206024554.3826186-1-sdf@google.com>
From: Toke Høiland-Jørgensen <toke@redhat.com>
Wrap the driver's xdp_buff in a new struct mlx5_xdp_buff, as preparation for implementing the HW metadata kfuncs. No functional change.
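The wrapper added below starts out as a pure shell around xdp_buff, but
its point is that later patches can stash per-packet hardware context in
it and recover that context from inside the RX metadata kfuncs. A minimal
sketch of that pattern, where the cqe member and the mlx5_mxbuf() helper
are illustrative assumptions rather than part of this patch:

	/* Sketch only: cqe and mlx5_mxbuf() are hypothetical here and
	 * only appear (in some form) in later patches of this series.
	 */
	struct mlx5_xdp_buff {
		struct xdp_buff xdp;	/* must remain the first member */
		struct mlx5_cqe64 *cqe;	/* per-packet HW context */
	};

	static inline struct mlx5_xdp_buff *mlx5_mxbuf(struct xdp_buff *xdp)
	{
		return container_of(xdp, struct mlx5_xdp_buff, xdp);
	}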
Cc: Saeed Mahameed <saeedm@nvidia.com>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Martin KaFai Lau <martin.lau@linux.dev>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Willem de Bruijn <willemb@google.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Anatoly Burakov <anatoly.burakov@intel.com>
Cc: Alexander Lobakin <alexandr.lobakin@intel.com>
Cc: Magnus Karlsson <magnus.karlsson@gmail.com>
Cc: Maryam Tahhan <mtahhan@redhat.com>
Cc: xdp-hints@xdp-project.net
Cc: netdev@vger.kernel.org
Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: Stanislav Fomichev <sdf@google.com>
---
drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 +
.../net/ethernet/mellanox/mlx5/core/en/xdp.c | 3 +-
.../net/ethernet/mellanox/mlx5/core/en/xdp.h | 6 +-
.../ethernet/mellanox/mlx5/core/en/xsk/rx.c | 25 +++++----
.../net/ethernet/mellanox/mlx5/core/en_rx.c | 56 +++++++++----------
5 files changed, 49 insertions(+), 42 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index ff5b302531d5..cdbaac5f6d25 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -469,6 +469,7 @@ struct mlx5e_txqsq {
union mlx5e_alloc_unit {
struct page *page;
struct xdp_buff *xsk;
+ struct mlx5_xdp_buff *mxbuf;
};
/* XDP packets can be transmitted in different ways. On completion, we need to
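Note on the union member added above: the XSK code keeps writing buffer
pointers through .xsk (xsk_buff_alloc_batch() fills the array as plain
struct xdp_buff pointers) and now reads them back through .mxbuf. A
sketch of the compile-time fact that makes that type-punning sound; the
patch itself relies on the BUILD_BUG_ON and XSK_CHECK_PRIV_TYPE checks
in xsk/rx.c rather than on this illustrative assert:

	static_assert(offsetof(struct mlx5_xdp_buff, xdp) == 0,
		      "wrapper and xdp_buff pointers must alias");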
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 20507ef2f956..db49b813bcb5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -158,8 +158,9 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
/* returns true if packet was consumed by xdp */
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
- struct bpf_prog *prog, struct xdp_buff *xdp)
+ struct bpf_prog *prog, struct mlx5_xdp_buff *mxbuf)
{
+ struct xdp_buff *xdp = &mxbuf->xdp;
u32 act;
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index bc2d9034af5b..a33b448d542d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -44,10 +44,14 @@
(MLX5E_XDP_INLINE_WQE_MAX_DS_CNT * MLX5_SEND_WQE_DS - \
sizeof(struct mlx5_wqe_inline_seg))
+struct mlx5_xdp_buff {
+ struct xdp_buff xdp;
+};
+
struct mlx5e_xsk_param;
int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
- struct bpf_prog *prog, struct xdp_buff *xdp);
+ struct bpf_prog *prog, struct mlx5_xdp_buff *mxbuf);
void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index c91b54d9ff27..5e88dc61824e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -22,6 +22,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
goto err;
BUILD_BUG_ON(sizeof(wi->alloc_units[0]) != sizeof(wi->alloc_units[0].xsk));
+ XSK_CHECK_PRIV_TYPE(struct mlx5_xdp_buff);
batch = xsk_buff_alloc_batch(rq->xsk_pool, (struct xdp_buff **)wi->alloc_units,
rq->mpwqe.pages_per_wqe);
@@ -233,7 +234,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
u32 head_offset,
u32 page_idx)
{
- struct xdp_buff *xdp = wi->alloc_units[page_idx].xsk;
+ struct mlx5_xdp_buff *mxbuf = wi->alloc_units[page_idx].mxbuf;
struct bpf_prog *prog;
/* Check packet size. Note LRO doesn't use linear SKB */
@@ -249,9 +250,9 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
*/
WARN_ON_ONCE(head_offset);
- xsk_buff_set_size(xdp, cqe_bcnt);
- xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
- net_prefetch(xdp->data);
+ xsk_buff_set_size(&mxbuf->xdp, cqe_bcnt);
+ xsk_buff_dma_sync_for_cpu(&mxbuf->xdp, rq->xsk_pool);
+ net_prefetch(mxbuf->xdp.data);
/* Possible flows:
* - XDP_REDIRECT to XSKMAP:
@@ -269,7 +270,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
*/
prog = rcu_dereference(rq->xdp_prog);
- if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, xdp))) {
+ if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, mxbuf))) {
if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
return NULL; /* page/packet was consumed by XDP */
@@ -278,14 +279,14 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
/* XDP_PASS: copy the data from the UMEM to a new SKB and reuse the
* frame. On SKB allocation failure, NULL is returned.
*/
- return mlx5e_xsk_construct_skb(rq, xdp);
+ return mlx5e_xsk_construct_skb(rq, &mxbuf->xdp);
}
struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *wi,
u32 cqe_bcnt)
{
- struct xdp_buff *xdp = wi->au->xsk;
+ struct mlx5_xdp_buff *mxbuf = wi->au->mxbuf;
struct bpf_prog *prog;
/* wi->offset is not used in this function, because xdp->data and the
@@ -295,17 +296,17 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
*/
WARN_ON_ONCE(wi->offset);
- xsk_buff_set_size(xdp, cqe_bcnt);
- xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
- net_prefetch(xdp->data);
+ xsk_buff_set_size(&mxbuf->xdp, cqe_bcnt);
+ xsk_buff_dma_sync_for_cpu(&mxbuf->xdp, rq->xsk_pool);
+ net_prefetch(mxbuf->xdp.data);
prog = rcu_dereference(rq->xdp_prog);
- if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, xdp)))
+ if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, mxbuf)))
return NULL; /* page/packet was consumed by XDP */
/* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse
* will be handled by mlx5e_free_rx_wqe.
* On SKB allocation failure, NULL is returned.
*/
- return mlx5e_xsk_construct_skb(rq, xdp);
+ return mlx5e_xsk_construct_skb(rq, &mxbuf->xdp);
}
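For context, the XSK_CHECK_PRIV_TYPE() invocation added above comes from
patch 09 of this series ("xsk: Add cb area to struct xdp_buff_xsk"),
which reserves driver-private scratch space directly behind the xdp_buff
embedded in each pool buffer. Approximately (per that patch; the exact
cb size may differ):

	struct xdp_buff_xsk {
		struct xdp_buff xdp;
		u8 cb[24];	/* driver-private area */
		/* ... DMA and pool bookkeeping ... */
	};

	#define XSK_CHECK_PRIV_TYPE(t) \
		BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb))

This is what guarantees the buffers handed out by the pool are big
enough to be treated as struct mlx5_xdp_buff, making the
alloc_units[page_idx].mxbuf reads above safe.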
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index b1ea0b995d9c..434025703e50 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1565,10 +1565,10 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
}
static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
- u32 len, struct xdp_buff *xdp)
+ u32 len, struct mlx5_xdp_buff *mxbuf)
{
- xdp_init_buff(xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
- xdp_prepare_buff(xdp, va, headroom, len, true);
+ xdp_init_buff(&mxbuf->xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
+ xdp_prepare_buff(&mxbuf->xdp, va, headroom, len, true);
}
static struct sk_buff *
@@ -1595,16 +1595,16 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
prog = rcu_dereference(rq->xdp_prog);
if (prog) {
- struct xdp_buff xdp;
+ struct mlx5_xdp_buff mxbuf;
net_prefetchw(va); /* xdp_frame data area */
- mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
- if (mlx5e_xdp_handle(rq, au->page, prog, &xdp))
+ mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &mxbuf);
+ if (mlx5e_xdp_handle(rq, au->page, prog, &mxbuf))
return NULL; /* page/packet was consumed by XDP */
- rx_headroom = xdp.data - xdp.data_hard_start;
- metasize = xdp.data - xdp.data_meta;
- cqe_bcnt = xdp.data_end - xdp.data;
+ rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
+ metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
+ cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
}
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
@@ -1626,9 +1626,9 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
union mlx5e_alloc_unit *au = wi->au;
u16 rx_headroom = rq->buff.headroom;
struct skb_shared_info *sinfo;
+ struct mlx5_xdp_buff mxbuf;
u32 frag_consumed_bytes;
struct bpf_prog *prog;
- struct xdp_buff xdp;
struct sk_buff *skb;
dma_addr_t addr;
u32 truesize;
@@ -1643,8 +1643,8 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
net_prefetchw(va); /* xdp_frame data area */
net_prefetch(va + rx_headroom);
- mlx5e_fill_xdp_buff(rq, va, rx_headroom, frag_consumed_bytes, &xdp);
- sinfo = xdp_get_shared_info_from_buff(&xdp);
+ mlx5e_fill_xdp_buff(rq, va, rx_headroom, frag_consumed_bytes, &mxbuf);
+ sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
truesize = 0;
cqe_bcnt -= frag_consumed_bytes;
@@ -1662,13 +1662,13 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
dma_sync_single_for_cpu(rq->pdev, addr + wi->offset,
frag_consumed_bytes, rq->buff.map_dir);
- if (!xdp_buff_has_frags(&xdp)) {
+ if (!xdp_buff_has_frags(&mxbuf.xdp)) {
/* Init on the first fragment to avoid cold cache access
* when possible.
*/
sinfo->nr_frags = 0;
sinfo->xdp_frags_size = 0;
- xdp_buff_set_frags_flag(&xdp);
+ xdp_buff_set_frags_flag(&mxbuf.xdp);
}
frag = &sinfo->frags[sinfo->nr_frags++];
@@ -1677,7 +1677,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
skb_frag_size_set(frag, frag_consumed_bytes);
if (page_is_pfmemalloc(au->page))
- xdp_buff_set_frag_pfmemalloc(&xdp);
+ xdp_buff_set_frag_pfmemalloc(&mxbuf.xdp);
sinfo->xdp_frags_size += frag_consumed_bytes;
truesize += frag_info->frag_stride;
@@ -1690,7 +1690,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
au = head_wi->au;
prog = rcu_dereference(rq->xdp_prog);
- if (prog && mlx5e_xdp_handle(rq, au->page, prog, &xdp)) {
+ if (prog && mlx5e_xdp_handle(rq, au->page, prog, &mxbuf)) {
if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
int i;
@@ -1700,22 +1700,22 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
return NULL; /* page/packet was consumed by XDP */
}
- skb = mlx5e_build_linear_skb(rq, xdp.data_hard_start, rq->buff.frame0_sz,
- xdp.data - xdp.data_hard_start,
- xdp.data_end - xdp.data,
- xdp.data - xdp.data_meta);
+ skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start, rq->buff.frame0_sz,
+ mxbuf.xdp.data - mxbuf.xdp.data_hard_start,
+ mxbuf.xdp.data_end - mxbuf.xdp.data,
+ mxbuf.xdp.data - mxbuf.xdp.data_meta);
if (unlikely(!skb))
return NULL;
page_ref_inc(au->page);
- if (unlikely(xdp_buff_has_frags(&xdp))) {
+ if (unlikely(xdp_buff_has_frags(&mxbuf.xdp))) {
int i;
/* sinfo->nr_frags is reset by build_skb, calculate again. */
xdp_update_skb_shared_info(skb, wi - head_wi - 1,
sinfo->xdp_frags_size, truesize,
- xdp_buff_is_frag_pfmemalloc(&xdp));
+ xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
for (i = 0; i < sinfo->nr_frags; i++) {
skb_frag_t *frag = &sinfo->frags[i];
@@ -1996,19 +1996,19 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
prog = rcu_dereference(rq->xdp_prog);
if (prog) {
- struct xdp_buff xdp;
+ struct mlx5_xdp_buff mxbuf;
net_prefetchw(va); /* xdp_frame data area */
- mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
- if (mlx5e_xdp_handle(rq, au->page, prog, &xdp)) {
+ mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &mxbuf);
+ if (mlx5e_xdp_handle(rq, au->page, prog, &mxbuf)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
return NULL; /* page/packet was consumed by XDP */
}
- rx_headroom = xdp.data - xdp.data_hard_start;
- metasize = xdp.data - xdp.data_meta;
- cqe_bcnt = xdp.data_end - xdp.data;
+ rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
+ metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
+ cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
}
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
--
2.39.0.rc0.267.gcb52ba06e7-goog
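On the regular (non-XSK) RX paths in en_rx.c, the wrapper simply
replaces the on-stack xdp_buff, and since it has no extra members yet
the layout is identical, which is what backs the "no functional change"
claim. That invariant could be stated at compile time like this
(illustrative, not part of the patch):

	static_assert(sizeof(struct mlx5_xdp_buff) == sizeof(struct xdp_buff),
		      "wrapper adds no members in this patch");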