From: Stanislav Fomichev <sdf@google.com>
To: bpf@vger.kernel.org
Cc: ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org,
martin.lau@linux.dev, song@kernel.org, yhs@fb.com,
john.fastabend@gmail.com, kpsingh@kernel.org, sdf@google.com,
haoluo@google.com, jolsa@kernel.org, kuba@kernel.org,
toke@kernel.org, willemb@google.com, dsahern@kernel.org,
magnus.karlsson@intel.com, bjorn@kernel.org,
maciej.fijalkowski@intel.com, hawk@kernel.org,
netdev@vger.kernel.org, xdp-hints@xdp-project.net
Subject: [xdp-hints] [RFC bpf-next v3 06/14] net: veth: Implement devtx timestamp kfuncs
Date: Fri, 7 Jul 2023 12:29:58 -0700 [thread overview]
Message-ID: <20230707193006.1309662-7-sdf@google.com> (raw)
In-Reply-To: <20230707193006.1309662-1-sdf@google.com>
Have a software-based example for kfuncs to showcase how they
can be used in real devices and to have something to
test against in the selftests.
Both paths (skb & xdp) are covered. Only the skb path is really
tested though.
Signed-off-by: Stanislav Fomichev <sdf@google.com>
---
drivers/net/veth.c | 97 +++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 95 insertions(+), 2 deletions(-)
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 614f3e3efab0..5af4b15e107c 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -27,6 +27,7 @@
#include <linux/bpf_trace.h>
#include <linux/net_tstamp.h>
#include <net/page_pool.h>
+#include <net/devtx.h>
#define DRV_NAME "veth"
#define DRV_VERSION "1.0"
@@ -123,6 +124,13 @@ struct veth_xdp_buff {
struct sk_buff *skb;
};
+struct veth_devtx_ctx {
+ struct devtx_ctx devtx;
+ struct xdp_frame *xdpf;
+ struct sk_buff *skb;
+ ktime_t xdp_tx_timestamp;
+};
+
static int veth_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
@@ -313,10 +321,33 @@ static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
return NET_RX_SUCCESS;
}
+DEFINE_DEVTX_HOOKS(veth);
+
static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
struct veth_rq *rq, bool xdp)
{
- return __dev_forward_skb(dev, skb) ?: xdp ?
+ struct net_device *orig_dev = skb->dev;
+ int ret;
+
+ ret = __dev_forward_skb(dev, skb);
+ if (ret)
+ return ret;
+
+ if (devtx_enabled()) {
+ struct veth_devtx_ctx ctx = {
+ .devtx = {
+ .netdev = orig_dev,
+ .sinfo = skb_shinfo(skb),
+ },
+ .skb = skb,
+ };
+
+ __skb_push(skb, ETH_HLEN);
+ veth_devtx_complete_skb(&ctx.devtx, skb);
+ __skb_pull(skb, ETH_HLEN);
+ }
+
+ return xdp ?
veth_xdp_rx(rq, skb) :
__netif_rx(skb);
}
@@ -356,6 +387,18 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
}
+ if (devtx_enabled()) {
+ struct veth_devtx_ctx ctx = {
+ .devtx = {
+ .netdev = skb->dev,
+ .sinfo = skb_shinfo(skb),
+ },
+ .skb = skb,
+ };
+
+ veth_devtx_submit_skb(&ctx.devtx, skb);
+ }
+
rcv_priv = netdev_priv(rcv);
rxq = skb_get_queue_mapping(skb);
if (rxq < rcv->real_num_rx_queues) {
@@ -509,11 +552,28 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
for (i = 0; i < n; i++) {
struct xdp_frame *frame = frames[i];
void *ptr = veth_xdp_to_ptr(frame);
+ struct veth_devtx_ctx ctx;
if (unlikely(xdp_get_frame_len(frame) > max_len ||
- __ptr_ring_produce(&rq->xdp_ring, ptr)))
+ __ptr_ring_full(&rq->xdp_ring)))
+ break;
+
+ if (devtx_enabled()) {
+ memset(&ctx, 0, sizeof(ctx));
+ ctx.devtx.netdev = dev;
+ ctx.devtx.sinfo = xdp_frame_has_frags(frame) ?
+ xdp_get_shared_info_from_frame(frame) : NULL;
+ ctx.xdpf = frame;
+
+ veth_devtx_submit_xdp(&ctx.devtx, frame);
+ }
+
+ if (unlikely(__ptr_ring_produce(&rq->xdp_ring, ptr)))
break;
nxmit++;
+
+ if (devtx_enabled())
+ veth_devtx_complete_xdp(&ctx.devtx, frame);
}
spin_unlock(&rq->xdp_ring.producer_lock);
@@ -1732,6 +1792,28 @@ static int veth_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
return 0;
}
+static int veth_devtx_request_tx_timestamp(const struct devtx_ctx *_ctx)
+{
+ struct veth_devtx_ctx *ctx = (struct veth_devtx_ctx *)_ctx;
+
+ if (ctx->skb)
+ __net_timestamp(ctx->skb);
+ else
+ ctx->xdp_tx_timestamp = ktime_get_real();
+
+ return 0;
+}
+
+static int veth_devtx_tx_timestamp(const struct devtx_ctx *_ctx, u64 *timestamp)
+{
+ struct veth_devtx_ctx *ctx = (struct veth_devtx_ctx *)_ctx;
+
+ if (ctx->skb)
+ *timestamp = ctx->skb->tstamp;
+
+ return 0;
+}
+
static const struct net_device_ops veth_netdev_ops = {
.ndo_init = veth_dev_init,
.ndo_open = veth_open,
@@ -1756,6 +1838,8 @@ static const struct net_device_ops veth_netdev_ops = {
static const struct xdp_metadata_ops veth_xdp_metadata_ops = {
.xmo_rx_timestamp = veth_xdp_rx_timestamp,
.xmo_rx_hash = veth_xdp_rx_hash,
+ .xmo_request_tx_timestamp = veth_devtx_request_tx_timestamp,
+ .xmo_tx_timestamp = veth_devtx_tx_timestamp,
};
#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
@@ -2041,11 +2125,20 @@ static struct rtnl_link_ops veth_link_ops = {
static __init int veth_init(void)
{
+ int ret;
+
+ ret = devtx_hooks_register(&veth_devtx_hook_ids, &veth_xdp_metadata_ops);
+ if (ret) {
+ pr_warn("failed to register devtx hooks: %d", ret);
+ return ret;
+ }
+
return rtnl_link_register(&veth_link_ops);
}
static __exit void veth_exit(void)
{
+ devtx_hooks_unregister(&veth_devtx_hook_ids);
rtnl_link_unregister(&veth_link_ops);
}
--
2.41.0.255.g8b1d071c50-goog
next prev parent reply other threads:[~2023-07-07 19:30 UTC|newest]
Thread overview: 34+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-07-07 19:29 [xdp-hints] [RFC bpf-next v3 00/14] bpf: Netdev TX metadata Stanislav Fomichev
2023-07-07 19:29 ` [xdp-hints] [RFC bpf-next v3 01/14] bpf: Rename some xdp-metadata functions into dev-bound Stanislav Fomichev
2023-07-07 19:29 ` [xdp-hints] [RFC bpf-next v3 02/14] bpf: Make it easier to add new metadata kfunc Stanislav Fomichev
2023-07-07 19:29 ` [xdp-hints] [RFC bpf-next v3 03/14] xsk: Support XDP_TX_METADATA_LEN Stanislav Fomichev
2023-07-07 19:29 ` [xdp-hints] [RFC bpf-next v3 04/14] bpf: Implement devtx hook points Stanislav Fomichev
2023-07-07 19:29 ` [xdp-hints] [RFC bpf-next v3 05/14] bpf: Implement devtx timestamp kfunc Stanislav Fomichev
2023-07-07 19:29 ` Stanislav Fomichev [this message]
2023-07-07 19:29 ` [xdp-hints] [RFC bpf-next v3 07/14] bpf: Introduce tx checksum devtx kfuncs Stanislav Fomichev
2023-07-07 19:30 ` [xdp-hints] [RFC bpf-next v3 08/14] net: veth: Implement devtx tx checksum Stanislav Fomichev
2023-07-07 19:30 ` [xdp-hints] [RFC bpf-next v3 09/14] net/mlx5e: Implement devtx kfuncs Stanislav Fomichev
2023-07-11 22:56 ` [xdp-hints] " Alexei Starovoitov
2023-07-11 23:24 ` Stanislav Fomichev
2023-07-11 23:45 ` Alexei Starovoitov
2023-07-12 0:14 ` Stanislav Fomichev
2023-07-12 2:50 ` Alexei Starovoitov
2023-07-12 3:29 ` Stanislav Fomichev
2023-07-12 4:59 ` Alexei Starovoitov
2023-07-12 5:36 ` Stanislav Fomichev
2023-07-12 15:16 ` Willem de Bruijn
2023-07-12 16:28 ` Willem de Bruijn
2023-07-12 19:03 ` Alexei Starovoitov
2023-07-12 19:11 ` Willem de Bruijn
2023-07-12 19:42 ` Alexei Starovoitov
2023-07-12 20:09 ` Jakub Kicinski
2023-07-12 20:53 ` Stanislav Fomichev
2023-07-12 0:32 ` Jakub Kicinski
2023-07-12 2:37 ` Alexei Starovoitov
2023-07-12 3:07 ` Jakub Kicinski
2023-07-12 3:23 ` Alexei Starovoitov
2023-07-07 19:30 ` [xdp-hints] [RFC bpf-next v3 10/14] selftests/xsk: Support XDP_TX_METADATA_LEN Stanislav Fomichev
2023-07-07 19:30 ` [xdp-hints] [RFC bpf-next v3 11/14] selftests/bpf: Add helper to query current netns cookie Stanislav Fomichev
2023-07-07 19:30 ` [xdp-hints] [RFC bpf-next v3 12/14] selftests/bpf: Add csum helpers Stanislav Fomichev
2023-07-07 19:30 ` [xdp-hints] [RFC bpf-next v3 13/14] selftests/bpf: Extend xdp_metadata with devtx kfuncs Stanislav Fomichev
2023-07-07 19:30 ` [xdp-hints] [RFC bpf-next v3 14/14] selftests/bpf: Extend xdp_hw_metadata " Stanislav Fomichev
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
List information: https://lists.xdp-project.net/postorius/lists/xdp-hints.xdp-project.net/
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230707193006.1309662-7-sdf@google.com \
--to=sdf@google.com \
--cc=andrii@kernel.org \
--cc=ast@kernel.org \
--cc=bjorn@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=daniel@iogearbox.net \
--cc=dsahern@kernel.org \
--cc=haoluo@google.com \
--cc=hawk@kernel.org \
--cc=john.fastabend@gmail.com \
--cc=jolsa@kernel.org \
--cc=kpsingh@kernel.org \
--cc=kuba@kernel.org \
--cc=maciej.fijalkowski@intel.com \
--cc=magnus.karlsson@intel.com \
--cc=martin.lau@linux.dev \
--cc=netdev@vger.kernel.org \
--cc=song@kernel.org \
--cc=toke@kernel.org \
--cc=willemb@google.com \
--cc=xdp-hints@xdp-project.net \
--cc=yhs@fb.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox