path: root/include/net/gro.h
author     Jakub Kicinski <kuba@kernel.org>    2024-03-07 21:12:45 -0800
committer  Jakub Kicinski <kuba@kernel.org>    2024-03-07 21:12:46 -0800
commit     e8bb2ccff7216d520a7bc33c22484dafebe8147e (patch)
tree       29b291f5ccbeb9324f98f48e352a84d311314f69 /include/net/gro.h
parent     Merge branch 'selftests-mptcp-share-code-and-fix-shellcheck-warnings' (diff)
parent     net: move rps_sock_flow_table to net_hotdata (diff)
download   linux-e8bb2ccff7216d520a7bc33c22484dafebe8147e.tar.gz
           linux-e8bb2ccff7216d520a7bc33c22484dafebe8147e.zip
Merge branch 'net-group-together-hot-data'
Eric Dumazet says:

====================
net: group together hot data

While our recent structure reorganizations were focused on increasing
max throughput, there is still an area where improvements are much needed.

In many cases, a cpu handles one packet at a time, instead of a nice batch:

Hardware interrupt -> Software interrupt -> Network/Protocol stacks.

If the cpu was idle or busy in other layers, it has to pull many cache lines.

This series adds a new net_hotdata structure, where some critical
(and read-mostly) data used in the rx and tx paths is packed into a small
number of cache lines.

Synthetic benchmarks will not see much difference, but the latency of a
single packet should improve.

net_hotdata's current size on 64bit is 416 bytes, but it might grow in
the future.

Also move RPS definitions to a new include file.
====================

Link: https://lore.kernel.org/r/20240306160031.874438-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
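To make the grouping idea concrete, here is a rough, hypothetical sketch of the layout the cover letter describes. The field list is a guessed subset and the struct name carries a _sketch suffix on purpose; the authoritative definition is the new include/net/hotdata.h added by this series.

/* Illustrative only: pack read-mostly fields touched by the rx/tx fast
 * paths into one cacheline-aligned structure, so a cpu that wakes up to
 * handle a single packet pulls a few adjacent cache lines instead of
 * chasing many scattered globals.
 */
struct net_hotdata_sketch {
	struct list_head	offload_base;		/* protocol offload handlers */
	int			gro_normal_batch;	/* used by gro_normal_one() below */
	int			netdev_budget;
	int			netdev_budget_usecs;
	int			dev_tx_weight;
	int			dev_rx_weight;
} ____cacheline_aligned;

extern struct net_hotdata_sketch net_hotdata_sketch;	/* single global, hot instance */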
Diffstat (limited to 'include/net/gro.h')
-rw-r--r--  include/net/gro.h  5
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/include/net/gro.h b/include/net/gro.h
index 2b58671a6549..d6fc8fbd3730 100644
--- a/include/net/gro.h
+++ b/include/net/gro.h
@@ -9,6 +9,7 @@
 #include <net/ip6_checksum.h>
 #include <linux/skbuff.h>
 #include <net/udp.h>
+#include <net/hotdata.h>
 
 struct napi_gro_cb {
 	union {
@@ -446,7 +447,7 @@ static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
 {
 	list_add_tail(&skb->list, &napi->rx_list);
 	napi->rx_count += segs;
-	if (napi->rx_count >= READ_ONCE(gro_normal_batch))
+	if (napi->rx_count >= READ_ONCE(net_hotdata.gro_normal_batch))
 		gro_normal_list(napi);
 }
@@ -493,6 +494,4 @@ static inline void inet6_get_iif_sdif(const struct sk_buff *skb, int *iif, int *sdif)
 #endif
 }
 
-extern struct list_head offload_base;
-
 #endif /* _NET_IPV6_GRO_H */
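For context on the removed extern above: users of that list are expected to follow the same pattern as the gro_normal_one() hunk, reading the field through the global net_hotdata instance rather than a standalone symbol. A minimal sketch, assuming offload_base is simply re-homed as a struct list_head member of net_hotdata (which the removal here, together with the new <net/hotdata.h> include, suggests); the helper name register_example_offload is hypothetical, and the real dev_add_offload() additionally keeps the list sorted by priority.

#include <net/hotdata.h>	/* struct net_hotdata and the global net_hotdata instance */

/* Hypothetical helper: how a former user of the standalone offload_base
 * list would look once the list lives inside net_hotdata.
 */
static void register_example_offload(struct packet_offload *po)
{
	/* before this series: list_add_rcu(&po->list, &offload_base); */
	list_add_rcu(&po->list, &net_hotdata.offload_base);
}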